-import {
-  query,
-  mutation,
-  httpAction,
-  ActionCtx,
-  internalQuery,
-} from "./_generated/server";
-import { api, components } from "./_generated/api";
-import {
-  PersistentTextStreaming,
-  StreamId,
-  StreamIdValidator,
-} from "@convex-dev/persistent-text-streaming";
-import { v } from "convex/values";
+import { httpAction } from "./_generated/server";
+import { internal } from "./_generated/api";
+import { StreamId } from "@convex-dev/persistent-text-streaming";
 import { OpenAI } from "openai";
+import { streamingComponent } from "./streaming";
 
-const persistentTextStreaming = new PersistentTextStreaming(
-  components.persistentTextStreaming
-);
-
-export const createChat = mutation({
-  args: {
-    prompt: v.string(),
-  },
-  handler: async (ctx, args) => {
-    const streamId = await persistentTextStreaming.createStream(ctx);
-    const chatId = await ctx.db.insert("chats", {
-      title: "...",
-      prompt: args.prompt,
-      stream: streamId,
-    });
-    return chatId;
-  },
-});
-
-export const getChatBody = query({
-  args: {
-    streamId: StreamIdValidator,
-  },
-  handler: async (ctx, args) => {
-    return await persistentTextStreaming.getStreamBody(
-      ctx,
-      args.streamId as StreamId
-    );
-  },
-});
-
-export const getChatForStream = internalQuery({
-  args: {
-    streamId: StreamIdValidator,
-  },
-  handler: async (ctx, args) => {
-    return await ctx.db
-      .query("chats")
-      .withIndex("by_stream", (q) => q.eq("stream", args.streamId))
-      .first();
-  },
-});
-
-type StreamChatRequestBody = {
-  streamId: string;
-};
-
-async function gptStreamer(
-  ctx: ActionCtx,
-  request: Request,
-  streamId: StreamId,
-  chunkAppender: (text: string) => Promise<void>
-) {
-  let chats = await ctx.runQuery(api.chat.getChats);
-  const prompt = chats[chats.length - 1].prompt;
-  chats = chats.slice(0, -1);
-
-  // Let's pass along the prior context.
-  const context = [];
-
-  const bodies = await Promise.all(
-    chats.map(async (chat) => {
-      const body = await ctx.runQuery(api.chat.getChatBody, {
-        streamId: chat.stream,
-      });
-      return body;
-    })
-  );
-
-  for (let i = 0; i < bodies.length; i++) {
-    const body = bodies[i];
-    const chat = chats[i];
-    context.push({
-      userMessage: chat.prompt,
-      assistantMessage: body,
-    });
-  }
-
-  const openai = new OpenAI();
-  const stream = await openai.chat.completions.create({
-    model: "gpt-4o-mini",
-    messages: [
-      {
-        role: "system",
-        content: `You are a helpful assistant that can answer questions and help with tasks.
-        Please provide your response in markdown format.
-
-        You are continuing a conversation. The conversation so far is found in the following JSON-formatted value:
-
-        ${JSON.stringify(context)}`,
-      },
-      {
-        role: "user",
-        content: prompt,
-      },
-    ],
-    stream: true,
-  });
-
-  for await (const part of stream) {
-    const text = part.choices[0]?.delta?.content || "";
-    await chunkAppender(text);
-  }
-}
+const openai = new OpenAI();
 
 export const streamChat = httpAction(async (ctx, request) => {
-  const body = (await request.json()) as StreamChatRequestBody;
-  const response = await persistentTextStreaming.stream(
+  const body = (await request.json()) as {
+    streamId: string;
+  };
+
+  // Start streaming and persisting at the same time, while
+  // we immediately return a streaming response to the client.
+  const response = await streamingComponent.stream(
     ctx,
     request,
     body.streamId as StreamId,
-    gptStreamer
+    async (ctx, request, streamId, append) => {
+      // Let's grab the history up to now so that the AI has some context.
+      const history = await ctx.runQuery(internal.messages.getHistory);
+
+      // Let's kick off a streaming request to OpenAI.
+      const stream = await openai.chat.completions.create({
+        model: "gpt-4.1-mini",
+        messages: [
+          {
+            role: "system",
+            content: `You are a helpful assistant that can answer questions and help with tasks.
+            Please provide your response in markdown format.
+
+            You are continuing a conversation. The conversation so far is carried in the chat messages that follow.`,
+          },
+          ...history,
+        ],
+        stream: true,
+      });
+
+      // Append each chunk to the persistent stream as it arrives from OpenAI.
+      for await (const part of stream) {
+        await append(part.choices[0]?.delta?.content || "");
+      }
+    }
   );
+
   response.headers.set("Access-Control-Allow-Origin", "*");
   response.headers.set("Vary", "Origin");
-  return response;
-});
 
-export const getChats = query({
-  args: {},
-  handler: async (ctx) => {
-    return await ctx.db.query("chats").collect();
-  },
-});
-
-export const clearChat = mutation({
-  args: {},
-  handler: async (ctx) => {
-    const chats = await ctx.db.query("chats").collect();
-    await Promise.all(chats.map((chat) => ctx.db.delete(chat._id)));
-  },
+  return response;
 });
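
The new code imports streamingComponent from "./streaming", a module that is not part of this diff. Judging from the PersistentTextStreaming setup deleted above, it is presumably just the component instantiation moved into its own file so other modules can share it; a minimal sketch:

// convex/streaming.ts - a minimal sketch; the real file is not in this diff.
import { PersistentTextStreaming } from "@convex-dev/persistent-text-streaming";
import { components } from "./_generated/api";

// The same component instance the deleted code created inline,
// now exported once and shared across modules.
export const streamingComponent = new PersistentTextStreaming(
  components.persistentTextStreaming
);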
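
The stream callback also relies on internal.messages.getHistory, another module outside this diff. Since its result is spread directly into the OpenAI messages array, it must return OpenAI-style { role, content } entries. A sketch under two loud assumptions: that the old "chats" table (with prompt and stream fields) still backs it, and that getStreamBody returns the accumulated text as { text, status }:

// convex/messages.ts - a sketch; the table name, field names, and the
// shape returned by getStreamBody are assumptions carried over from the
// deleted code, not confirmed by this diff.
import { internalQuery } from "./_generated/server";
import { StreamId } from "@convex-dev/persistent-text-streaming";
import { streamingComponent } from "./streaming";

export const getHistory = internalQuery({
  args: {},
  handler: async (ctx) => {
    const chats = await ctx.db.query("chats").collect();
    // Pair each stored prompt with its persisted stream body so the
    // result can be spread straight into OpenAI's `messages` array.
    const pairs = await Promise.all(
      chats.map(async (chat) => {
        const body = await streamingComponent.getStreamBody(
          ctx,
          chat.stream as StreamId
        );
        return [
          { role: "user" as const, content: chat.prompt },
          { role: "assistant" as const, content: body.text },
        ];
      })
    );
    return pairs.flat();
  },
});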
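
Finally, because streamChat is an httpAction that sets CORS headers, it has to be registered on the Convex HTTP router. httpRouter and http.route are the standard Convex API; the "/chat-stream" path is an assumption, not taken from this diff:

// convex/http.ts - standard router wiring; the path is an assumed example.
import { httpRouter } from "convex/server";
import { streamChat } from "./chat";

const http = httpRouter();

http.route({
  path: "/chat-stream",
  method: "POST",
  handler: streamChat,
});

export default http;

Note that a browser calling this endpoint cross-origin will also send an OPTIONS preflight request, which needs its own route returning the matching Access-Control-Allow-* headers.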