11package com.cjcrafter.openai.chat
22
33import com.google.gson.*
4- import okhttp3.MediaType
4+ import okhttp3.*
55import okhttp3.MediaType.Companion.toMediaType
6- import okhttp3.OkHttpClient
76import okhttp3.OkHttpClient.Builder
8- import okhttp3.Request
9- import okhttp3.RequestBody
107import okhttp3.RequestBody.Companion.toRequestBody
118import java.io.IOException
9+ import java.lang.IllegalArgumentException
1210import java.util.concurrent.TimeUnit
11+ import java.util.function.Consumer
1312
/**
 * The ChatBot class wraps the OpenAI API and lets you send messages and
@@ -41,7 +40,9 @@ class ChatBot(private val apiKey: String) {
4140 .readTimeout(0 , TimeUnit .SECONDS ).build()
4241 private val mediaType: MediaType = " application/json; charset=utf-8" .toMediaType()
4342 private val gson: Gson = GsonBuilder ()
44- .registerTypeAdapter(ChatUser ::class .java, JsonSerializer <ChatUser > { src, _, context -> context!! .serialize(src!! .name.lowercase())!! })
43+ .registerTypeAdapter(
44+ ChatUser ::class .java,
45+ JsonSerializer <ChatUser > { src, _, context -> context!! .serialize(src!! .name.lowercase())!! })
4546 .create()
4647
4748 /* *
@@ -56,7 +57,9 @@ class ChatBot(private val apiKey: String) {
5657 * @throws IllegalArgumentException If the input arguments are invalid.
5758 */
5859 @Throws(IOException ::class )
59- fun generateResponse (request : ChatRequest ? ): ChatResponse {
60+ fun generateResponse (request : ChatRequest ): ChatResponse {
61+ request.stream = false // use streamResponse for stream=true
62+
6063 val json = gson.toJson(request)
6164 val body: RequestBody = json.toRequestBody(mediaType)
6265 val httpRequest: Request = Request .Builder ()
@@ -83,4 +86,95 @@ class ChatBot(private val apiKey: String) {
8386 throw ex
8487 }
8588 }
89+
90+ /* *
91+ * This is a helper method that calls [streamResponse], which lets you use
92+ * the generated tokens in real time (As ChatGPT generates them).
93+ *
94+ * This method does not block the thread. Method calls to [onResponse] are
95+ * not handled by the main thread. It is crucial to consider thread safety
96+ * within the context of your program.
97+ *
98+ * @param request The input information for ChatGPT.
99+ * @param onResponse The method to call for each chunk.
100+ * @since 1.2.0
101+ */
102+ fun streamResponseKotlin (request : ChatRequest , onResponse : ChatResponseChunk .() -> Unit ) {
103+ streamResponse(request, { it.onResponse() })
104+ }
105+
106+ /* *
107+ * Uses ChatGPT to generate tokens in real time. As ChatGPT generates
108+ * content, those tokens are sent in a stream in real time. This allows you
109+ * to update the user without long delays between their input and OpenAI's
110+ * response.
111+ *
112+ * For *"simpler"* calls, you can use [generateResponse] which will block
113+ * the thread until the entire response is generated.
114+ *
115+ * Instead of using the [ChatResponse], this method uses [ChatResponseChunk].
116+ * This means that it is not possible to retrieve the number of tokens from
117+ * this method,
118+ *
119+ * This method does not block the thread. Method calls to [onResponse] are
120+ * not handled by the main thread. It is crucial to consider thread safety
121+ * within the context of your program.
122+ *
123+ * @param request The input information for ChatGPT.
124+ * @param onResponse The method to call for each chunk.
125+ * @param onFailure The method to call if the HTTP fails. This method will
126+ * not be called if OpenAI returns an error.
127+ * @see generateResponse
128+ * @see streamResponseKotlin
129+ * @since 1.2.0
130+ */
131+ @JvmOverloads
132+ fun streamResponse (
133+ request : ChatRequest ,
134+ onResponse : Consumer <ChatResponseChunk >, // use Consumer instead of Kotlin for better Java syntax
135+ onFailure : Consumer <IOException > = Consumer { it.printStackTrace() }
136+ ) {
137+ request.stream = true // use requestResponse for stream=false
138+
139+ val json = gson.toJson(request)
140+ val body: RequestBody = json.toRequestBody(mediaType)
141+ val httpRequest: Request = Request .Builder ()
142+ .url(" https://api.openai.com/v1/chat/completions" )
143+ .addHeader(" Content-Type" , " application/json" )
144+ .addHeader(" Authorization" , " Bearer $apiKey " )
145+ .post(body)
146+ .build()
147+
148+ client.newCall(httpRequest).enqueue(object : Callback {
149+ var cache: ChatResponseChunk ? = null
150+
151+ override fun onFailure (call : Call , e : IOException ) {
152+ onFailure.accept(e)
153+ }
154+
155+ override fun onResponse (call : Call , response : Response ) {
156+ response.body?.source()?.use { source ->
157+ while (! source.exhausted()) {
158+
159+ // Parse the JSON string as a map. Every string starts
160+ // with "data: ", so we need to remove that.
161+ var jsonResponse = source.readUtf8Line() ? : continue
162+ if (jsonResponse.isEmpty())
163+ continue
164+ jsonResponse = jsonResponse.substring(" data: " .length)
165+ if (jsonResponse == " [DONE]" )
166+ continue
167+
168+ val rootObject = JsonParser .parseString(jsonResponse).asJsonObject
169+ if (cache == null )
170+ cache = ChatResponseChunk (rootObject)
171+ else
172+ cache!! .update(rootObject)
173+
174+ onResponse.accept(cache!! )
175+ }
176+ }
177+ }
178+ })
179+ }
86180}
0 commit comments