1+ package com.cjcrafter.openai
2+
3+ import com.cjcrafter.openai.chat.ChatRequest
4+ import com.cjcrafter.openai.chat.ChatResponse
5+ import com.cjcrafter.openai.chat.ChatResponseChunk
6+ import com.cjcrafter.openai.chat.ChatUser
7+ import com.google.gson.Gson
8+ import com.google.gson.GsonBuilder
9+ import com.google.gson.JsonObject
10+ import com.google.gson.JsonParser
11+ import com.google.gson.JsonSerializer
12+ import okhttp3.*
13+ import okhttp3.MediaType.Companion.toMediaType
14+ import okhttp3.RequestBody.Companion.toRequestBody
15+ import java.io.IOException
16+ import java.lang.IllegalArgumentException
17+ import java.util.function.Consumer
18+
19+ /* *
20+ * To get your API key:
21+ * 1. Log in to your account: Go to [https://www.openai.com/](openai.com) and
22+ * log in.
23+ * 2. Access the API dashboard: After logging in, click on the "API" tab.
24+ * 3. Choose a subscription plan: Select a suitable plan based on your needs
25+ * and complete the payment process.
26+ * 4. Obtain your API key: After subscribing to a plan, you will be redirected
27+ * to the API dashboard, where you can find your unique API key. Copy and store it securely.
28+ *
29+ * @property apiKey Your OpenAI API key. It starts with `"sk-"` (without the quotes).
30+ * @property organization If you belong to multiple organizations, specify which one to use (else `null`).
31+ * @property client Controls proxies, timeouts, etc.
32+ * @constructor Create a ChatBot for responding to requests.
33+ */
class OpenAI @JvmOverloads constructor(
    private val apiKey: String,
    private val organization: String? = null,
    private val client: OkHttpClient = OkHttpClient()
) {

    // All OpenAI chat endpoints send and receive UTF-8 encoded JSON.
    private val mediaType: MediaType = "application/json; charset=utf-8".toMediaType()

    // ChatUser enum constants must be serialized as lowercase strings
    // ("user", "assistant", ...) to match the OpenAI wire format.
    private val gson: Gson = GsonBuilder()
        .registerTypeAdapter(ChatUser::class.java, JsonSerializer<ChatUser> { src, _, context ->
            context.serialize(src.name.lowercase())
        })
        .create()

    /**
     * Serializes [request] to JSON and wraps it in an authenticated POST to
     * the chat completions endpoint, adding the organization header when one
     * was provided.
     */
    private fun buildRequest(request: Any): Request {
        val body: RequestBody = gson.toJson(request).toRequestBody(mediaType)
        return Request.Builder()
            .url("https://api.openai.com/v1/chat/completions")
            .addHeader("Content-Type", "application/json")
            .addHeader("Authorization", "Bearer $apiKey")
            .apply { if (organization != null) addHeader("OpenAI-Organization", organization) }
            .post(body)
            .build()
    }

    /**
     * Blocks the current thread until OpenAI responds to the https request.
     * The returned value includes information including tokens, generated
     * text, and stop reason. You can access the generated message through
     * [ChatResponse.choices].
     *
     * @param request The input information for ChatGPT.
     * @return The returned response.
     * @throws IOException If an IO Exception occurs or the response has no body.
     * @throws IllegalArgumentException If OpenAI returns an error (the message
     * is OpenAI's error description).
     */
    @Throws(IOException::class)
    fun generateResponse(request: ChatRequest): ChatResponse {
        request.stream = false // use streamResponse for stream=true

        client.newCall(buildRequest(request)).execute().use { response ->
            val body = response.body ?: throw IOException("OpenAI response had no body")

            // Servers respond to API calls with json blocks. Since raw JSON isn't
            // very developer friendly, we wrap for easy data access.
            val rootObject = JsonParser.parseString(body.string()).asJsonObject

            // API-level failures (bad key, bad model, ...) arrive as an "error"
            // object instead of an HTTP failure; surface OpenAI's message.
            require(!rootObject.has("error")) { rootObject["error"].asJsonObject["message"].asString }
            return ChatResponse(rootObject)
        }
    }

    /**
     * This is a helper method that calls [streamResponse], which lets you use
     * the generated tokens in real time (As ChatGPT generates them).
     *
     * This method does not block the thread. Method calls to [onResponse] are
     * not handled by the main thread. It is crucial to consider thread safety
     * within the context of your program.
     *
     * Usage:
     * ```
     * val messages = mutableListOf("Write a poem".toUserMessage())
     * val request = ChatRequest("gpt-3.5-turbo", messages)
     * val bot = OpenAI(/* your key */)
     *
     * bot.streamResponseKotlin(request) {
     *     print(choices[0].delta)
     *
     *     // when finishReason != null, this is the last message (done generating new tokens)
     *     if (choices[0].finishReason != null)
     *         messages.add(choices[0].message)
     * }
     * ```
     *
     * @param request The input information for ChatGPT.
     * @param onResponse The method to call for each chunk.
     * @since 1.2.0
     */
    fun streamResponseKotlin(request: ChatRequest, onResponse: ChatResponseChunk.() -> Unit) {
        streamResponse(request, { it.onResponse() })
    }

    /**
     * Uses ChatGPT to generate tokens in real time. As ChatGPT generates
     * content, those tokens are sent in a stream in real time. This allows you
     * to update the user without long delays between their input and OpenAI's
     * response.
     *
     * For *"simpler"* calls, you can use [generateResponse] which will block
     * the thread until the entire response is generated.
     *
     * Instead of using the [ChatResponse], this method uses [ChatResponseChunk].
     * This means that it is not possible to retrieve the number of tokens from
     * this method.
     *
     * This method does not block the thread. Method calls to [onResponse] are
     * not handled by the main thread. It is crucial to consider thread safety
     * within the context of your program.
     *
     * @param request The input information for ChatGPT.
     * @param onResponse The method to call for each chunk.
     * @param onFailure The method to call if the HTTP request fails. This method
     * will not be called if OpenAI returns an error.
     * @see generateResponse
     * @see streamResponseKotlin
     * @since 1.2.0
     */
    @JvmOverloads
    fun streamResponse(
        request: ChatRequest,
        onResponse: Consumer<ChatResponseChunk>, // use Consumer instead of Kotlin for better Java syntax
        onFailure: Consumer<IOException> = Consumer { it.printStackTrace() }
    ) {
        request.stream = true // use generateResponse for stream=false
        val httpRequest = buildRequest(request)

        client.newCall(httpRequest).enqueue(object : Callback {

            // OpenAI streams incremental deltas; we fold each one into a single
            // cached chunk so callers always see the message-so-far.
            var cache: ChatResponseChunk? = null

            override fun onFailure(call: Call, e: IOException) {
                onFailure.accept(e)
            }

            override fun onResponse(call: Call, response: Response) {
                response.body?.source()?.use { source ->
                    while (!source.exhausted()) {

                        // Each server-sent event is a line of the form
                        // "data: {json}". Blank keep-alive lines are skipped,
                        // and "data: [DONE]" marks the end of the stream.
                        val line = source.readUtf8Line() ?: continue
                        if (line.isEmpty())
                            continue

                        // removePrefix is safe even if a line lacks the prefix
                        // (substring would throw on short lines).
                        val json = line.removePrefix("data: ")
                        if (json == "[DONE]")
                            continue

                        val rootObject = JsonParser.parseString(json).asJsonObject
                        val chunk = cache?.apply { update(rootObject) }
                            ?: ChatResponseChunk(rootObject).also { cache = it }

                        onResponse.accept(chunk)
                    }
                }
            }
        })
    }
}