@@ -35,6 +35,22 @@ import java.util.function.Consumer
  * 4. Obtain your API key: After subscribing to a plan, you will be redirected
  *    to the API dashboard, where you can find your unique API key. Copy and store it securely.
  *
+ * Every API method in this class has a non-blocking variant that enqueues the
+ * HTTPS request on a separate thread. These variants have `Async` appended to
+ * their names.
+ *
+ * Completions API:
+ * * [createCompletion]
+ * * [streamCompletion]
+ * * [createCompletionAsync]
+ * * [streamCompletionAsync]
+ *
+ * Chat API:
+ * * [createChatCompletion]
+ * * [streamChatCompletion]
+ * * [createChatCompletionAsync]
+ * * [streamChatCompletionAsync]
+ *
  * @property apiKey Your OpenAI API key. It starts with `"sk-"` (without the quotes).
  * @property organization If you belong to multiple organizations, specify which one to use (else `null`).
  * @property client Controls proxies, timeouts, etc.
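For orientation, a minimal construction sketch, assuming the @JvmOverloads defaults cover `organization` and `client`; the environment-variable lookup is illustrative, not part of this library:

// Construct the client; `organization` and `client` fall back to their defaults.
val openai = OpenAI(System.getenv("OPENAI_API_KEY") ?: error("OPENAI_API_KEY not set"))

// Blocking call (run it off the UI thread), or the Async variant with callbacks:
// openai.createCompletion(request)
// openai.createCompletionAsync(request, onResponse, onFailure)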
@@ -60,9 +76,20 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
+ * Predicts which text comes after the prompt, thus "completing" the text.
+ *
+ * Calls OpenAI's Completions API and waits until the entire completion is
+ * generated. When [CompletionRequest.maxTokens] is large, generating all the
+ * tokens can take a long time, so it is recommended to use
+ * [streamCompletionAsync] instead so users can see partial completions.
+ *
+ * This method blocks the current thread until the entire completion has been
+ * received. For non-blocking options, use [createCompletionAsync] or
+ * [streamCompletionAsync]. It is important to consider which thread you are
+ * currently running on. Running this method on [javax.swing]'s event dispatch
+ * thread, for example, will cause your UI to freeze temporarily.
  *
- * @param request The input information for the Completions API.
- * @return The value returned by the Completions API.
+ * @param request The data to send to the API endpoint.
  * @since 1.3.0
  */
  @Throws(OpenAIError::class)
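A hedged usage sketch of the blocking call documented above. `CompletionRequest.builder()` and its setters are assumed shapes (the diff only references [CompletionRequest.maxTokens]); adjust to the library's actual request type:

// Build a request; builder() / model() / prompt() / maxTokens() are assumed names.
val request = CompletionRequest.builder()
    .model("davinci")
    .prompt("Once upon a time")
    .maxTokens(64)
    .build()

// Keep the blocking call off the Swing event dispatch thread (or any UI thread).
Thread {
    val response = openai.createCompletion(request)  // blocks this worker thread
    println(response)
}.start()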
@@ -85,11 +112,21 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
- * Create completion async
+ * Predicts which text comes after the prompt, thus "completing" the text.
+ *
+ * Calls OpenAI's Completions API and delivers the entire completion to
+ * [onResponse] once it has been generated. When [CompletionRequest.maxTokens]
+ * is large, generating all the tokens can take a long time, so it is
+ * recommended to use [streamCompletionAsync] instead so users can see partial
+ * completions.
  *
- * @param request
- * @param onResponse
- * @param onFailure
+ * This method will not block the current thread. The code block [onResponse]
+ * will be run later on a different thread. Due to the different thread, it
+ * is important to consider thread safety in the context of your program. To
+ * avoid thread safety issues, use [createCompletion] to block the main thread.
+ *
+ * @param request The data to send to the API endpoint.
+ * @param onResponse The code to execute once the entire completion has been generated.
+ * @param onFailure The code to execute when a failure occurs.
  * @since 1.3.0
  */
  @JvmOverloads
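A sketch of the non-blocking variant described above; the lambda parameter types are assumptions based on the `java.util.function.Consumer` import visible at the top of the diff:

// The callbacks run on a different thread, so marshal any UI updates back to
// the UI thread (e.g. SwingUtilities.invokeLater) before touching components.
openai.createCompletionAsync(
    request,
    { response -> println("Completion finished: $response") },  // onResponse
    { error -> error.printStackTrace() }                        // onFailure
)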
@@ -109,6 +146,8 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
+ * Predicts which text comes after the prompt, thus "completing" the text.
+ *
  * Calls OpenAI's Completions API using a *stream* of data. Streams allow
  * developers to access tokens in real time as they are generated. This is
  * used to create the "scrolling text" or "live typing" effect. Using
@@ -149,11 +188,13 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
+ * Predicts which text comes after the prompt, thus "completing" the text.
+ *
  * Calls OpenAI's Completions API using a *stream* of data. Streams allow
  * developers to access tokens in real time as they are generated. This is
  * used to create the "scrolling text" or "live typing" effect. Using
- * `streamCompletion` gives users information immediately, as opposed to
- * `createCompletion` where you have to wait for the entire message to
+ * `streamCompletionAsync` gives users information immediately, as opposed to
+ * `createCompletionAsync` where you have to wait for the entire message to
  * generate.
  *
  * This method will not block the current thread. The code block [onResponse]
@@ -183,9 +224,22 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
+ * Responds to the input in a conversational manner. Chat can "remember"
+ * older parts of the conversation by looking at the different messages in
+ * the list.
  *
- * @param request The input information for the Completions API.
- * @return The value returned by the Completions API.
+ * Calls OpenAI's Chat API and waits until the entire message is generated.
+ * Since generating an entire chat message can be time-consuming, it is
+ * preferred to use [streamChatCompletionAsync] instead.
+ *
+ * This method blocks the current thread until the entire response has been
+ * received. For non-blocking options, use [createChatCompletionAsync]. It is
+ * important to consider which thread you are currently running on. Running
+ * this method on [javax.swing]'s event dispatch thread, for example, will
+ * cause your UI to freeze temporarily.
+ *
+ * @param request The data to send to the API endpoint.
+ * @return The generated response.
  * @since 1.3.0
  */
  @Throws(OpenAIError::class)
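A hedged sketch of a blocking chat call. Only `ChatResponse` appears in this diff; `ChatRequest` and `ChatMessage` (and their constructors) are assumed names used for illustration:

// Assumed request/message shapes -- substitute the library's real types.
val chatRequest = ChatRequest(
    model = "gpt-3.5-turbo",
    messages = listOf(ChatMessage("user", "Write a haiku about Kotlin"))
)

// Blocks until the whole message is generated; run off the UI thread.
val reply: ChatResponse = openai.createChatCompletion(chatRequest)
println(reply)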
@@ -195,7 +249,7 @@ class OpenAI @JvmOverloads constructor(
      val httpRequest = buildRequest(request, CHAT_ENDPOINT)

      try {
-         val httpResponse = client.newCall(httpRequest).execute();
+         val httpResponse = client.newCall(httpRequest).execute()
          lateinit var response: ChatResponse
          MyCallback(true, { throw it }) {
              response = gson.fromJson(it, ChatResponse::class.java)
@@ -208,11 +262,22 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
- * Create completion async
+ * Responds to the input in a conversational manner. Chat can "remember"
+ * older parts of the conversation by looking at the different messages in
+ * the list.
  *
- * @param request
- * @param onResponse
- * @param onFailure
+ * Calls OpenAI's Chat API and delivers the entire message to [onResponse]
+ * once it has been generated. Since generating an entire chat message can be
+ * time-consuming, it is preferred to use [streamChatCompletionAsync] instead.
+ *
+ * This method will not block the current thread. The code block [onResponse]
+ * will be run later on a different thread. Due to the different thread, it
+ * is important to consider thread safety in the context of your program. To
+ * avoid thread safety issues, use [createChatCompletion] to block the main thread.
+ *
+ * @param request The data to send to the API endpoint.
+ * @param onResponse The code to execute once the entire message has been generated.
+ * @param onFailure The code to execute when a failure occurs.
  * @since 1.3.0
  */
  @JvmOverloads
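A sketch of the non-blocking chat call; the callback parameter types are assumptions, as above:

// onResponse receives the full ChatResponse on a background thread.
openai.createChatCompletionAsync(
    chatRequest,
    { response -> println("Assistant replied: $response") },  // onResponse
    { error -> error.printStackTrace() }                      // onFailure
)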
@@ -232,6 +297,10 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
+ * Responds to the input in a conversational manner. Chat can "remember"
+ * older parts of the conversation by looking at the different messages in
+ * the list.
+ *
  * Calls OpenAI's Completions API using a *stream* of data. Streams allow
  * developers to access tokens in real time as they are generated. This is
  * used to create the "scrolling text" or "live typing" effect. Using
@@ -277,17 +346,21 @@ class OpenAI @JvmOverloads constructor(
  }

  /**
+ * Responds to the input in a conversational manner. Chat can "remember"
+ * older parts of the conversation by looking at the different messages in
+ * the list.
+ *
  * Calls OpenAI's Completions API using a *stream* of data. Streams allow
  * developers to access tokens in real time as they are generated. This is
- * used to create the "scrolling text" or "living typing" effect. Using
- * `streamCompletion` gives users information immediately, as opposed to
- * `createCompletion` where you have to wait for the entire message to
+ * used to create the "scrolling text" or "live typing" effect. Using
+ * `streamChatCompletionAsync` gives users information immediately, as opposed to
+ * [createChatCompletionAsync] where you have to wait for the entire message to
  * generate.
  *
  * This method will not block the current thread. The code block [onResponse]
  * will be run later on a different thread. Due to the different thread, it
  * is important to consider thread safety in the context of your program. To
- * avoid thread safety issues, use [streamCompletion] to block the main thread.
+ * avoid thread safety issues, use [streamChatCompletion] to block the main thread.
  *
  * @param request The data to send to the API endpoint.
  * @param onResponse The code to execute for every chunk of text.
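Finally, a hedged sketch of the streaming variant whose KDoc ends above; the per-chunk callback type is an assumption:

// Each chunk arrives on a background thread as it is generated, which is what
// produces the "live typing" effect; marshal UI updates appropriately.
openai.streamChatCompletionAsync(
    chatRequest,
    { chunk -> print(chunk) },            // onResponse: called once per chunk
    { error -> error.printStackTrace() }  // onFailure
)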