package com.cjcrafter.openai.chat

- import com.google.gson.JsonObject
import com.google.gson.annotations.SerializedName

/**
- * These are the arguments that control the result of the output. For more
- * information, refer to the [OpenAI Docs](https://platform.openai.com/docs/api-reference/completions/create).
+ * [ChatRequest] holds the configurable options that can be sent to the OpenAI
+ * Chat API. For most use cases, you only need to set [model] and [messages].
+ * For more detailed descriptions for each option, refer to the
+ * [Chat Wiki](https://platform.openai.com/docs/api-reference/chat)
 *
- * @param model The model used to generate the text. Recommended: "gpt-3.5-turbo."
- * @param messages All previous messages from the conversation.
- * @param temperature How "creative" the results are. [0.0, 2.0].
- * @param topP Controls how "on topic" the tokens are.
- * @param n Controls how many responses to generate. Numbers >1 will chew through your tokens.
- * @param stream **UNTESTED** recommend keeping this false.
- * @param stop The sequence used to stop generating tokens.
- * @param maxTokens The maximum number of tokens to use.
- * @param presencePenalty Prevent talking about duplicate topics.
- * @param frequencyPenalty Prevent repeating the same text.
- * @param logitBias Control specific tokens from being used.
- * @param user Who send this request (for moderation).
+ * [messages] stores **ALL** previous messages from the conversation. It is
+ * **YOUR RESPONSIBILITY** to store and update this list for your conversations
+ * (check out the [JavaChatTest] or the README.md for an example). In general,
+ * the list should start with a [ChatUser.SYSTEM] message, then alternate between
+ * [ChatUser.USER] and [ChatUser.ASSISTANT]. Failing to follow this order may
+ * confuse the model and cause it to apologize for not responding.
+ *
+ * It is best practice to store 1 [ChatRequest] for each conversation, and to
+ * update the variables (especially [messages]) between [ChatBot.generateResponse]
+ * calls. You can easily store your [ChatRequest] as a string or as a JSON file
+ * by using Google's Gson library to serialize the object to a JSON string.
+ *
+ * You should not set [stream]. TODO update docs after adding stream support
+ *
+ * @property model The model used to generate the text. Recommended: `gpt-3.5-turbo`.
+ * @property messages A mutable list of previous messages from the conversation.
+ * @property temperature How "creative" the results are. [0.0, 2.0]. Defaults to `1.0`.
+ * @property topP Controls how "on topic" the tokens are. Defaults to `1.0`.
+ * @property n Controls how many responses to generate. Numbers >1 will chew through your tokens. Defaults to `1`.
+ * @property stream true=receive the response token by token as it is generated, false=wait until the entire message is generated. Defaults to `false`.
+ * @property stop The sequence used to stop generating tokens. Defaults to `null`.
+ * @property maxTokens The maximum number of tokens to use. Defaults to `null` (no limit).
+ * @property presencePenalty Prevent talking about duplicate topics. Defaults to `0.0`.
+ * @property frequencyPenalty Prevent repeating the same text. Defaults to `0.0`.
+ * @property logitBias Increase/decrease the chance of a specific token appearing in the generated text. Defaults to `null`.
+ * @property user Who sent this request (for moderation).
+ * @constructor Create a chat request
+ * @see ChatBot.generateResponse
 * @see <a href="https://platform.openai.com/docs/api-reference/completions/create">OpenAI Wiki</a>
 */
data class ChatRequest @JvmOverloads constructor(
-   val model: String,
+   var model: String,
    var messages: MutableList<ChatMessage>,
-   val temperature: Float = 1.0f,
-   @field:SerializedName("top_p") val topP: Float = 1.0f,
-   val n: Int = 1,
-   val stream: Boolean = false,
-   val stop: String? = null,
-   @field:SerializedName("max_tokens") val maxTokens: Int? = null,
-   @field:SerializedName("presence_penalty") val presencePenalty: Float = 0f,
-   @field:SerializedName("frequency_penalty") val frequencyPenalty: Float = 0f,
-   @field:SerializedName("logit_bias") val logitBias: JsonObject? = null,
-   val user: String? = null
+   var temperature: Float? = null,
+   @field:SerializedName("top_p") var topP: Float? = null,
+   var n: Int? = null,
+   var stream: Boolean? = null,
+   var stop: String? = null,
+   @field:SerializedName("max_tokens") var maxTokens: Int? = null,
+   @field:SerializedName("presence_penalty") var presencePenalty: Float? = null,
+   @field:SerializedName("frequency_penalty") var frequencyPenalty: Float? = null,
+   @field:SerializedName("logit_bias") var logitBias: Map<String, Float>? = null,
+   var user: String? = null
)
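
The KDoc above leaves conversation state to the caller: keep one ChatRequest per conversation and grow [messages] between calls. The sketch below shows that pattern under a few assumptions, since only ChatRequest is defined in this file: it assumes ChatMessage exposes a (role, content) constructor, that ChatBot is built from an API key, and that ChatBot.generateResponse(request) returns a response whose first choice carries the assistant's ChatMessage. Treat those names as illustrative, not as the library's confirmed API.

import com.cjcrafter.openai.chat.ChatBot       // assumed location; referenced by the KDoc
import com.cjcrafter.openai.chat.ChatMessage
import com.cjcrafter.openai.chat.ChatRequest
import com.cjcrafter.openai.chat.ChatUser

fun main() {
    // Start the conversation with a SYSTEM message, as the KDoc recommends.
    val messages = mutableListOf(
        ChatMessage(ChatUser.SYSTEM, "You are a helpful assistant.")  // assumed constructor
    )

    // Only model and messages are required; @JvmOverloads leaves every other
    // option null, so the OpenAI API falls back to its own defaults.
    val request = ChatRequest(model = "gpt-3.5-turbo", messages = messages)

    val bot = ChatBot("sk-your-api-key")  // assumed constructor

    // One USER -> ASSISTANT turn; repeat this block for every turn so the
    // request always carries the full history.
    messages.add(ChatMessage(ChatUser.USER, "Explain Kotlin data classes in one sentence."))
    val reply = bot.generateResponse(request).choices[0].message  // hypothetical accessors
    messages.add(reply)
    println(reply.content)  // hypothetical accessor
}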
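
The KDoc also suggests persisting a ChatRequest between sessions with Gson. Because the class is a plain data holder whose non-default field names are already mapped with @SerializedName, the stock Gson object can round-trip it; the helper functions and file handling here are only an illustration.

import com.cjcrafter.openai.chat.ChatRequest
import com.google.gson.Gson
import java.io.File

private val gson = Gson()

// Write the whole conversation (model, options, and message history) to disk.
fun saveConversation(request: ChatRequest, file: File) {
    file.writeText(gson.toJson(request))
}

// Read it back later; the mutable properties (especially messages) can then be
// updated before the next ChatBot.generateResponse call.
fun loadConversation(file: File): ChatRequest =
    gson.fromJson(file.readText(), ChatRequest::class.java)

One caveat: since the class has no no-arg constructor, Gson instantiates it reflectively, so any field missing from the JSON ends up null rather than picking up a Kotlin default value.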