package com.cjcrafter.openai

/**
 * Holds all the available models for the OpenAI API. Most users are probably
 * interested in [Models.Chat].
 *
 * Note that this list is manually updated and may fall out of date. For the
 * most up-to-date information, check the [OpenAI documentation](https://platform.openai.com/docs/models).
 * If you notice that something is out of date, please [open an issue](https://github.com/CJCrafter/ChatGPT-Java-API/issues).
 *
 * When OpenAI marks a model as _'Legacy'_, the corresponding field in Models
 * will be marked as [Deprecated]. Once it is reported that a model throws an
 * error due to deprecation, the deprecation level will be set to [DeprecationLevel.ERROR].
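 *
 * For callers, that means a deprecated constant keeps compiling with a
 * warning; for example (standard Kotlin `@Deprecated` behaviour, shown only
 * for illustration):
 * ```
 * // Warns today; once the level is raised to DeprecationLevel.ERROR this
 * // stops compiling, and the IDE offers the ReplaceWith() substitution
 * // (GPT_4_0613 in this case).
 * val model = Models.Chat.GPT_4_0314
 * ```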
 */
object Models {

    /**
     * Holds all available Chat models. Chat models are used to generate text
     * in a conversational manner; they work using conversational memory.
     *
     * Note that GPT-4 endpoints are only available to [paying customers](https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4).
     *
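     * A rough usage sketch (the `ChatRequest`, `ChatMessage`, and `ChatUser`
     * shapes below are illustrative assumptions, and `openai` stands for a
     * previously constructed client; the methods under `@see` are the actual
     * entry points):
     * ```
     * // Illustrative only: pass a constant from this object as the model id.
     * val request = ChatRequest(
     *     model = Models.Chat.GPT_3_5_TURBO,
     *     messages = mutableListOf(ChatMessage(ChatUser.USER, "Say hello!")),
     * )
     * val response = openai.createChatCompletion(request)
     * println(response.choices[0].message.content) // assumed response shape
     * ```
     *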
     * @see OpenAI.createChatCompletion
     * @see OpenAI.streamChatCompletion
     * @see OpenAI.assistants
     */
    object Chat {

        /**
         * `gpt-4` Turbo. Has a context window of 128,000 tokens with training
         * data up to April 2023. This model has improved instruction following,
         * JSON mode, reproducible output, parallel function calling, and more.
         * Returns a maximum of 4,096 output tokens.
         */
        const val GPT_4_1106_PREVIEW = "gpt-4-1106-preview"

        /**
         * `gpt-4` Turbo with vision. Has a context window of 128,000 tokens with
         * training data up to April 2023. Has the same capabilities as
         * [GPT_4_1106_PREVIEW], but can also understand images.
         */
        const val GPT_4_VISION_PREVIEW = "gpt-4-vision-preview"

        /**
         * Points to the currently supported version of `gpt-4`.
         *
         * See [continuous model upgrades](https://platform.openai.com/docs/models/continuous-model-upgrades)
         */
        const val GPT_4 = "gpt-4"

        /**
         * Points to the currently supported version of `gpt-4` with a 32k context window.
         *
         * See [continuous model upgrades](https://platform.openai.com/docs/models/continuous-model-upgrades)
         */
        const val GPT_4_32k = "gpt-4-32k"

        /**
         * Snapshot of `gpt-4` from June 13th, 2023 with improved function calling
         * support. Has a context window of 8,192 tokens with training data up
         * to September 2021.
         */
        const val GPT_4_0613 = "gpt-4-0613"

        /**
         * Snapshot of `gpt-4-32k` from June 13th, 2023 with improved function
         * calling support. Has a context window of 32,768 tokens with training
         * data up to September 2021.
         */
        const val GPT_4_32k_0613 = "gpt-4-32k-0613"

        /**
         * Snapshot of `gpt-4` from March 14th, 2023 with function calling support.
         * This model version will be deprecated on June 13th, 2024. Has a
         * context window of 8,192 tokens with training data up to September
         * 2021.
         */
        @Deprecated(
            message = "This model will be removed on June 13th, 2024",
            replaceWith = ReplaceWith("GPT_4_0613"),
            level = DeprecationLevel.WARNING,
        )
        const val GPT_4_0314 = "gpt-4-0314"

        /**
         * Snapshot of `gpt-4-32k` from March 14th, 2023 with function calling
         * support. This model version will be deprecated on June 13th, 2024.
         * Has a context window of 32,768 tokens with training data up to
         * September 2021.
         */
        @Deprecated(
            message = "This model will be removed on June 13th 2024",
            replaceWith = ReplaceWith("GPT_4_32k_0613"),
            level = DeprecationLevel.WARNING,
        )
        const val GPT_4_32k_0314 = "gpt-4-32k-0314"

        ////////////////////////////////////////////////////
        //                     GPT 3.5                    //
        ////////////////////////////////////////////////////

        /**
         * Has a context window of 16,385 tokens with training data up to
         * September 2021. This model has improved instruction following, JSON
         * mode, reproducible outputs, parallel function calling, and more.
         * Returns a maximum of 4,096 output tokens.
         */
        const val GPT_3_5_TURBO_1106 = "gpt-3.5-turbo-1106"

        /**
         * Points to the currently supported version of `gpt-3.5-turbo`.
         *
         * See [continuous model upgrades](https://platform.openai.com/docs/models/continuous-model-upgrades)
         */
        const val GPT_3_5_TURBO = "gpt-3.5-turbo"

        /**
         * Points to the currently supported version of `gpt-3.5-turbo-16k`.
         *
         * See [continuous model upgrades](https://platform.openai.com/docs/models/continuous-model-upgrades)
         */
        const val GPT_3_5_TURBO_16k = "gpt-3.5-turbo-16k"

        /**
         * Snapshot of `gpt-3.5-turbo` from June 13th, 2023. This model version
         * will be deprecated on June 13th, 2024. Has a context window of 4,096
         * tokens with training data up to September 2021.
         */
        @Deprecated(
            message = "This model will be removed on June 13th 2024",
            replaceWith = ReplaceWith("GPT_3_5_TURBO_1106"),
            level = DeprecationLevel.WARNING,
        )
        const val GPT_3_5_TURBO_0613 = "gpt-3.5-turbo-0613"

        /**
         * Snapshot of `gpt-3.5-turbo-16k` from June 13th, 2023. This model
         * version will be deprecated on June 13th, 2024. Has a context window
         * of 16,385 tokens with training data up to September 2021.
         */
        @Deprecated(
            message = "This model will be removed on June 13th 2024",
            replaceWith = ReplaceWith("GPT_3_5_TURBO_1106"),
            level = DeprecationLevel.WARNING,
        )
        const val GPT_3_5_TURBO_16k_0613 = "gpt-3.5-turbo-16k-0613"

        /**
         * Snapshot of `gpt-3.5-turbo` from March 1st, 2023. This model version
         * will be deprecated on June 13th, 2024. Has a context window of 4,096
         * tokens with training data up to September 2021.
         */
        @Deprecated(
            message = "This model will be removed on June 13th 2024",
            replaceWith = ReplaceWith("GPT_3_5_TURBO_1106"),
            level = DeprecationLevel.WARNING,
        )
        const val GPT_3_5_TURBO_0301 = "gpt-3.5-turbo-0301"
    }

    /**
     * Holds all available completion models. Completion models generate
     * free-form text from a prompt rather than holding a conversation.
     *
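     * A rough usage sketch (the `CompletionRequest` shape below is an
     * illustrative assumption, and `openai` stands for a previously
     * constructed client; the methods under `@see` are the actual entry
     * points):
     * ```
     * // Illustrative only: pass a constant from this object as the model id.
     * val request = CompletionRequest(
     *     model = Models.Completion.GPT_3_5_TURBO_INSTRUCT,
     *     prompt = "Once upon a time",
     * )
     * val response = openai.createCompletion(request)
     * println(response.choices[0].text) // assumed response shape
     * ```
     *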
     * @see OpenAI.createCompletion
     * @see OpenAI.streamCompletion
     */
    object Completion {

        /**
         * Similar to `text-davinci-003` but compatible with the legacy
         * completions endpoint.
         */
        const val GPT_3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct"

        /**
         * Can do language tasks with better quality and consistency than the
         * curie, babbage, or ada models. Will be deprecated on January 4th,
         * 2024. Has a context window of 4,096 tokens and training data up to
         * June 2021.
         */
        @Deprecated(
            message = "This model will be removed on January 4th 2024",
            replaceWith = ReplaceWith("GPT_3_5_TURBO_INSTRUCT"),
            level = DeprecationLevel.WARNING,
        )
        const val TEXT_DAVINCI_003 = "text-davinci-003"

        /**
         * Similar capabilities to `text-davinci-003` but trained with
         * supervised fine-tuning instead of reinforcement learning. Will be
         * deprecated on January 4th, 2024. Has a context window of 4,096 tokens
         * and training data up to June 2021.
         */
        @Deprecated(
            message = "This model will be removed on January 4th 2024",
            replaceWith = ReplaceWith("GPT_3_5_TURBO_INSTRUCT"),
            level = DeprecationLevel.WARNING,
        )
        const val TEXT_DAVINCI_002 = "text-davinci-002"

        /**
         * Optimized for code-completion tasks. Will be deprecated on January
         * 4th, 2024. Has a context window of 8,001 tokens and training data
         * up to June 2021.
         */
        @Deprecated(
            message = "This model will be removed on January 4th 2024",
            replaceWith = ReplaceWith("GPT_3_5_TURBO_INSTRUCT"),
            level = DeprecationLevel.WARNING,
        )
        const val CODE_DAVINCI_002 = "code-davinci-002"

        ////////////////////////////////////////////////////
        //                     GPT 3.0                    //
        //  Technically, the models above are part of 3.5 //
        ////////////////////////////////////////////////////

        /**
         * Very capable, but faster and lower cost than `davinci`. Has a context
         * window of 2,049 tokens and training data up to October 2019.
         */
        const val TEXT_CURIE_001 = "text-curie-001"

        /**
         * Capable of straightforward tasks, very fast, and lower cost. Has a
         * context window of 2,049 tokens and training data up to October 2019.
         */
        const val TEXT_BABBAGE_001 = "text-babbage-001"

        /**
         * Capable of very simple tasks, usually the fastest model in the `gpt-3`
         * series, and lowest cost. Has a context window of 2,049 tokens and
         * training data up to October 2019.
         */
        const val TEXT_ADA_001 = "text-ada-001"

        /**
         * Most capable `gpt-3` model, can do any task the other models can do,
         * often with higher quality. Can be fine-tuned. Has a context window of
         * 2,049 tokens and training data up to October 2019.
         */
        const val DAVINCI = "davinci"

        /**
         * Very capable, but faster and lower cost than `davinci`. Can be
         * fine-tuned. Has a context window of 2,049 tokens and training
         * data up to October 2019.
         */
        const val CURIE = "curie"

        /**
         * Capable of straightforward tasks, very fast, and lower cost. Can be
         * fine-tuned. Has a context window of 2,049 tokens and training data
         * up to October 2019.
         */
        const val BABBAGE = "babbage"

        /**
         * Capable of very simple tasks, usually the fastest model in the `gpt-3`
         * series, and lowest cost. Can be fine-tuned. Has a context window of
         * 2,049 tokens and training data up to October 2019.
         */
        const val ADA = "ada"
    }
}