@@ -18,54 +18,54 @@ use super::{
 pub struct ChatBody {
     /// ID of the model to use.
     /// See the model endpoint compatibility table for details on which models work with the Chat API.
-    model: String,
+    pub model: String,
     /// The messages to generate chat completions for, in the chat format.
-    messages: Vec<Message>,
+    pub messages: Vec<Message>,
     /// What sampling temperature to use, between 0 and 2.
     /// Higher values like 0.8 will make the output more random,
     /// while lower values like 0.2 will make it more focused and deterministic.
     /// We generally recommend altering this or top_p but not both.
     /// Defaults to 1
     #[serde(skip_serializing_if = "Option::is_none")]
-    temperature: Option<f32>,
+    pub temperature: Option<f32>,
     /// An alternative to sampling with temperature, called nucleus sampling,
     /// where the model considers the results of the tokens with top_p probability mass.
     /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
     /// We generally recommend altering this or temperature but not both.
     /// Defaults to 1
     #[serde(skip_serializing_if = "Option::is_none")]
-    top_p: Option<f32>,
+    pub top_p: Option<f32>,
     /// How many chat completion choices to generate for each input message.
     /// Defaults to 1
     #[serde(skip_serializing_if = "Option::is_none")]
-    n: Option<i32>,
+    pub n: Option<i32>,
     /// If set, partial message deltas will be sent, like in ChatGPT.
     /// Tokens will be sent as data-only server-sent events as they become available,
     /// with the stream terminated by a data: [DONE] message. See the OpenAI Cookbook for example code.
     /// Defaults to false
     #[serde(skip_serializing_if = "Option::is_none")]
-    stream: Option<bool>,
+    pub stream: Option<bool>,
     /// Up to 4 sequences where the API will stop generating further tokens.
     /// Defaults to null
     #[serde(skip_serializing_if = "Option::is_none")]
-    stop: Option<Vec<String>>,
+    pub stop: Option<Vec<String>>,
     /// The maximum number of tokens to generate in the chat completion.
     /// The total length of input tokens and generated tokens is limited by the model's context length.
     /// Defaults to inf
     #[serde(skip_serializing_if = "Option::is_none")]
-    max_tokens: Option<i32>,
+    pub max_tokens: Option<i32>,
     /// Number between -2.0 and 2.0.
     /// Positive values penalize new tokens based on whether they appear in the text so far,
     /// increasing the model's likelihood to talk about new topics.
     /// Defaults to 0
     #[serde(skip_serializing_if = "Option::is_none")]
-    presence_penalty: Option<f32>,
+    pub presence_penalty: Option<f32>,
     /// Number between -2.0 and 2.0.
     /// Positive values penalize new tokens based on their existing frequency in the text so far,
     /// decreasing the model's likelihood to repeat the same line verbatim.
     /// Defaults to 0
     #[serde(skip_serializing_if = "Option::is_none")]
-    frequency_penalty: Option<f32>,
+    pub frequency_penalty: Option<f32>,
     /// Modify the likelihood of specified tokens appearing in the completion.
     /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer)
     /// to an associated bias value from -100 to 100. Mathematically,
@@ -75,11 +75,11 @@ pub struct ChatBody {
     /// values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     /// Defaults to null
     #[serde(skip_serializing_if = "Option::is_none")]
-    logit_bias: Option<HashMap<String, String>>,
+    pub logit_bias: Option<HashMap<String, String>>,
     /// A unique identifier representing your end-user,
     /// which can help OpenAI to monitor and detect abuse. Learn more.
     #[serde(skip_serializing_if = "Option::is_none")]
-    user: Option<String>,
+    pub user: Option<String>,
 }
 
 pub trait ChatApi {
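
With every field of ChatBody now public, callers can build a request body directly with struct-literal syntax instead of relying on a constructor. The sketch below is a minimal illustration only: it assumes ChatBody and Message are in scope and that Message exposes public String fields named role and content, which this diff does not show and which may differ in the actual crate.

    // Hypothetical usage sketch; field names on `Message` are assumptions.
    let body = ChatBody {
        model: "gpt-3.5-turbo".to_string(),
        messages: vec![Message {
            role: "user".to_string(),
            content: "Hello!".to_string(),
        }],
        temperature: Some(0.7),
        top_p: None,
        n: None,
        stream: None,
        stop: None,
        max_tokens: Some(256),
        presence_penalty: None,
        frequency_penalty: None,
        logit_bias: None,
        user: None,
    };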