Skip to content

Commit 69d59aa

Browse files
Copilot and oleander authored
Replace model system with specific GPT-4.1 family variants (#58)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: oleander <220827+oleander@users.noreply.github.com>
1 parent 6a081f0 commit 69d59aa

File tree

7 files changed

+201
-64
lines changed

7 files changed

+201
-64
lines changed

src/commit.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,7 @@ mod tests {
234234
let result = generate(
235235
"diff --git a/test.txt b/test.txt\n+Hello World".to_string(),
236236
1024,
237-
Model::GPT4oMini,
237+
Model::GPT41Mini,
238238
Some(&settings)
239239
)
240240
.await;
@@ -268,7 +268,7 @@ mod tests {
268268
let result = generate(
269269
"diff --git a/test.txt b/test.txt\n+Hello World".to_string(),
270270
1024,
271-
Model::GPT4oMini,
271+
Model::GPT41Mini,
272272
Some(&settings)
273273
)
274274
.await;

src/config.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ use console::Emoji;
1212
const DEFAULT_TIMEOUT: i64 = 30;
1313
const DEFAULT_MAX_COMMIT_LENGTH: i64 = 72;
1414
const DEFAULT_MAX_TOKENS: i64 = 2024;
15-
const DEFAULT_MODEL: &str = "gpt-4o-mini";
15+
const DEFAULT_MODEL: &str = "gpt-4.1"; // Matches Model::default()
1616
const DEFAULT_API_KEY: &str = "<PLACE HOLDER FOR YOUR API KEY>";
1717

1818
#[derive(Debug, Default, Deserialize, PartialEq, Eq, Serialize)]

src/model.rs

Lines changed: 65 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -18,26 +18,26 @@ use crate::config::AppConfig;
1818
static TOKENIZER: OnceLock<CoreBPE> = OnceLock::new();
1919

2020
// Model identifiers - using screaming case for constants
21-
const MODEL_GPT4: &str = "gpt-4";
22-
const MODEL_GPT4_OPTIMIZED: &str = "gpt-4o";
23-
const MODEL_GPT4_MINI: &str = "gpt-4o-mini";
2421
const MODEL_GPT4_1: &str = "gpt-4.1";
22+
const MODEL_GPT4_1_MINI: &str = "gpt-4.1-mini";
23+
const MODEL_GPT4_1_NANO: &str = "gpt-4.1-nano";
24+
const MODEL_GPT4_5: &str = "gpt-4.5";
2525
// TODO: Get this from config.rs or a shared constants module
2626
const DEFAULT_MODEL_NAME: &str = "gpt-4.1";
2727

2828
/// Represents the available AI models for commit message generation.
2929
/// Each model has different capabilities and token limits.
3030
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, Serialize, Deserialize, Default)]
3131
pub enum Model {
32-
/// Standard GPT-4 model
33-
GPT4,
34-
/// Optimized GPT-4 model for better performance
35-
GPT4o,
36-
/// Mini version of optimized GPT-4 for faster processing
37-
GPT4oMini,
3832
/// Default model - GPT-4.1 latest version
3933
#[default]
40-
GPT41
34+
GPT41,
35+
/// Mini version of GPT-4.1 for faster processing
36+
GPT41Mini,
37+
/// Nano version of GPT-4.1 for very fast processing
38+
GPT41Nano,
39+
/// GPT-4.5 model for advanced capabilities
40+
GPT45
4141
}
4242

4343
impl Model {
@@ -59,10 +59,7 @@ impl Model {
5959

6060
// Always use the proper tokenizer for accurate counts
6161
// We cannot afford to underestimate tokens as it may cause API failures
62-
let tokenizer = TOKENIZER.get_or_init(|| {
63-
let model_str: &str = self.into();
64-
get_tokenizer(model_str)
65-
});
62+
let tokenizer = TOKENIZER.get_or_init(|| get_tokenizer(self.as_ref()));
6663

6764
// Use direct tokenization for accurate token count
6865
let tokens = tokenizer.encode_ordinary(text);
@@ -75,8 +72,7 @@ impl Model {
7572
/// * `usize` - The maximum number of tokens the model can process
7673
pub fn context_size(&self) -> usize {
7774
profile!("Get context size");
78-
let model_str: &str = self.into();
79-
get_context_size(model_str)
75+
get_context_size(self.as_ref())
8076
}
8177

8278
/// Truncates the given text to fit within the specified token limit.
@@ -167,41 +163,80 @@ impl Model {
167163
}
168164
}
169165

170-
impl From<&Model> for &str {
171-
fn from(model: &Model) -> Self {
172-
match model {
173-
Model::GPT4o => MODEL_GPT4_OPTIMIZED,
174-
Model::GPT4 => MODEL_GPT4,
175-
Model::GPT4oMini => MODEL_GPT4_MINI,
176-
Model::GPT41 => MODEL_GPT4_1
166+
impl AsRef<str> for Model {
167+
fn as_ref(&self) -> &str {
168+
match self {
169+
Model::GPT41 => MODEL_GPT4_1,
170+
Model::GPT41Mini => MODEL_GPT4_1_MINI,
171+
Model::GPT41Nano => MODEL_GPT4_1_NANO,
172+
Model::GPT45 => MODEL_GPT4_5
177173
}
178174
}
179175
}
180176

177+
// Keep conversion to String for cases that need owned strings
178+
impl From<&Model> for String {
179+
fn from(model: &Model) -> Self {
180+
model.as_ref().to_string()
181+
}
182+
}
183+
184+
// Keep the old impl for backwards compatibility where possible
185+
impl Model {
186+
pub fn as_str(&self) -> &str {
187+
self.as_ref()
188+
}
189+
}
190+
181191
impl FromStr for Model {
182192
type Err = anyhow::Error;
183193

184194
fn from_str(s: &str) -> Result<Self> {
185-
match s.trim().to_lowercase().as_str() {
186-
"gpt-4o" => Ok(Model::GPT4o),
187-
"gpt-4" => Ok(Model::GPT4),
188-
"gpt-4o-mini" => Ok(Model::GPT4oMini),
195+
let normalized = s.trim().to_lowercase();
196+
match normalized.as_str() {
189197
"gpt-4.1" => Ok(Model::GPT41),
190-
model => bail!("Invalid model name: {}", model)
198+
"gpt-4.1-mini" => Ok(Model::GPT41Mini),
199+
"gpt-4.1-nano" => Ok(Model::GPT41Nano),
200+
"gpt-4.5" => Ok(Model::GPT45),
201+
// Backward compatibility for deprecated models - map to closest GPT-4.1 equivalent
202+
"gpt-4" | "gpt-4o" => {
203+
log::warn!(
204+
"Model '{}' is deprecated. Mapping to 'gpt-4.1'. \
205+
Please update your configuration with: git ai config set model gpt-4.1",
206+
s
207+
);
208+
Ok(Model::GPT41)
209+
}
210+
"gpt-4o-mini" | "gpt-3.5-turbo" => {
211+
log::warn!(
212+
"Model '{}' is deprecated. Mapping to 'gpt-4.1-mini'. \
213+
Please update your configuration with: git ai config set model gpt-4.1-mini",
214+
s
215+
);
216+
Ok(Model::GPT41Mini)
217+
}
218+
model =>
219+
bail!(
220+
"Invalid model name: '{}'. Supported models: gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.5",
221+
model
222+
),
191223
}
192224
}
193225
}
194226

195227
impl Display for Model {
196228
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
197-
write!(f, "{}", <&str>::from(self))
229+
write!(f, "{}", self.as_ref())
198230
}
199231
}
200232

201233
// Implement conversion from string types to Model with fallback to default
202234
impl From<&str> for Model {
203235
fn from(s: &str) -> Self {
204-
s.parse().unwrap_or_default()
236+
s.parse().unwrap_or_else(|e| {
237+
log::error!("Failed to parse model '{}': {}. Falling back to default model 'gpt-4.1'.", s, e);
238+
Model::default()
239+
})
205240
}
206241
}
207242

src/openai.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ pub async fn generate_commit_message(diff: &str) -> Result<String> {
3737
if let Ok(api_key) = std::env::var("OPENAI_API_KEY") {
3838
if !api_key.is_empty() {
3939
// Use the commit function directly without parsing
40-
match commit::generate(diff.to_string(), 256, Model::GPT4oMini, None).await {
40+
match commit::generate(diff.to_string(), 256, Model::GPT41Mini, None).await {
4141
Ok(response) => return Ok(response.response.trim().to_string()),
4242
Err(e) => {
4343
log::warn!("Direct generation failed, falling back to local: {e}");

0 commit comments

Comments (0)