Skip to content

Commit ce8cdc0

Browse files
committed
Use gpt-4-turbo for vision
1 parent 06d9549 commit ce8cdc0

File tree

1 file changed

+7
-9
lines changed

(Summary repeated: 1 file changed, +7 additions, −9 deletions.)

llmstack/processors/providers/promptly/web_browser.py

Lines changed: 7 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -359,11 +359,11 @@ def process(self) -> dict:
359359

360360
model = self._config.model if self._config.model else Model.GPT_3_5_LATEST
361361
if model == "gpt-3.5-turbo-latest":
362-
model = "gpt-3.5-turbo-1106"
362+
model = "gpt-3.5-turbo"
363363
elif model == "gpt-4-turbo-latest":
364-
model = "gpt-4-0125-preview"
364+
model = "gpt-4-turbo"
365365
elif model == "gpt-4-vision-latest":
366-
model = "gpt-4-vision-preview"
366+
model = "gpt-4-turbo"
367367

368368
messages = [
369369
{
@@ -394,7 +394,7 @@ def process(self) -> dict:
394394
if response.content.text or response.content.screenshot:
395395
browser_text_response = self._process_browser_content(response)
396396
browser_response = browser_text_response
397-
if self._config.model == Model.GPT_4_V_LATEST:
397+
if self._config.model == Model.GPT_4_V_LATEST or self._config.model == Model.GPT_4_LATEST:
398398
browser_response = [
399399
{
400400
"type": "text",
@@ -465,12 +465,10 @@ def process(self) -> dict:
465465
"messages": messages,
466466
"max_tokens": 4000,
467467
"seed": self._config.seed,
468-
}
469-
470-
if self._config.model is not Model.GPT_4_V_LATEST:
471-
chat_completions_args["response_format"] = {
468+
"response_format": {
472469
"type": "json_object",
473-
}
470+
},
471+
}
474472

475473
result = openai_client.chat.completions.create(
476474
**chat_completions_args,

0 commit comments

Comments (0)