
Commit 2f13645

gustavocidornelas authored and whoseoyster committed
Update OpenAI model runner to use openai>=1.0.0 and update openai version required by the client
1 parent cf5a90a commit 2f13645

File tree: 4 files changed, +30 -34 lines changed

examples/monitoring/quickstart/llms/openai_llm_monitor.ipynb
openlayer/model_runners/ll_model_runners.py
openlayer/version.py
setup.cfg

examples/monitoring/quickstart/llms/openai_llm_monitor.ipynb

Lines changed: 19 additions & 22 deletions

@@ -13,6 +13,16 @@
     "This notebook illustrates how to get started monitoring OpenAI LLMs with Openlayer."
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "020c8f6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install openlayer"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "75c2a473",
@@ -54,15 +64,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import openlayer\n",
     "from openlayer import llm_monitors\n",
     "\n",
-    "# If you're using `openai>=1.0.0`:\n",
     "openai_client = openai.OpenAI()\n",
-    "openai_monitor = llm_monitors.OpenAIMonitor(client=openai_client, publish=True) # with publish=True, every row gets published to Openlayer automatically\n",
-    "\n",
-    "# Otherwise, use:\n",
-    "# openai_monitor = llm_monitors.OpenAIMonitor(publish=True) # with publish=True, every row gets published to Openlayer automatically"
+    "openai_monitor = llm_monitors.OpenAIMonitor(client=openai_client, publish=True) # with publish=True, every row gets published to Openlayer automatically"
    ]
   },
   {
@@ -98,21 +103,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# If you're using `openai>=1.0.0`:\n",
-    "completion = openai_client.chat.completions.create(model=\"gpt-3.5-turbo\",\n",
-    "    messages=[\n",
-    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
-    "        {\"role\": \"user\", \"content\": \"How are you doing today?\"}\n",
-    "    ])\n",
-    "\n",
-    "# Othwewise, use:\n",
-    "# completion = openai.ChatCompletion.create(\n",
-    "#     model=\"gpt-3.5-turbo\",\n",
-    "#     messages=[\n",
-    "#         {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
-    "#         {\"role\": \"user\", \"content\": \"How are you doing today?\"}\n",
-    "#     ]\n",
-    "# )"
+    "completion = openai_client.chat.completions.create(\n",
+    "    model=\"gpt-3.5-turbo\",\n",
+    "    messages=[\n",
+    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
+    "        {\"role\": \"user\", \"content\": \"How are you doing today?\"}\n",
+    "    ]\n",
+    ")"
    ]
   },
   {
@@ -128,7 +125,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "654bb896",
+   "id": "e79ee882",
    "metadata": {},
    "outputs": [],
    "source": []

openlayer/model_runners/ll_model_runners.py

Lines changed: 7 additions & 8 deletions

@@ -448,18 +448,16 @@ def __init__(
                 "keyword argument 'openai_api_key'"
             )

-        self.openai_api_key = kwargs["openai_api_key"]
+        self.openai_client = openai.OpenAI(api_key=kwargs["openai_api_key"])
         self._initialize_llm()

         self.cost: List[float] = []

     def _initialize_llm(self):
         """Initializes the OpenAI chat completion model."""
-        openai.api_key = self.openai_api_key
-
         # Check if API key is valid
         try:
-            openai.Model.list()
+            self.openai_client.models.list()
         except Exception as e:
             raise openlayer_exceptions.OpenlayerInvalidLlmApiKey(
                 "Please pass a valid OpenAI API key as the "
@@ -479,24 +477,25 @@ def _get_llm_input(
     def _make_request(self, llm_input: List[Dict[str, str]]) -> Dict[str, Any]:
         """Make the request to OpenAI's chat completion model
         for a given input."""
-        return openai.ChatCompletion.create(
+        response = self.openai_client.chat.completions.create(
             model=self.model_config.get("model", "gpt-3.5-turbo"),
             messages=llm_input,
             **self.model_config.get("model_parameters", {}),
         )
+        return response

     def _get_output(self, response: Dict[str, Any]) -> str:
         """Gets the output from the response."""
-        return response["choices"][0]["message"]["content"]
+        return response.choices[0].message.content

     def _get_cost_estimate(self, response: Dict[str, Any]) -> None:
         """Estimates the cost from the response."""
         model = self.model_config.get("model", "gpt-3.5-turbo")
         if model not in self.COST_PER_TOKEN:
             return -1
         else:
-            num_input_tokens = response["usage"]["prompt_tokens"]
-            num_output_tokens = response["usage"]["completion_tokens"]
+            num_input_tokens = response.usage.prompt_tokens
+            num_output_tokens = response.usage.completion_tokens
             return (
                 num_input_tokens * self.COST_PER_TOKEN[model]["input"]
                 + num_output_tokens * self.COST_PER_TOKEN[model]["output"]
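
The runner's changes all follow the same 1.x SDK migration pattern: build an openai.OpenAI client once, validate the key with client.models.list(), send requests through client.chat.completions.create, and read the response through attributes instead of dict keys. The standalone sketch below restates that pattern outside the class; the helper names (make_chat_request, summarize_response) and the placeholder API key are illustrative, not part of the runner.

# Standalone sketch of the openai>=1.0.0 pattern the runner now follows
# (illustrative only; mirrors the hunks above rather than reproducing the class).
from typing import Any, Dict, List

import openai


def make_chat_request(
    client: openai.OpenAI,
    messages: List[Dict[str, str]],
    model: str = "gpt-3.5-turbo",
) -> Any:
    # openai>=1.0.0: requests go through a client instance,
    # not the removed module-level openai.ChatCompletion.create call.
    return client.chat.completions.create(model=model, messages=messages)


def summarize_response(response: Any) -> Dict[str, Any]:
    # 1.x responses are objects with attributes, not dicts:
    # response["choices"][0]["message"]["content"] -> response.choices[0].message.content
    return {
        "output": response.choices[0].message.content,
        "prompt_tokens": response.usage.prompt_tokens,
        "completion_tokens": response.usage.completion_tokens,
    }


if __name__ == "__main__":
    # The runner builds its client from kwargs["openai_api_key"]; use a real key here.
    client = openai.OpenAI(api_key="sk-...")
    client.models.list()  # cheap call used to validate the API key, as in _initialize_llm
    response = make_chat_request(client, [{"role": "user", "content": "Hello!"}])
    print(summarize_response(response))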

openlayer/version.py

Lines changed: 1 addition & 1 deletion

@@ -22,4 +22,4 @@
     data=data,
 )
 """
-__version__ = "0.1.0a17"
+__version__ = "0.1.0a18"

setup.cfg

Lines changed: 3 additions & 3 deletions

@@ -6,7 +6,7 @@ count = True
 max-line-length = 192

 [tool:pytest]
-testpaths =
+testpaths =
     tests

 [metadata]
@@ -33,7 +33,7 @@ project_urls =
     Openlayer User Slack Group = https://l.linklyhq.com/l/1DG73

 [options]
-packages =
+packages =
     openlayer
     openlayer.model_runners
     openlayer.model_runners.prediction_jobs
@@ -44,7 +44,7 @@ install_requires =
     jupyter
     marshmallow
     marshmallow_oneofschema
-    openai
+    openai>=1.0.0
     pandas
     pybars3
     requests_toolbelt
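
With the dependency now pinned to openai>=1.0.0, environments that still carry a 0.x SDK will break on the new client calls. A guard like the one below can surface that early; it is a minimal sketch, not part of the package, and it assumes the packaging module is available (it normally ships alongside pip/setuptools).

import openai
from packaging import version  # assumed available; commonly installed with pip/setuptools

# Fail fast if the installed SDK predates the new openai>=1.0.0 requirement.
if version.parse(openai.__version__) < version.parse("1.0.0"):
    raise RuntimeError(
        f"Found openai=={openai.__version__}, but openlayer now requires openai>=1.0.0. "
        "Upgrade with: pip install -U openai"
    )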

0 commit comments
