
Commit 9af67eb

bugfix: issue where NotGiven causes a warning message when passed directly.
1 parent 6525da4 · commit 9af67eb

File tree

2 files changed: 37 additions, 4 deletions

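For context on the bug: OpenTelemetry span attributes must be primitive types (str, bool, int, float, or sequences of those), while the OpenAI SDK's NOT_GIVEN is a NotGiven sentinel object. When a caller passed NOT_GIVEN straight through to the client, the sentinel ended up among the span attributes and the OpenTelemetry SDK logged an "Invalid type" warning. A minimal reproduction sketch of the pre-fix behavior (the setup and attribute name here are illustrative, not taken from this commit):

import logging

from openai import NOT_GIVEN
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

logging.basicConfig(level=logging.WARNING)
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("repro")

with tracer.start_as_current_span("chat gpt-4o-mini") as span:
    # NOT_GIVEN is not a supported attribute type, so the SDK logs a
    # warning containing "Invalid type" when the attribute is set.
    span.set_attribute("gen_ai.request.top_p", NOT_GIVEN)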

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 4 additions & 2 deletions
@@ -225,8 +225,10 @@ def get_llm_request_attributes(
             service_tier if service_tier != "auto" else None
         )
 
-    # filter out None values
-    return {k: v for k, v in attributes.items() if v is not None}
+    # filter out None values and NOT_GIVEN values
+    return {
+        k: v for k, v in attributes.items() if v is not None and v != NOT_GIVEN
+    }
 
 
 def handle_span_exception(span, error):
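The effect of the new filter, shown as a standalone sketch (the attribute values below are made up): NotGiven appears to define no custom __eq__, so v != NOT_GIVEN falls back to an identity check against the singleton, and the comprehension now drops both None and NOT_GIVEN before anything reaches span attributes.

from openai import NOT_GIVEN

attributes = {
    "gen_ai.request.model": "gpt-4o-mini",
    "gen_ai.request.top_p": NOT_GIVEN,   # sentinel passed through by the caller
    "gen_ai.request.temperature": None,  # option that was never set
}

# Same comprehension as the fixed utils.py: both sentinels are stripped.
filtered = {
    k: v for k, v in attributes.items() if v is not None and v != NOT_GIVEN
}
assert filtered == {"gen_ai.request.model": "gpt-4o-mini"}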

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py

Lines changed: 33 additions & 2 deletions
@@ -13,10 +13,11 @@
 # limitations under the License.
 # pylint: disable=too-many-locals
 
+import logging
 from typing import Optional
 
 import pytest
-from openai import APIConnectionError, NotFoundError, OpenAI
+from openai import NOT_GIVEN, APIConnectionError, NotFoundError, OpenAI
 from openai.resources.chat.completions import ChatCompletion
 
 from opentelemetry.sdk.trace import ReadableSpan
@@ -43,7 +44,9 @@ def test_chat_completion_with_content(
     messages_value = [{"role": "user", "content": "Say this is a test"}]
 
     response = openai_client.chat.completions.create(
-        messages=messages_value, model=llm_model_value, stream=False
+        messages=messages_value,
+        model=llm_model_value,
+        stream=False,
     )
 
     spans = span_exporter.get_finished_spans()
@@ -68,6 +71,30 @@ def test_chat_completion_with_content(
     assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
 
 
+@pytest.mark.vcr()
+def test_chat_completion_handles_not_given(
+    span_exporter, log_exporter, openai_client, instrument_no_content, caplog
+):
+    caplog.set_level(logging.WARNING)
+    llm_model_value = "gpt-4o-mini"
+    messages_value = [{"role": "user", "content": "Say this is a test"}]
+
+    response = openai_client.chat.completions.create(
+        messages=messages_value,
+        model=llm_model_value,
+        stream=False,
+        top_p=NOT_GIVEN,
+    )
+
+    spans = span_exporter.get_finished_spans()
+    assert_completion_attributes(spans[0], llm_model_value, response)
+
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 2
+
+    assert_no_invalid_type_warning(caplog)
+
+
 @pytest.mark.vcr()
 def test_chat_completion_no_content(
     span_exporter, log_exporter, openai_client, instrument_no_content
@@ -947,3 +974,7 @@ def get_current_weather_tool_definition():
             },
         },
     }
+
+
+def assert_no_invalid_type_warning(caplog):
+    assert "Invalid type" not in caplog.text
