diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst index 32de3ed255..e11fed9fb7 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst @@ -56,7 +56,7 @@ Check out the `manual example `_ for more details. Instrumenting all clients ************************* -When using the instrumentor, all clients will automatically trace OpenAI chat completion operations. +When using the instrumentor, all clients will automatically trace OpenAI chat completion and responses operations. You can also optionally capture prompts and completions as log events. Make sure to configure OpenTelemetry tracing, logging, and events to capture all telemetry emitted by the instrumentation. @@ -68,12 +68,20 @@ Make sure to configure OpenTelemetry tracing, logging, and events to capture all OpenAIInstrumentor().instrument() client = OpenAI() + + # Chat completions API response = client.chat.completions.create( model="gpt-4o-mini", messages=[ {"role": "user", "content": "Write a short poem on open telemetry."}, ], ) + + # Responses API (structured outputs) + response = client.responses.create( + model="gpt-4o-mini", + input="Write a short poem on open telemetry.", + ) Enabling message content ************************* diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py index 1775b704cf..7a572132f2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py @@ -29,12 +29,20 @@ OpenAIInstrumentor().instrument() client = OpenAI() + + # Chat completions API response = client.chat.completions.create( model="gpt-4o-mini", messages=[ {"role": "user", "content": "Write a short poem on open telemetry."}, ], ) + + # Responses API + response = client.responses.create( + model="gpt-4o-mini", + input="Write a short poem on open telemetry.", + ) API --- @@ -42,6 +50,7 @@ from typing import Collection +from packaging import version as package_version from wrapt import wrap_function_wrapper from opentelemetry._logs import get_logger @@ -54,7 +63,28 @@ from opentelemetry.trace import get_tracer from .instruments import Instruments -from .patch import async_chat_completions_create, chat_completions_create +from .patch import ( + async_chat_completions_create, + async_responses_create, + async_conversations_create, + async_conversation_items_list, + chat_completions_create, + responses_create, + conversations_create, + conversation_items_list, +) + + +def _is_responses_api_supported(): + """Check if the installed OpenAI version supports the responses API.""" + try: + import openai # pylint: disable=import-outside-toplevel + + return package_version.parse(openai.__version__) >= package_version.parse( + "1.66.0" + ) + except Exception: # pylint: disable=broad-except + return False class OpenAIInstrumentor(BaseInstrumentor): @@ -106,8 +136,67 @@ def _instrument(self, **kwargs): ), ) + # Only instrument responses API if supported (OpenAI >= 1.66.0) + if _is_responses_api_supported(): + wrap_function_wrapper( + 
module="openai.resources.responses.responses", + name="Responses.create", + wrapper=responses_create( + tracer, logger, instruments, is_content_enabled() + ), + ) + + wrap_function_wrapper( + module="openai.resources.responses.responses", + name="AsyncResponses.create", + wrapper=async_responses_create( + tracer, logger, instruments, is_content_enabled() + ), + ) + + wrap_function_wrapper( + module="openai.resources.conversations.conversations", + name="Conversations.create", + wrapper=conversations_create( + tracer, logger, instruments, is_content_enabled() + ), + ) + + wrap_function_wrapper( + module="openai.resources.conversations.conversations", + name="AsyncConversations.create", + wrapper=async_conversations_create( + tracer, logger, instruments, is_content_enabled() + ), + ) + + wrap_function_wrapper( + module="openai.resources.conversations.items", + name="Items.list", + wrapper=conversation_items_list( + tracer, logger, instruments, is_content_enabled() + ), + ) + + wrap_function_wrapper( + module="openai.resources.conversations.items", + name="AsyncItems.list", + wrapper=async_conversation_items_list( + tracer, logger, instruments, is_content_enabled() + ), + ) + def _uninstrument(self, **kwargs): import openai # pylint: disable=import-outside-toplevel unwrap(openai.resources.chat.completions.Completions, "create") unwrap(openai.resources.chat.completions.AsyncCompletions, "create") + + # Only uninstrument responses API if supported (OpenAI >= 1.66.0) + if _is_responses_api_supported(): + unwrap(openai.resources.responses.responses.Responses, "create") + unwrap(openai.resources.responses.responses.AsyncResponses, "create") + unwrap(openai.resources.conversations.conversations.Conversations, "create") + unwrap(openai.resources.conversations.conversations.AsyncConversations, "create") + unwrap(openai.resources.conversations.items.Items, "list") + unwrap(openai.resources.conversations.items.AsyncItems, "list") diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py index fc7beb6e2c..16ca339b6e 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/patch.py @@ -12,6 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pyright: reportUnknownParameterType=false, reportUnknownVariableType=false, reportUnknownMemberType=false, reportMissingParameterType=false +# pylint: disable=too-many-arguments +# type: ignore +""" +Pylance/Pyright type checking is disabled for this file because OpenTelemetry +instrumentation involves dynamic wrapping of external library methods using +the wrapt library. The wrapped functions, their parameters (args, kwargs), +and return values have types that are determined at runtime and cannot be +statically analyzed. This is the expected and correct approach for +instrumentation code that needs to work generically across different +versions and configurations of the instrumented library. 
+""" + from timeit import default_timer from typing import Optional @@ -37,6 +50,8 @@ is_streaming, message_to_event, set_span_attribute, + get_property_value, + set_server_address_and_port, ) @@ -51,7 +66,9 @@ def chat_completions_create( def traced_method(wrapped, instance, args, kwargs): span_attributes = {**get_llm_request_attributes(kwargs, instance)} - span_name = f"{span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]} {span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]}" + operation_name = span_attributes.get(GenAIAttributes.GEN_AI_OPERATION_NAME, "chat") + model_name = span_attributes.get(GenAIAttributes.GEN_AI_REQUEST_MODEL, "unknown") + span_name = f"{operation_name} {model_name}" with tracer.start_as_current_span( name=span_name, kind=SpanKind.CLIENT, @@ -107,7 +124,9 @@ def async_chat_completions_create( async def traced_method(wrapped, instance, args, kwargs): span_attributes = {**get_llm_request_attributes(kwargs, instance)} - span_name = f"{span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]} {span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]}" + operation_name = span_attributes.get(GenAIAttributes.GEN_AI_OPERATION_NAME, "chat") + model_name = span_attributes.get(GenAIAttributes.GEN_AI_REQUEST_MODEL, "unknown") + span_name = f"{operation_name} {model_name}" with tracer.start_as_current_span( name=span_name, kind=SpanKind.CLIENT, @@ -162,11 +181,14 @@ def _record_metrics( common_attributes = { GenAIAttributes.GEN_AI_OPERATION_NAME: GenAIAttributes.GenAiOperationNameValues.CHAT.value, GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value, - GenAIAttributes.GEN_AI_REQUEST_MODEL: span_attributes[ - GenAIAttributes.GEN_AI_REQUEST_MODEL - ], } + # Only add request model if it exists in span_attributes + if GenAIAttributes.GEN_AI_REQUEST_MODEL in span_attributes: + common_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] = span_attributes[ + GenAIAttributes.GEN_AI_REQUEST_MODEL + ] + if error_type: common_attributes["error.type"] = error_type @@ -385,24 +407,8 @@ def cleanup(self): tool_calls.append(tool_call_dict) message["tool_calls"] = tool_calls - body = { - "index": idx, - "finish_reason": choice.finish_reason or "error", - "message": message, - } - - event_attributes = { - GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value - } - context = set_span_in_context(self.span, get_current()) - self.logger.emit( - LogRecord( - event_name="gen_ai.choice", - attributes=event_attributes, - body=body, - context=context, - ) - ) + event_attributes = _response_message_to_event_attributes(message, self.capture_content) + self.span.add_event(name="gen_ai.assistant.message", attributes=event_attributes) self.span.end() self._span_started = False @@ -489,6 +495,29 @@ def set_response_service_tier(self, chunk): self.service_tier = chunk.service_tier def build_streaming_response(self, chunk): + # Handle Responses API ResponseTextDeltaEvent chunks + if hasattr(chunk, "delta") and isinstance(chunk.delta, str) and chunk.delta: + # Responses API streams have delta events where delta is the text content directly + # Ensure we have at least one choice buffer for index 0 + if len(self.choice_buffers) == 0: + self.choice_buffers.append(ChoiceBuffer(0)) + + # Append the delta text to the first (and only) choice buffer + self.choice_buffers[0].append_text_content(chunk.delta) + return + + # Handle Responses API streaming format (chunk.output) - fallback for other chunk types + if hasattr(chunk, "output") and chunk.output is not None: + # 
Responses API streams have direct output, not choices array + # Ensure we have at least one choice buffer for index 0 + if len(self.choice_buffers) == 0: + self.choice_buffers.append(ChoiceBuffer(0)) + + # Append the output text to the first (and only) choice buffer + self.choice_buffers[0].append_text_content(chunk.output) + return + + # Handle Chat Completions API streaming format (chunk.choices) if getattr(chunk, "choices", None) is None: return @@ -528,3 +557,836 @@ def process_chunk(self, chunk): self.set_response_service_tier(chunk) self.build_streaming_response(chunk) self.set_usage(chunk) + + +def responses_create( + tracer: Tracer, + logger: Logger, + instruments: Instruments, + capture_content: bool, +): + """Wrap the `create` method of the `Responses` class to trace it.""" + + def traced_method(wrapped, instance, args, kwargs): + span_attributes = {**get_llm_request_attributes(kwargs, instance, "responses")} + + operation_name = span_attributes.get(GenAIAttributes.GEN_AI_OPERATION_NAME, "responses") + model_name = span_attributes.get(GenAIAttributes.GEN_AI_REQUEST_MODEL) + + # Extract agent name for span naming if model is not available + assistant_name = None + extra_body = kwargs.get("extra_body") + if extra_body and isinstance(extra_body, dict): + agent_info = extra_body.get("agent") + if agent_info and isinstance(agent_info, dict): + assistant_name = agent_info.get("name") + + # Build span name: prefer model, then assistant name, then just operation + if model_name: + span_name = f"{operation_name} {model_name}" + elif assistant_name: + span_name = f"{operation_name} {assistant_name}" + else: + span_name = operation_name + with tracer.start_as_current_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + end_on_exit=False, + ) as span: + # Add conversation ID as span attribute if provided + conversation_id = kwargs.get("conversation") + if conversation_id: + set_span_attribute(span, "gen_ai.conversation.id", conversation_id) + + # Add agent name from extra_body if provided + extra_body = kwargs.get("extra_body") + if extra_body and isinstance(extra_body, dict): + agent_info = extra_body.get("agent") + if agent_info and isinstance(agent_info, dict): + agent_name = agent_info.get("name") + if agent_name: + set_span_attribute(span, "gen_ai.assistant.name", agent_name) + + # Add input message as event if applicable + input_data = kwargs.get("input") + if input_data: + if isinstance(input_data, str): + # Simple string input - add as user message event + message_dict = {"role": "user", "content": input_data} + event_attributes = _response_message_to_event_attributes(message_dict, capture_content) + span.add_event(name="gen_ai.user.message", attributes=event_attributes) + elif isinstance(input_data, dict): + # Dictionary input - add as event based on role + role = input_data.get("role", "user") + event_name = f"gen_ai.{role}.message" if role in ["user", "assistant"] else "gen_ai.message" + event_attributes = _response_message_to_event_attributes(input_data, capture_content) + span.add_event(name=event_name, attributes=event_attributes) + + start = default_timer() + result = None + error_type = None + try: + result = wrapped(*args, **kwargs) + if is_streaming(kwargs): + return StreamWrapper(result, span, logger, capture_content) + + if span.is_recording(): + _set_responses_attributes(span, result, logger, capture_content) + + # Add output messages as events + if hasattr(result, "output") and result.output: + for output_item in result.output: + if 
hasattr(output_item, "type") and output_item.type == "message": + # Convert output message to event format + message_dict = {"role": "assistant"} + if hasattr(output_item, "content"): + content_items = output_item.content + if content_items: + # Extract text content - check for input_text, output_text, and text types + text_parts = [] + for content_item in content_items: + if hasattr(content_item, "text"): + if hasattr(content_item, "type") and content_item.type in ["input_text", "output_text", "text"]: + text_parts.append(content_item.text) + if text_parts: + message_dict["content"] = " ".join(text_parts) + # Add assistant message as event + event_attributes = _response_message_to_event_attributes(message_dict, capture_content) + span.add_event(name="gen_ai.assistant.message", attributes=event_attributes) + + span.end() + return result + + except Exception as error: + error_type = type(error).__qualname__ + handle_span_exception(span, error) + raise + finally: + duration = max((default_timer() - start), 0) + _record_responses_metrics( + instruments, + duration, + result, + span_attributes, + error_type, + ) + + return traced_method + + +def async_responses_create( + tracer: Tracer, + logger: Logger, + instruments: Instruments, + capture_content: bool, +): + """Wrap the `create` method of the `AsyncResponses` class to trace it.""" + + async def traced_method(wrapped, instance, args, kwargs): + span_attributes = {**get_llm_request_attributes(kwargs, instance, "responses")} + + operation_name = span_attributes.get(GenAIAttributes.GEN_AI_OPERATION_NAME, "responses") + model_name = span_attributes.get(GenAIAttributes.GEN_AI_REQUEST_MODEL) + + # Extract agent name for span naming if model is not available + assistant_name = None + extra_body = kwargs.get("extra_body") + if extra_body and isinstance(extra_body, dict): + agent_info = extra_body.get("agent") + if agent_info and isinstance(agent_info, dict): + assistant_name = agent_info.get("name") + + # Build span name: prefer model, then assistant name, then just operation + if model_name: + span_name = f"{operation_name} {model_name}" + elif assistant_name: + span_name = f"{operation_name} {assistant_name}" + else: + span_name = operation_name + with tracer.start_as_current_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + end_on_exit=False, + ) as span: + # Add conversation ID as span attribute if provided + conversation_id = kwargs.get("conversation") + if conversation_id: + set_span_attribute(span, "gen_ai.conversation.id", conversation_id) + + # Add agent name from extra_body if provided + extra_body = kwargs.get("extra_body") + if extra_body and isinstance(extra_body, dict): + agent_info = extra_body.get("agent") + if agent_info and isinstance(agent_info, dict): + agent_name = agent_info.get("name") + if agent_name: + set_span_attribute(span, "gen_ai.assistant.name", agent_name) + + # Add input message as event if applicable + input_data = kwargs.get("input") + if input_data: + if isinstance(input_data, str): + # Simple string input - add as user message event + message_dict = {"role": "user", "content": input_data} + event_attributes = _response_message_to_event_attributes(message_dict, capture_content) + span.add_event(name="gen_ai.user.message", attributes=event_attributes) + elif isinstance(input_data, dict): + # Dictionary input - add as event based on role + role = input_data.get("role", "user") + event_name = f"gen_ai.{role}.message" if role in ["user", "assistant"] else "gen_ai.message" + 
event_attributes = _response_message_to_event_attributes(input_data, capture_content)
+                    span.add_event(name=event_name, attributes=event_attributes)
+
+            start = default_timer()
+            result = None
+            error_type = None
+            try:
+                result = await wrapped(*args, **kwargs)
+                if is_streaming(kwargs):
+                    return StreamWrapper(result, span, logger, capture_content)
+
+                if span.is_recording():
+                    _set_responses_attributes(span, result, logger, capture_content)
+
+                    # Add output messages as events
+                    if hasattr(result, "output") and result.output:
+                        for output_item in result.output:
+                            if hasattr(output_item, "type") and output_item.type == "message":
+                                # Convert output message to event format
+                                message_dict = {"role": "assistant"}
+                                if hasattr(output_item, "content"):
+                                    content_items = output_item.content
+                                    if content_items:
+                                        # Extract text content - check for input_text, output_text, and text types
+                                        text_parts = []
+                                        for content_item in content_items:
+                                            if hasattr(content_item, "text"):
+                                                if hasattr(content_item, "type") and content_item.type in ["input_text", "output_text", "text"]:
+                                                    text_parts.append(content_item.text)
+                                        if text_parts:
+                                            message_dict["content"] = " ".join(text_parts)
+                                # Add assistant message as event
+                                event_attributes = _response_message_to_event_attributes(message_dict, capture_content)
+                                span.add_event(name="gen_ai.assistant.message", attributes=event_attributes)
+
+                span.end()
+                return result
+
+            except Exception as error:
+                error_type = type(error).__qualname__
+                handle_span_exception(span, error)
+                raise
+            finally:
+                duration = max((default_timer() - start), 0)
+                _record_responses_metrics(
+                    instruments,
+                    duration,
+                    result,
+                    span_attributes,
+                    error_type,
+                )
+
+    return traced_method
+
+
+def _set_responses_attributes(span, result, logger: Logger, capture_content: bool):
+    """Set span attributes for responses API."""
+    model = get_property_value(result, "model")
+    if model:
+        set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, model)
+
+    response_id = get_property_value(result, "id")
+    if response_id:
+        set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_ID, response_id)
+
+    service_tier = get_property_value(result, "service_tier")
+    if service_tier:
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+            service_tier,
+        )
+
+    # Get the usage
+    usage = get_property_value(result, "usage")
+    if usage:
+        input_tokens = get_property_value(usage, "input_tokens")
+        if input_tokens is not None:
+            set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
+                input_tokens,
+            )
+        output_tokens = get_property_value(usage, "output_tokens")
+        if output_tokens is not None:
+            set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
+                output_tokens,
+            )
+
+    # Set finish reasons from output
+    output = get_property_value(result, "output")
+    if output:
+        finish_reasons = []
+        for item in output:
+            if hasattr(item, "type") and item.type == "message":
+                # For message type, we can consider it as "stop"
+                finish_reasons.append("stop")
+        if finish_reasons:
+            set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
+                finish_reasons,
+            )
+
+
+def _record_responses_metrics(
+    instruments: Instruments,
+    duration: float,
+    result,
+    span_attributes: dict,
+    error_type: Optional[str],
+):
+    """Record metrics for responses API."""
+    common_attributes = {
+        GenAIAttributes.GEN_AI_OPERATION_NAME: "responses",
+        GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value,
+    }
+
+    # Only add 
request model if it exists in span_attributes + if GenAIAttributes.GEN_AI_REQUEST_MODEL in span_attributes: + common_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] = span_attributes[ + GenAIAttributes.GEN_AI_REQUEST_MODEL + ] + + if error_type: + common_attributes["error.type"] = error_type + + if result: + model = get_property_value(result, "model") + if model: + common_attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] = model + + service_tier = get_property_value(result, "service_tier") + if service_tier: + common_attributes[ + GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER + ] = service_tier + + if ServerAttributes.SERVER_ADDRESS in span_attributes: + common_attributes[ServerAttributes.SERVER_ADDRESS] = span_attributes[ + ServerAttributes.SERVER_ADDRESS + ] + + if ServerAttributes.SERVER_PORT in span_attributes: + common_attributes[ServerAttributes.SERVER_PORT] = span_attributes[ + ServerAttributes.SERVER_PORT + ] + + instruments.operation_duration_histogram.record( + duration, + attributes=common_attributes, + ) + + if result: + usage = get_property_value(result, "usage") + if usage: + input_tokens = get_property_value(usage, "input_tokens") + if input_tokens is not None: + input_attributes = { + **common_attributes, + GenAIAttributes.GEN_AI_TOKEN_TYPE: GenAIAttributes.GenAiTokenTypeValues.INPUT.value, + } + instruments.token_usage_histogram.record( + input_tokens, + attributes=input_attributes, + ) + + output_tokens = get_property_value(usage, "output_tokens") + if output_tokens is not None: + completion_attributes = { + **common_attributes, + GenAIAttributes.GEN_AI_TOKEN_TYPE: GenAIAttributes.GenAiTokenTypeValues.COMPLETION.value, + } + instruments.token_usage_histogram.record( + output_tokens, + attributes=completion_attributes, + ) + + +def conversations_create( + tracer: Tracer, + logger: Logger, + instruments: Instruments, + capture_content: bool, +): + """Wrap the `create` method of the `Conversations` class to trace it.""" + + def traced_method(wrapped, instance, args, kwargs): + span_attributes = _get_conversation_request_attributes(kwargs, instance) + + span_name = "create_conversation" + with tracer.start_as_current_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + end_on_exit=False, + ) as span: + start_time = default_timer() + error_type = None + try: + result = wrapped(*args, **kwargs) + _set_conversation_attributes(span, result) + return result + except Exception as e: + error_type = type(e).__qualname__ + handle_span_exception(span, e) + raise + finally: + if error_type is None: + span.end() + duration = default_timer() - start_time + _record_conversation_metrics( + instruments, duration, result if error_type is None else None, span_attributes, error_type + ) + + return traced_method + + +def async_conversations_create( + tracer: Tracer, + logger: Logger, + instruments: Instruments, + capture_content: bool, +): + """Wrap the `create` method of the `AsyncConversations` class to trace it.""" + + async def traced_method(wrapped, instance, args, kwargs): + span_attributes = _get_conversation_request_attributes(kwargs, instance) + + span_name = "create_conversation" + with tracer.start_as_current_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + end_on_exit=False, + ) as span: + start_time = default_timer() + error_type = None + try: + result = await wrapped(*args, **kwargs) + _set_conversation_attributes(span, result) + return result + except Exception as e: + error_type = type(e).__qualname__ + 
handle_span_exception(span, e) + raise + finally: + if error_type is None: + span.end() + duration = default_timer() - start_time + _record_conversation_metrics( + instruments, duration, result if error_type is None else None, span_attributes, error_type + ) + + return traced_method + + +def _get_conversation_request_attributes(kwargs, client_instance): + """Get span attributes for conversation create requests.""" + attributes = { + GenAIAttributes.GEN_AI_OPERATION_NAME: "create_conversation", + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value, + } + + set_server_address_and_port(client_instance, attributes) + + # filter out None values + return {k: v for k, v in attributes.items() if v is not None} + + +def _set_conversation_attributes(span, result): + """Set span attributes for conversation create response.""" + conversation_id = get_property_value(result, "id") + if conversation_id: + set_span_attribute(span, "gen_ai.conversation.id", conversation_id) + + +def _record_conversation_metrics( + instruments: Instruments, + duration: float, + result, + span_attributes: dict, + error_type: Optional[str], +): + """Record metrics for conversation create API.""" + common_attributes = { + GenAIAttributes.GEN_AI_OPERATION_NAME: "create_conversation", + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value, + } + + if error_type: + common_attributes["error.type"] = error_type + + if ServerAttributes.SERVER_ADDRESS in span_attributes: + common_attributes[ServerAttributes.SERVER_ADDRESS] = span_attributes[ + ServerAttributes.SERVER_ADDRESS + ] + + if ServerAttributes.SERVER_PORT in span_attributes: + common_attributes[ServerAttributes.SERVER_PORT] = span_attributes[ + ServerAttributes.SERVER_PORT + ] + + instruments.operation_duration_histogram.record( + duration, + attributes=common_attributes, + ) + + +def conversation_items_list( + tracer: Tracer, + logger: Logger, + instruments: Instruments, + capture_content: bool, +): + """Wrap the `list` method of the `Items` class to trace it.""" + + def traced_method(wrapped, instance, args, kwargs): + span_attributes = _get_conversation_items_request_attributes(kwargs, instance) + + span_name = "list_conversation_items" + span = tracer.start_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + ) + + start_time = default_timer() + error_type = None + try: + result = wrapped(*args, **kwargs) + _set_conversation_items_attributes(span, result, args, kwargs) + + # Wrap the result to trace individual items as they are iterated + return ConversationItemsWrapper( + result, span, logger, capture_content, tracer, start_time, instruments, span_attributes + ) + except Exception as e: + error_type = type(e).__qualname__ + handle_span_exception(span, e) + span.end() + duration = default_timer() - start_time + _record_conversation_items_metrics( + instruments, duration, None, span_attributes, error_type + ) + raise + + return traced_method + + +def async_conversation_items_list( + tracer: Tracer, + logger: Logger, + instruments: Instruments, + capture_content: bool, +): + """Wrap the `list` method of the `AsyncItems` class to trace it.""" + + async def traced_method(wrapped, instance, args, kwargs): + span_attributes = _get_conversation_items_request_attributes(kwargs, instance) + + span_name = "list_conversation_items" + span = tracer.start_span( + name=span_name, + kind=SpanKind.CLIENT, + attributes=span_attributes, + ) + + start_time = default_timer() + error_type = None + try: + result = 
await wrapped(*args, **kwargs) + _set_conversation_items_attributes(span, result, args, kwargs) + + # Wrap the result to trace individual items as they are iterated + return AsyncConversationItemsWrapper( + result, span, logger, capture_content, tracer, start_time, instruments, span_attributes + ) + except Exception as e: + error_type = type(e).__qualname__ + handle_span_exception(span, e) + span.end() + duration = default_timer() - start_time + _record_conversation_items_metrics( + instruments, duration, None, span_attributes, error_type + ) + raise + + return traced_method + + +def _get_conversation_items_request_attributes(kwargs, client_instance): + """Get span attributes for conversation items list requests.""" + attributes = { + GenAIAttributes.GEN_AI_OPERATION_NAME: "list_conversation_items", + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value, + } + + set_server_address_and_port(client_instance, attributes) + + # filter out None values + return {k: v for k, v in attributes.items() if v is not None} + + +def _set_conversation_items_attributes(span, result, args, kwargs): + """Set span attributes for conversation items list response.""" + # Add conversation_id from arguments (check both positional and keyword args) + conversation_id = None + if len(args) > 0: + conversation_id = args[0] + elif "conversation_id" in kwargs: + conversation_id = kwargs["conversation_id"] + + if conversation_id: + set_span_attribute(span, "gen_ai.conversation.id", conversation_id) + + # Add pagination info if available + if hasattr(result, "object") and result.object == "list": + set_span_attribute(span, "gen_ai.response.object", result.object) + + if hasattr(result, "has_more"): + set_span_attribute(span, "gen_ai.response.has_more", result.has_more) + + +def _record_conversation_items_metrics( + instruments: Instruments, + duration: float, + result, + span_attributes: dict, + error_type: Optional[str], +): + """Record metrics for conversation items list API.""" + common_attributes = { + GenAIAttributes.GEN_AI_OPERATION_NAME: "list_conversation_items", + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value, + } + + if error_type: + common_attributes["error.type"] = error_type + + if ServerAttributes.SERVER_ADDRESS in span_attributes: + common_attributes[ServerAttributes.SERVER_ADDRESS] = span_attributes[ + ServerAttributes.SERVER_ADDRESS + ] + + if ServerAttributes.SERVER_PORT in span_attributes: + common_attributes[ServerAttributes.SERVER_PORT] = span_attributes[ + ServerAttributes.SERVER_PORT + ] + + instruments.operation_duration_histogram.record( + duration, + attributes=common_attributes, + ) + + +def _conversation_item_to_event_attributes(item, capture_content: bool): + """Convert a conversation item to event attributes for logging.""" + event_attributes = { + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value + } + + # Add item ID + if hasattr(item, "id") and item.id: + event_attributes["gen_ai.conversation.item.id"] = item.id + + # Add item type + if hasattr(item, "type") and item.type: + event_attributes["gen_ai.conversation.item.type"] = item.type + + # Add item role + if hasattr(item, "role") and item.role: + event_attributes["gen_ai.conversation.item.role"] = item.role + + # Add content if capture is enabled + if capture_content and hasattr(item, "content") and item.content: + content_list = [] + for content_item in item.content: + if hasattr(content_item, "type") and content_item.type == "input_text": + if 
hasattr(content_item, "text"): + content_list.append(content_item.text) + elif hasattr(content_item, "type") and content_item.type == "output_text": + if hasattr(content_item, "text"): + content_list.append(content_item.text) + elif hasattr(content_item, "type") and content_item.type == "text": + if hasattr(content_item, "text"): + content_list.append(content_item.text) + if content_list: + # Store content as JSON string similar to Azure AI Agents pattern + import json + content_json = json.dumps({"content": " ".join(content_list), "role": getattr(item, "role", "unknown")}) + event_attributes["gen_ai.event.content"] = content_json + + return event_attributes + + +def _response_message_to_event_attributes(message_dict: dict, capture_content: bool): + """Convert a response message to event attributes for logging.""" + event_attributes = { + GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value + } + + # Add role + if "role" in message_dict: + event_attributes["gen_ai.message.role"] = message_dict["role"] + + # Add content if capture is enabled and available + if capture_content and "content" in message_dict: + import json + content_json = json.dumps({ + "content": message_dict["content"], + "role": message_dict.get("role", "unknown") + }) + event_attributes["gen_ai.event.content"] = content_json + + return event_attributes + + +class ConversationItemsWrapper: + """Wrapper for sync conversation items pagination that traces individual items.""" + + def __init__(self, items_page, span, logger, capture_content, tracer, start_time, instruments, span_attributes): + self.items_page = items_page + self.span = span + self.logger = logger + self.capture_content = capture_content + self.tracer = tracer + self.start_time = start_time + self.instruments = instruments + self.span_attributes = span_attributes + self._iter = None + self._span_ended = False + + def __getattr__(self, name): + """Delegate attribute access to the wrapped items_page.""" + return getattr(self.items_page, name) + + def _end_span_if_needed(self): + """End the span if it hasn't been ended yet.""" + if not self._span_ended: + self.span.end() + duration = default_timer() - self.start_time + _record_conversation_items_metrics( + self.instruments, duration, self.items_page, self.span_attributes, None + ) + self._span_ended = True + + def __iter__(self): + def _item_generator(): + try: + for item in self.items_page: + # Add the item as an event within the main span + event_attributes = _conversation_item_to_event_attributes(item, self.capture_content) + + # Determine event name based on role (similar to Azure AI Agents pattern) + role = getattr(item, "role", "unknown") + if role == "assistant": + event_name = "gen_ai.assistant.message" + elif role == "user": + event_name = "gen_ai.user.message" + else: + event_name = "gen_ai.conversation.item" + + # Add event directly to the span + self.span.add_event( + name=event_name, + attributes=event_attributes + ) + + yield item + finally: + # End the span when iteration is complete + self._end_span_if_needed() + + if self._iter is None: + self._iter = _item_generator() + return self._iter + + def __del__(self): + """Ensure span is ended if the wrapper is garbage collected.""" + try: + self._end_span_if_needed() + except: + pass # Ignore any errors during cleanup + + +class AsyncConversationItemsWrapper: + """Wrapper for async conversation items pagination that traces individual items.""" + + def __init__(self, items_page, span, logger, capture_content, tracer, start_time, 
instruments, span_attributes): + self.items_page = items_page + self.span = span + self.logger = logger + self.capture_content = capture_content + self.tracer = tracer + self.start_time = start_time + self.instruments = instruments + self.span_attributes = span_attributes + self._iter = None + self._span_ended = False + + def __getattr__(self, name): + """Delegate attribute access to the wrapped items_page.""" + return getattr(self.items_page, name) + + def _end_span_if_needed(self): + """End the span if it hasn't been ended yet.""" + if not self._span_ended: + self.span.end() + duration = default_timer() - self.start_time + _record_conversation_items_metrics( + self.instruments, duration, self.items_page, self.span_attributes, None + ) + self._span_ended = True + + def __aiter__(self): + async def _async_item_generator(): + try: + async for item in self.items_page: + # Add the item as an event within the main span + event_attributes = _conversation_item_to_event_attributes(item, self.capture_content) + + # Determine event name based on role (similar to Azure AI Agents pattern) + role = getattr(item, "role", "unknown") + if role == "assistant": + event_name = "gen_ai.assistant.message" + elif role == "user": + event_name = "gen_ai.user.message" + else: + event_name = "gen_ai.conversation.item" + + # Add event directly to the span + self.span.add_event( + name=event_name, + attributes=event_attributes + ) + + yield item + finally: + # End the span when iteration is complete + self._end_span_if_needed() + + if self._iter is None: + self._iter = _async_item_generator() + return self._iter + + def __del__(self): + """Ensure span is ended if the wrapper is garbage collected.""" + try: + self._end_span_if_needed() + except: + pass # Ignore any errors during cleanup diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversation_items_list_no_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversation_items_list_no_content.yaml new file mode 100644 index 0000000000..ac4a5fd9d1 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversation_items_list_no_content.yaml @@ -0,0 +1,350 @@ +interactions: +- request: + body: |- + {} + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/conversations + response: + body: + string: |- + { + "id": "test_conversation_id", + "object": "conversation", + "created_at": 1234567890, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - 
DYNAMIC + content-length: + - '141' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: |- + { + "conversation": "test_conversation_id", + "input": "Say hello", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '114' + content-type: + - application/json + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "test_response_id", + "object": "response", + "created_at": 1234567890, + "status": "completed", + "background": false, + "billing": { + "payer": "test_payer" + }, + "conversation": { + "id": "test_conversation_id" + }, + "error": null, + "incomplete_details": null, + "instructions": null, + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" 
+ } + ], + "role": "assistant" + } + ], + "parallel_tool_calls": true, + "previous_response_id": null, + "prompt_cache_key": null, + "reasoning": { + "effort": null, + "summary": null + }, + "safety_identifier": null, + "service_tier": "default", + "store": true, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 9, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 10, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 19 + }, + "user": null, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '1533' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-ratelimit-limit-requests: '1000' + x-ratelimit-limit-tokens: '1000' + x-ratelimit-remaining-requests: '1000' + x-ratelimit-remaining-tokens: '1000' + x-ratelimit-reset-requests: '1000' + x-ratelimit-reset-tokens: '1000' + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: GET + uri: https://api.openai.com/v1/conversations/test_conversation_id/items + response: + body: + string: |- + { + "object": "list", + "data": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" 
+ } + ], + "role": "assistant" + }, + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "input_text", + "text": "Say hello" + } + ], + "role": "user" + } + ], + "first_id": "test_message_id", + "has_more": false, + "last_id": "test_message_id" + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '824' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversation_items_list_with_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversation_items_list_with_content.yaml new file mode 100644 index 0000000000..ac4a5fd9d1 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversation_items_list_with_content.yaml @@ -0,0 +1,350 @@ +interactions: +- request: + body: |- + {} + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/conversations + response: + body: + string: |- + { + "id": "test_conversation_id", + "object": "conversation", + "created_at": 1234567890, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '141' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: |- + { + "conversation": "test_conversation_id", + "input": "Say hello", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '114' + content-type: + - application/json + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - 
AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "test_response_id", + "object": "response", + "created_at": 1234567890, + "status": "completed", + "background": false, + "billing": { + "payer": "test_payer" + }, + "conversation": { + "id": "test_conversation_id" + }, + "error": null, + "incomplete_details": null, + "instructions": null, + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" + } + ], + "role": "assistant" + } + ], + "parallel_tool_calls": true, + "previous_response_id": null, + "prompt_cache_key": null, + "reasoning": { + "effort": null, + "summary": null + }, + "safety_identifier": null, + "service_tier": "default", + "store": true, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 9, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 10, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 19 + }, + "user": null, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '1533' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-ratelimit-limit-requests: '1000' + x-ratelimit-limit-tokens: '1000' + x-ratelimit-remaining-requests: '1000' + x-ratelimit-remaining-tokens: '1000' + x-ratelimit-reset-requests: '1000' + x-ratelimit-reset-tokens: '1000' + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: GET + uri: https://api.openai.com/v1/conversations/test_conversation_id/items + response: + body: + string: |- + { + "object": "list", + "data": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": 
"output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" + } + ], + "role": "assistant" + }, + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "input_text", + "text": "Say hello" + } + ], + "role": "user" + } + ], + "first_id": "test_message_id", + "has_more": false, + "last_id": "test_message_id" + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '824' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversations_create.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversations_create.yaml new file mode 100644 index 0000000000..6a18ffeca1 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_conversations_create.yaml @@ -0,0 +1,85 @@ +interactions: +- request: + body: |- + {} + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/conversations + response: + body: + string: |- + { + "id": "test_conversation_id", + "object": "conversation", + "created_at": 1234567890, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '141' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_responses_create_no_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_responses_create_no_content.yaml new file mode 100644 index 0000000000..b5bc740a4e --- /dev/null +++ 
b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_responses_create_no_content.yaml @@ -0,0 +1,143 @@ +interactions: +- request: + body: |- + { + "input": "Say this is a test", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '55' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.3.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.3.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.1 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "resp_BSYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "created_at": 1731368631.0, + "error": null, + "incomplete_details": null, + "instructions": null, + "metadata": {}, + "model": "gpt-4o-mini-2024-07-18", + "object": "response", + "output": [ + { + "id": "msg_BSYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "content": [ + { + "annotations": [], + "text": "This is a test.", + "type": "output_text" + } + ], + "role": "assistant", + "status": null, + "type": "message" + } + ], + "output_text": "This is a test.", + "parallel_tool_calls": null, + "temperature": 1.0, + "tool_choice": null, + "tools": [], + "top_p": 1.0, + "max_output_tokens": null, + "previous_response_id": null, + "reasoning": null, + "status": "completed", + "text": null, + "truncation": null, + "usage": { + "input_tokens": 20, + "output_tokens": 5, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 25 + }, + "user": null, + "reasoning_effort": null + } + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8e122593ff368bc8-SIN + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 15 Oct 2025 01:38:16 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + content-length: + - '965' + openai-organization: test_openai_org_id + openai-processing-ms: + - '287' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199977' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 6ms + x-request-id: + - req_5806a3bf0cbd414e8712a96458044027 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_responses_create_with_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_responses_create_with_content.yaml new file mode 100644 index 0000000000..38a7ec04bb --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_responses_create_with_content.yaml @@ -0,0 +1,143 @@ +interactions: +- request: + body: |- + { + "input": "Say this is a test", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer 
test_openai_api_key + connection: + - keep-alive + content-length: + - '55' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 2.3.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.3.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.1 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "resp_ASYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "created_at": 1731368630.0, + "error": null, + "incomplete_details": null, + "instructions": null, + "metadata": {}, + "model": "gpt-4o-mini-2024-07-18", + "object": "response", + "output": [ + { + "id": "msg_ASYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "content": [ + { + "annotations": [], + "text": "This is a test.", + "type": "output_text" + } + ], + "role": "assistant", + "status": null, + "type": "message" + } + ], + "output_text": "This is a test.", + "parallel_tool_calls": null, + "temperature": 1.0, + "tool_choice": null, + "tools": [], + "top_p": 1.0, + "max_output_tokens": null, + "previous_response_id": null, + "reasoning": null, + "status": "completed", + "text": null, + "truncation": null, + "usage": { + "input_tokens": 20, + "output_tokens": 5, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 25 + }, + "user": null, + "reasoning_effort": null + } + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8e122593ff368bc8-SIN + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 15 Oct 2025 01:38:16 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + content-length: + - '965' + openai-organization: test_openai_org_id + openai-processing-ms: + - '287' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199977' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 6ms + x-request-id: + - req_58cff97afd0e7c0bba910ccf0b044a6f + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversation_items_list_no_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversation_items_list_no_content.yaml new file mode 100644 index 0000000000..27c426016e --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversation_items_list_no_content.yaml @@ -0,0 +1,350 @@ +interactions: +- request: + body: |- + {} + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + 
x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/conversations + response: + body: + string: |- + { + "id": "test_conversation_id", + "object": "conversation", + "created_at": 1234567890, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '141' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: |- + { + "conversation": "test_conversation_id", + "input": "Say hello", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '114' + content-type: + - application/json + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "test_response_id", + "object": "response", + "created_at": 1234567890, + "status": "completed", + "background": false, + "billing": { + "payer": "test_payer" + }, + "conversation": { + "id": "test_conversation_id" + }, + "error": null, + "incomplete_details": null, + "instructions": null, + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" 
+ } + ], + "role": "assistant" + } + ], + "parallel_tool_calls": true, + "previous_response_id": null, + "prompt_cache_key": null, + "reasoning": { + "effort": null, + "summary": null + }, + "safety_identifier": null, + "service_tier": "default", + "store": true, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 9, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 10, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 19 + }, + "user": null, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '1533' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-ratelimit-limit-requests: '1000' + x-ratelimit-limit-tokens: '1000' + x-ratelimit-remaining-requests: '1000' + x-ratelimit-remaining-tokens: '1000' + x-ratelimit-reset-requests: '1000' + x-ratelimit-reset-tokens: '1000' + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: GET + uri: https://api.openai.com/v1/conversations/test_conversation_id/items + response: + body: + string: |- + { + "object": "list", + "data": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" 
+ } + ], + "role": "assistant" + }, + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "input_text", + "text": "Say hello" + } + ], + "role": "user" + } + ], + "first_id": "test_message_id", + "has_more": false, + "last_id": "test_message_id" + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '824' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversation_items_list_with_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversation_items_list_with_content.yaml new file mode 100644 index 0000000000..27c426016e --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversation_items_list_with_content.yaml @@ -0,0 +1,350 @@ +interactions: +- request: + body: |- + {} + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/conversations + response: + body: + string: |- + { + "id": "test_conversation_id", + "object": "conversation", + "created_at": 1234567890, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '141' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: |- + { + "conversation": "test_conversation_id", + "input": "Say hello", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '114' + content-type: + - application/json + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + 
x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "test_response_id", + "object": "response", + "created_at": 1234567890, + "status": "completed", + "background": false, + "billing": { + "payer": "test_payer" + }, + "conversation": { + "id": "test_conversation_id" + }, + "error": null, + "incomplete_details": null, + "instructions": null, + "max_output_tokens": null, + "max_tool_calls": null, + "model": "gpt-4o-mini-2024-07-18", + "output": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": [], + "text": "Hello! How can I assist you today?" + } + ], + "role": "assistant" + } + ], + "parallel_tool_calls": true, + "previous_response_id": null, + "prompt_cache_key": null, + "reasoning": { + "effort": null, + "summary": null + }, + "safety_identifier": null, + "service_tier": "default", + "store": true, + "temperature": 1.0, + "text": { + "format": { + "type": "text" + }, + "verbosity": "medium" + }, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 9, + "input_tokens_details": { + "cached_tokens": 0 + }, + "output_tokens": 10, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 19 + }, + "user": null, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '1533' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-ratelimit-limit-requests: '1000' + x-ratelimit-limit-tokens: '1000' + x-ratelimit-remaining-requests: '1000' + x-ratelimit-remaining-tokens: '1000' + x-ratelimit-reset-requests: '1000' + x-ratelimit-reset-tokens: '1000' + x-request-id: test_request_id + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + cookie: + - test_cookie + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: GET + uri: https://api.openai.com/v1/conversations/test_conversation_id/items + response: + body: + string: |- + { + "object": "list", + "data": [ + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "output_text", + "annotations": [], + "logprobs": 
[], + "text": "Hello! How can I assist you today?" + } + ], + "role": "assistant" + }, + { + "id": "test_message_id", + "type": "message", + "status": "completed", + "content": [ + { + "type": "input_text", + "text": "Say hello" + } + ], + "role": "user" + } + ], + "first_id": "test_message_id", + "has_more": false, + "last_id": "test_message_id" + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '824' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversations_create.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversations_create.yaml new file mode 100644 index 0000000000..12e710d966 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_conversations_create.yaml @@ -0,0 +1,85 @@ +interactions: +- request: + body: |- + {} + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.5.0 + x-stainless-arch: + - other:amd64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - Windows + x-stainless-package-version: + - 2.5.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.9 + method: POST + uri: https://api.openai.com/v1/conversations + response: + body: + string: |- + { + "id": "test_conversation_id", + "object": "conversation", + "created_at": 1234567890, + "metadata": {} + } + headers: + CF-RAY: test_cf_ray_id-ATL + Connection: + - keep-alive + Content-Type: + - application/json + Date: Mon, 01 Jan 2024 00:00:00 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + content-length: + - '141' + openai-organization: test_openai_org_id + openai-processing-ms: '100' + openai-project: test_openai_project_id + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: '100' + x-openai-proxy-wasm: + - v0.1 + x-request-id: test_request_id + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_responses_create_no_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_responses_create_no_content.yaml new file mode 100644 index 0000000000..7df0363a06 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_responses_create_no_content.yaml 
@@ -0,0 +1,143 @@ +interactions: +- request: + body: |- + { + "input": "Say this is a test", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '55' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 2.3.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.3.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.1 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "resp_DSYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "created_at": 1731368633.0, + "error": null, + "incomplete_details": null, + "instructions": null, + "metadata": {}, + "model": "gpt-4o-mini-2024-07-18", + "object": "response", + "output": [ + { + "id": "msg_DSYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "content": [ + { + "annotations": [], + "text": "This is a test.", + "type": "output_text" + } + ], + "role": "assistant", + "status": null, + "type": "message" + } + ], + "output_text": "This is a test.", + "parallel_tool_calls": null, + "temperature": 1.0, + "tool_choice": null, + "tools": [], + "top_p": 1.0, + "max_output_tokens": null, + "previous_response_id": null, + "reasoning": null, + "status": "completed", + "text": null, + "truncation": null, + "usage": { + "input_tokens": 20, + "output_tokens": 5, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 25 + }, + "user": null, + "reasoning_effort": null + } + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8e122593ff368bc8-SIN + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 15 Oct 2025 01:38:18 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + content-length: + - '965' + openai-organization: test_openai_org_id + openai-processing-ms: + - '287' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199977' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 6ms + x-request-id: + - req_761d089058134f068881e711b5940f2c + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_responses_create_with_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_responses_create_with_content.yaml new file mode 100644 index 0000000000..04b63c7954 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_responses_create_with_content.yaml @@ -0,0 +1,143 @@ +interactions: +- request: + body: |- + { + "input": "Say this is a test", + "model": "gpt-4o-mini" + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '55' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - 
OpenAI/Python 2.3.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 2.3.0 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.1 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: |- + { + "id": "resp_CSYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "created_at": 1731368632.0, + "error": null, + "incomplete_details": null, + "instructions": null, + "metadata": {}, + "model": "gpt-4o-mini-2024-07-18", + "object": "response", + "output": [ + { + "id": "msg_CSYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "content": [ + { + "annotations": [], + "text": "This is a test.", + "type": "output_text" + } + ], + "role": "assistant", + "status": null, + "type": "message" + } + ], + "output_text": "This is a test.", + "parallel_tool_calls": null, + "temperature": 1.0, + "tool_choice": null, + "tools": [], + "top_p": 1.0, + "max_output_tokens": null, + "previous_response_id": null, + "reasoning": null, + "status": "completed", + "text": null, + "truncation": null, + "usage": { + "input_tokens": 20, + "output_tokens": 5, + "output_tokens_details": { + "reasoning_tokens": 0 + }, + "total_tokens": 25 + }, + "user": null, + "reasoning_effort": null + } + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8e122593ff368bc8-SIN + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 15 Oct 2025 01:38:18 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + content-length: + - '965' + openai-organization: test_openai_org_id + openai-processing-ms: + - '287' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199977' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 6ms + x-request-id: + - req_3e2141f376b445ed810c436c830a0e84 + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py index 83a4ba8c67..2e4bd4f650 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py @@ -89,6 +89,18 @@ def async_openai_client(): @pytest.fixture(scope="module") def vcr_config(): + def comprehensive_scrubber(response): + """Apply all scrubbing functions to clean sensitive data""" + response = scrub_response_headers(response) + response = scrub_response_body(response) + return response + + def request_scrubber(request): + """Scrub request data including URI paths""" + request = scrub_request_body(request) + request = scrub_request_uri(request) + return request + return { "filter_headers": [ ("cookie", "test_cookie"), @@ -97,7 +109,8 @@ def vcr_config(): ("openai-project", "test_openai_project_id"), ], "decode_compressed_response": True, - "before_record_response": scrub_response_headers, + "before_record_response": comprehensive_scrubber, + "before_record_request": request_scrubber, } @@ -231,5 +244,178 @@ def 
scrub_response_headers(response): This scrubs sensitive response headers. Note they are case-sensitive! """ response["headers"]["openai-organization"] = "test_openai_org_id" + response["headers"]["openai-project"] = "test_openai_project_id" response["headers"]["Set-Cookie"] = "test_set_cookie" + response["headers"]["x-request-id"] = "test_request_id" + + # Scrub CloudFlare and timing headers that could be used for tracking + if "CF-RAY" in response["headers"]: + response["headers"]["CF-RAY"] = "test_cf_ray_id-ATL" + if "Date" in response["headers"]: + response["headers"]["Date"] = "Mon, 01 Jan 2024 00:00:00 GMT" + if "openai-processing-ms" in response["headers"]: + response["headers"]["openai-processing-ms"] = "100" + if "x-envoy-upstream-service-time" in response["headers"]: + response["headers"]["x-envoy-upstream-service-time"] = "100" + + # Scrub rate limiting headers that contain timing info + rate_limit_headers = [ + "x-ratelimit-limit-requests", "x-ratelimit-limit-tokens", + "x-ratelimit-remaining-requests", "x-ratelimit-remaining-tokens", + "x-ratelimit-reset-requests", "x-ratelimit-reset-tokens" + ] + for header in rate_limit_headers: + if header in response["headers"]: + if "limit" in header: + response["headers"][header] = "1000" + elif "remaining" in header: + response["headers"][header] = "999" + elif "reset" in header: + response["headers"][header] = "1s" + return response + + +def scrub_response_body(response): + """ + Scrub sensitive data from response body content including conversation IDs, + response IDs, message IDs, and other identifiers that aren't needed for testing. + """ + import re + + if "body" not in response or "string" not in response["body"]: + return response + + try: + # Parse the JSON response body + body_content = json.loads(response["body"]["string"]) + + # Scrub various OpenAI IDs (e.g., "conv_abc123" -> "test_conversation_id") + if "id" in body_content and isinstance(body_content["id"], str): + if body_content["id"].startswith("conv_"): + body_content["id"] = "test_conversation_id" + elif body_content["id"].startswith("resp_"): + body_content["id"] = "test_response_id" + elif body_content["id"].startswith("msg_"): + body_content["id"] = "test_message_id" + elif body_content["id"].startswith("req_"): + body_content["id"] = "test_request_id" + + # Scrub message IDs in output array (for responses API) + if "output" in body_content and isinstance(body_content["output"], list): + for output_item in body_content["output"]: + if isinstance(output_item, dict) and "id" in output_item: + if output_item["id"].startswith("msg_"): + output_item["id"] = "test_message_id" + + # Scrub conversation items (for conversation items list) + if "data" in body_content and isinstance(body_content["data"], list): + for item in body_content["data"]: + if isinstance(item, dict) and "id" in item: + if item["id"].startswith("msg_"): + item["id"] = "test_message_id" + elif item["id"].startswith("conv_"): + item["id"] = "test_conversation_id" + + # Scrub pagination IDs (first_id, last_id) + for id_field in ["first_id", "last_id"]: + if id_field in body_content and isinstance(body_content[id_field], str): + if body_content[id_field].startswith("msg_"): + body_content[id_field] = "test_message_id" + elif body_content[id_field].startswith("conv_"): + body_content[id_field] = "test_conversation_id" + + # Scrub timestamps to prevent tracking + if "created_at" in body_content and isinstance(body_content["created_at"], (int, float)): + body_content["created_at"] = 1234567890 + + # Scrub 
billing information + if "billing" in body_content and isinstance(body_content["billing"], dict): + if "payer" in body_content["billing"]: + body_content["billing"]["payer"] = "test_payer" + + # Scrub any nested ID references + def scrub_nested_ids(obj): + if isinstance(obj, dict): + for key, value in obj.items(): + if key == "conversation_id" and isinstance(value, str) and value.startswith("conv_"): + obj[key] = "test_conversation_id" + elif key == "request_id" and isinstance(value, str) and value.startswith("req_"): + obj[key] = "test_request_id" + elif key == "id" and isinstance(value, str): + if value.startswith("conv_"): + obj[key] = "test_conversation_id" + elif value.startswith("msg_"): + obj[key] = "test_message_id" + elif value.startswith("resp_"): + obj[key] = "test_response_id" + elif value.startswith("req_"): + obj[key] = "test_request_id" + elif key == "created_at" and isinstance(value, (int, float)): + obj[key] = 1234567890 + elif key == "payer" and isinstance(value, str): + obj[key] = "test_payer" + elif isinstance(value, (dict, list)): + scrub_nested_ids(value) + elif isinstance(obj, list): + for item in obj: + scrub_nested_ids(item) + + scrub_nested_ids(body_content) + + # Update the response body with scrubbed content + response["body"]["string"] = json.dumps(body_content) + + except (json.JSONDecodeError, KeyError, TypeError): + # If we can't parse the JSON or it's not in expected format, skip scrubbing + pass + + return response + + +def scrub_request_body(request): + """ + Scrub sensitive data from request body content. + """ + if not hasattr(request, 'body') or not request.body: + return request + + try: + # Handle both string and bytes request body + body_data = request.body + if isinstance(body_data, bytes): + body_data = body_data.decode('utf-8') + + # Parse the JSON request body + body_content = json.loads(body_data) + + # Scrub conversation IDs in request body + if "conversation" in body_content and isinstance(body_content["conversation"], str): + if body_content["conversation"].startswith("conv_"): + body_content["conversation"] = "test_conversation_id" + + # Update the request body with scrubbed content + request.body = json.dumps(body_content) + + except (json.JSONDecodeError, AttributeError, TypeError, UnicodeDecodeError): + # If we can't parse the JSON or it's not in expected format, skip scrubbing + pass + + return request + + +def scrub_request_uri(request): + """ + Scrub sensitive IDs from request URI paths. 
+ """ + import re + + if hasattr(request, 'uri') and request.uri: + # Replace conversation IDs in URI path + request.uri = re.sub(r'/conversations/conv_[a-f0-9]+', '/conversations/test_conversation_id', request.uri) + # Replace message IDs in URI path + request.uri = re.sub(r'/messages/msg_[a-f0-9]+', '/messages/test_message_id', request.uri) + # Replace response IDs in URI path + request.uri = re.sub(r'/responses/resp_[a-f0-9]+', '/responses/test_response_id', request.uri) + + return request diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py index 6a3d0b28d0..cdbc7d04e3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py @@ -17,7 +17,7 @@ import pytest from openai import APIConnectionError, AsyncOpenAI, NotFoundError -from openai.resources.chat.completions import ChatCompletion +from openai.types.chat import ChatCompletion from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.semconv._incubating.attributes import ( diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_conversations.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_conversations.py new file mode 100644 index 0000000000..3a0cf9770c --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_conversations.py @@ -0,0 +1,127 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +import pytest +from packaging import version as package_version + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + +# Skip all tests in this file if OpenAI version doesn't support conversations API +pytestmark = pytest.mark.skipif( + package_version.parse(openai.__version__) < package_version.parse("1.101.0"), + reason="Conversations API requires OpenAI >= 1.101.0", +) + + +@pytest.mark.vcr() +@pytest.mark.asyncio() +async def test_async_conversations_create( + span_exporter, log_exporter, async_openai_client, instrument_with_content +): + conversation = await async_openai_client.conversations.create() + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "create_conversation" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert "gen_ai.conversation.id" in span.attributes + assert span.attributes["gen_ai.conversation.id"] == conversation.id + + +@pytest.mark.vcr() +@pytest.mark.asyncio() +async def test_async_conversation_items_list_with_content( + span_exporter, log_exporter, async_openai_client, instrument_with_content +): + # First create a conversation to get a conversation ID + conversation = await async_openai_client.conversations.create() + + # Add some messages to the conversation to create items + await async_openai_client.responses.create( + conversation=conversation.id, + model="gpt-4o-mini", + input="Say hello" + ) + + # Clear spans from conversation creation and response + span_exporter.clear() + + # List conversation items + items = await async_openai_client.conversations.items.list(conversation_id=conversation.id) + + # Iterate over items to trigger the instrumentation + item_list = [] + async for item in items: + item_list.append(item) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "list_conversation_items" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert "gen_ai.conversation.id" in span.attributes + assert span.attributes["gen_ai.conversation.id"] == conversation.id + + # Check events for conversation items when capture_content is True + events = span.events + # Should have events for the conversation items (user input + assistant response) + assert len(events) >= 1 + + +@pytest.mark.vcr() +@pytest.mark.asyncio() +async def test_async_conversation_items_list_no_content( + span_exporter, log_exporter, async_openai_client, instrument_no_content +): + # First create a conversation to get a conversation ID + conversation = await async_openai_client.conversations.create() + + # Add some messages to the conversation to create items + await async_openai_client.responses.create( + conversation=conversation.id, + model="gpt-4o-mini", + input="Say hello" + ) + + # Clear spans from conversation creation and response + span_exporter.clear() + + # List conversation items + items = await async_openai_client.conversations.items.list(conversation_id=conversation.id) + + # Iterate over items to trigger the instrumentation + item_list = [] + async for item in items: + item_list.append(item) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "list_conversation_items" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert 
"gen_ai.conversation.id" in span.attributes + assert span.attributes["gen_ai.conversation.id"] == conversation.id + + # Check span events - no content should be captured when capture_content is False + events = span.events + for event in events: + if hasattr(event, 'attributes') and event.attributes: + assert "gen_ai.event.content" not in event.attributes or not event.attributes.get("gen_ai.event.content") diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_responses.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_responses.py new file mode 100644 index 0000000000..a3e82e63a4 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_responses.py @@ -0,0 +1,90 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +import pytest +from packaging import version as package_version + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + +# Skip all tests in this file if OpenAI version doesn't support responses API +pytestmark = pytest.mark.skipif( + package_version.parse(openai.__version__) < package_version.parse("1.66.0"), + reason="Responses API requires OpenAI >= 1.66.0", +) + + +@pytest.mark.vcr() +@pytest.mark.asyncio() +async def test_async_responses_create_with_content( + span_exporter, log_exporter, async_openai_client, instrument_with_content +): + llm_model_value = "gpt-4o-mini" + input_value = "Say this is a test" + + response = await async_openai_client.responses.create( + input=input_value, model=llm_model_value + ) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "responses" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == llm_model_value + assert span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] == response.model + assert span.attributes[GenAIAttributes.GEN_AI_RESPONSE_ID] == response.id + + # Check usage tokens if available + if response.usage: + assert GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS in span.attributes + assert GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS in span.attributes + + events = span.events + # At least input message event should be present + assert len(events) >= 1 + + # Check for user input event + user_events = [event for event in events if event.name == "gen_ai.user.message"] + assert len(user_events) >= 1 + + +@pytest.mark.vcr() +@pytest.mark.asyncio() +async def test_async_responses_create_no_content( + span_exporter, log_exporter, async_openai_client, instrument_no_content +): + llm_model_value = "gpt-4o-mini" + input_value = "Say this is a test" + + response = await async_openai_client.responses.create( + input=input_value, model=llm_model_value + ) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + 
assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "responses" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == llm_model_value + + # Check span events - no content should be captured when capture_content is False + events = span.events + for event in events: + if hasattr(event, 'attributes') and event.attributes: + assert "gen_ai.event.content" not in event.attributes or not event.attributes.get("gen_ai.event.content") diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py index f849926e82..5a78b2adf2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py @@ -17,7 +17,7 @@ import pytest from openai import APIConnectionError, NotFoundError, OpenAI -from openai.resources.chat.completions import ChatCompletion +from openai.types.chat import ChatCompletion from opentelemetry.sdk.trace import ReadableSpan from opentelemetry.semconv._incubating.attributes import ( diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_conversations.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_conversations.py new file mode 100644 index 0000000000..46e746d082 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_conversations.py @@ -0,0 +1,121 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
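+
+# Synchronous counterparts of the async conversations tests above: create a
+# conversation, optionally exercise it through the responses API, and assert
+# the spans and events emitted by the instrumentation.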
+ +import os +import openai +import pytest +from packaging import version as package_version + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + +# Skip all tests in this file if OpenAI version doesn't support conversations API +pytestmark = pytest.mark.skipif( + package_version.parse(openai.__version__) < package_version.parse("1.101.0"), + reason="Conversations API requires OpenAI >= 1.101.0", +) + + +@pytest.mark.vcr() +def test_conversations_create( + span_exporter, log_exporter, openai_client, instrument_with_content +): + conversation = openai_client.conversations.create() + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "create_conversation" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert "gen_ai.conversation.id" in span.attributes + assert span.attributes["gen_ai.conversation.id"] == conversation.id + + +@pytest.mark.vcr() +def test_conversation_items_list_with_content( + span_exporter, log_exporter, openai_client, instrument_with_content +): + # First create a conversation to get a conversation ID + conversation = openai_client.conversations.create() + + # Add some messages to the conversation to create items + openai_client.responses.create( + conversation=conversation.id, + model="gpt-4o-mini", + input="Say hello" + ) + + # Clear spans from conversation creation and response + span_exporter.clear() + + # List conversation items + items = openai_client.conversations.items.list(conversation_id=conversation.id) + + # Iterate over items to trigger the instrumentation + item_list = list(items) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "list_conversation_items" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert "gen_ai.conversation.id" in span.attributes + assert span.attributes["gen_ai.conversation.id"] == conversation.id + + # Check events for conversation items when capture_content is True + events = span.events + # Should have events for the conversation items (user input + assistant response) + assert len(events) >= 1 + + +@pytest.mark.vcr() +def test_conversation_items_list_no_content( + span_exporter, log_exporter, openai_client, instrument_no_content +): + # First create a conversation to get a conversation ID + conversation = openai_client.conversations.create() + + # Add some messages to the conversation to create items + openai_client.responses.create( + conversation=conversation.id, + model="gpt-4o-mini", + input="Say hello" + ) + + # Clear spans from conversation creation and response + span_exporter.clear() + + # List conversation items + items = openai_client.conversations.items.list(conversation_id=conversation.id) + + # Iterate over items to trigger the instrumentation + item_list = list(items) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "list_conversation_items" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert "gen_ai.conversation.id" in span.attributes + assert span.attributes["gen_ai.conversation.id"] == conversation.id + + # Check span events - no content should be captured when capture_content is False + events = span.events + for event in events: + if hasattr(event, 'attributes') and 
event.attributes: + assert "gen_ai.event.content" not in event.attributes or not event.attributes.get("gen_ai.event.content") diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_responses.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_responses.py new file mode 100644 index 0000000000..27a99991ca --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_responses.py @@ -0,0 +1,88 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +import pytest +from packaging import version as package_version + +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) + +# Skip all tests in this file if OpenAI version doesn't support responses API +pytestmark = pytest.mark.skipif( + package_version.parse(openai.__version__) < package_version.parse("1.66.0"), + reason="Responses API requires OpenAI >= 1.66.0", +) + + +@pytest.mark.vcr() +def test_responses_create_with_content( + span_exporter, log_exporter, openai_client, instrument_with_content +): + llm_model_value = "gpt-4o-mini" + input_value = "Say this is a test" + + response = openai_client.responses.create( + input=input_value, model=llm_model_value + ) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "responses" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == llm_model_value + assert span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL] == response.model + assert span.attributes[GenAIAttributes.GEN_AI_RESPONSE_ID] == response.id + + # Check usage tokens if available + if response.usage: + assert GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS in span.attributes + assert GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS in span.attributes + + events = span.events + # At least input message event should be present + assert len(events) >= 1 + + # Check for user input event + user_events = [event for event in events if event.name == "gen_ai.user.message"] + assert len(user_events) >= 1 + + +@pytest.mark.vcr() +def test_responses_create_no_content( + span_exporter, log_exporter, openai_client, instrument_no_content +): + llm_model_value = "gpt-4o-mini" + input_value = "Say this is a test" + + response = openai_client.responses.create( + input=input_value, model=llm_model_value + ) + + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "responses" + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == llm_model_value + + # Check span events - no content should be captured when capture_content is False + events = span.events + for event in events: + if hasattr(event, 'attributes') and 
event.attributes: + assert "gen_ai.event.content" not in event.attributes or not event.attributes.get("gen_ai.event.content")