|
2 | 2 |
|
3 | 3 | import pydantic |
4 | 4 | import pytest |
| 5 | +from litellm.types.llms.openai import ResponseAPIUsage, ResponsesAPIResponse |
5 | 6 | from litellm.utils import ChatCompletionMessageToolCall, Choices, Function, Message, ModelResponse |
| 7 | +from openai.types.responses import ResponseOutputMessage |
6 | 8 |
|
7 | 9 | import dspy |
8 | 10 |
|
@@ -866,3 +868,62 @@ def get_weather(city: str) -> str: |
866 | 868 | mock_completion.assert_called_once() |
867 | 869 | _, call_kwargs = mock_completion.call_args |
868 | 870 | assert call_kwargs["response_format"] == {"type": "json_object"} |
| 871 | + |
| 872 | + |
def test_json_adapter_with_responses_api():
    """JSONAdapter + Responses API: `response_format` must become `text.format`."""

    class TestSignature(dspy.Signature):
        question: str = dspy.InputField()
        answer: str = dspy.OutputField()

    # Single assistant message whose text is the JSON blob the adapter parses.
    output_message = ResponseOutputMessage(
        id="msg_1",
        type="message",
        role="assistant",
        status="completed",
        content=[
            {"type": "output_text", "text": '{"answer": "Washington, D.C."}', "annotations": []}
        ],
    )

    # Minimal-but-valid canned Responses API payload returned by the mock.
    fake_response = ResponsesAPIResponse(
        id="resp_1",
        created_at=0.0,
        error=None,
        incomplete_details=None,
        instructions=None,
        model="openai/gpt-4o",
        object="response",
        output=[output_message],
        metadata={},
        parallel_tool_calls=False,
        temperature=1.0,
        tool_choice="auto",
        tools=[],
        top_p=1.0,
        max_output_tokens=None,
        previous_response_id=None,
        reasoning=None,
        status="completed",
        text=None,
        truncation="disabled",
        usage=ResponseAPIUsage(input_tokens=10, output_tokens=5, total_tokens=15),
        user=None,
    )

    dspy.configure(
        lm=dspy.LM(model="openai/gpt-4o", model_type="responses", cache=False),
        adapter=dspy.JSONAdapter(),
    )

    predictor = dspy.Predict(TestSignature)
    with mock.patch("litellm.responses", autospec=True, return_value=fake_response) as mock_responses:
        prediction = predictor(question="What is the capital of the USA?")

    assert prediction.answer == "Washington, D.C."
    mock_responses.assert_called_once()

    # Verify that response_format was converted to text.format: the adapter
    # should hand litellm a pydantic model class, not a response_format kwarg.
    sent_kwargs = mock_responses.call_args.kwargs
    assert "response_format" not in sent_kwargs
    assert "text" in sent_kwargs
    output_format = sent_kwargs["text"]["format"]
    assert isinstance(output_format, type)
    assert issubclass(output_format, pydantic.BaseModel)
0 commit comments