From 18379fa1e7d6810f2b2e4feddae0bb52fe32e696 Mon Sep 17 00:00:00 2001
From: chenmoneygithub
Date: Tue, 28 Oct 2025 12:17:42 -0700
Subject: [PATCH] lints

---
 dspy/adapters/types/base_type.py     |  2 +-
 dspy/adapters/types/citation.py      | 11 +++-----
 dspy/clients/lm.py                   |  6 ++--
 dspy/streaming/streaming_listener.py |  6 ++--
 tests/adapters/test_json_adapter.py  | 41 +++++++++++++++++++---------
 tests/clients/test_lm.py             | 21 +++++++-------
 6 files changed, 49 insertions(+), 38 deletions(-)

diff --git a/dspy/adapters/types/base_type.py b/dspy/adapters/types/base_type.py
index 04e2794ce7..b7004de537 100644
--- a/dspy/adapters/types/base_type.py
+++ b/dspy/adapters/types/base_type.py
@@ -88,7 +88,6 @@ def parse_stream_chunk(cls, chunk: ModelResponseStream) -> Optional["Type"]:
         """
         return None

-
     @classmethod
     def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Type"]:
         """Parse a LM response into the custom type.
@@ -101,6 +100,7 @@ def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Type"]:
         """
         return None

+
 def split_message_content_for_custom_types(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
     """Split user message content into a list of content blocks.

diff --git a/dspy/adapters/types/citation.py b/dspy/adapters/types/citation.py
index c0afca06e1..4268f194da 100644
--- a/dspy/adapters/types/citation.py
+++ b/dspy/adapters/types/citation.py
@@ -54,6 +54,7 @@ class AnswerWithSources(Signature):

     class Citation(Type):
         """Individual citation with character location information."""
+
         type: str = "char_location"
         cited_text: str
         document_index: int
@@ -73,7 +74,7 @@ def format(self) -> dict[str, Any]:
                 "cited_text": self.cited_text,
                 "document_index": self.document_index,
                 "start_char_index": self.start_char_index,
-                "end_char_index": self.end_char_index
+                "end_char_index": self.end_char_index,
             }

             if self.document_title:
@@ -134,9 +135,7 @@ def validate_input(cls, data: Any):
             return data

         # Handle case where data is a list of dicts with citation info
-        if isinstance(data, list) and all(
-            isinstance(item, dict) and "cited_text" in item for item in data
-        ):
+        if isinstance(data, list) and all(isinstance(item, dict) and "cited_text" in item for item in data):
             return {"citations": [cls.Citation(**item) for item in data]}

         # Handle case where data is a dict
@@ -147,8 +146,7 @@ def validate_input(cls, data: Any):
                 if isinstance(citations_data, list):
                     return {
                         "citations": [
-                            cls.Citation(**item) if isinstance(item, dict) else item
-                            for item in citations_data
+                            cls.Citation(**item) if isinstance(item, dict) else item for item in citations_data
                         ]
                     }
             elif "cited_text" in data:
@@ -197,7 +195,6 @@ def parse_stream_chunk(cls, chunk) -> Optional["Citations"]:
             pass
         return None

-
     @classmethod
     def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Citations"]:
         """Parse a LM response into Citations.
diff --git a/dspy/clients/lm.py b/dspy/clients/lm.py
index 4a21f48d3b..71a8934cc4 100644
--- a/dspy/clients/lm.py
+++ b/dspy/clients/lm.py
@@ -88,7 +88,6 @@ def __init__(
         model_pattern = re.match(r"^(?:o[1345]|gpt-5)(?:-(?:mini|nano))?", model_family)

         if model_pattern:
-
             if (temperature and temperature != 1.0) or (max_tokens and max_tokens < 16000):
                 raise ValueError(
                     "OpenAI's reasoning models require passing temperature=1.0 or None and max_tokens >= 16000 or None to "
@@ -228,9 +227,7 @@ def thread_function_wrapper():

         return job

-    def reinforce(
-        self, train_kwargs
-    ) -> ReinforceJob:
+    def reinforce(self, train_kwargs) -> ReinforceJob:
         # TODO(GRPO Team): Should we return an initialized job here?
         from dspy import settings as settings

@@ -482,6 +479,7 @@ def _convert_chat_request_to_responses_request(request: dict[str, Any]):

     return request

+
 def _get_headers(headers: dict[str, Any] | None = None):
     headers = headers or {}
     return {
diff --git a/dspy/streaming/streaming_listener.py b/dspy/streaming/streaming_listener.py
index e4fc5e1967..eb0cc18f91 100644
--- a/dspy/streaming/streaming_listener.py
+++ b/dspy/streaming/streaming_listener.py
@@ -287,8 +287,9 @@ def _output_type(self) -> type | None:
         return None


-
-def find_predictor_for_stream_listeners(program: "Module", stream_listeners: list[StreamListener]) -> dict[int, list[StreamListener]]:
+def find_predictor_for_stream_listeners(
+    program: "Module", stream_listeners: list[StreamListener]
+) -> dict[int, list[StreamListener]]:
     """Find the predictor for each stream listener.

     This is a utility function to automatically find the predictor for each stream listener. It is used when some
@@ -337,6 +338,7 @@ def find_predictor_for_stream_listeners(program: "Module", stream_listeners: lis
             predict_id_to_listener[id(listener.predict)].append(listener)
     return predict_id_to_listener

+
 def _is_streamable(field_type: type | None) -> bool:
     if field_type is None:
         return False
diff --git a/tests/adapters/test_json_adapter.py b/tests/adapters/test_json_adapter.py
index efbbbc93ff..98597f6ad9 100644
--- a/tests/adapters/test_json_adapter.py
+++ b/tests/adapters/test_json_adapter.py
@@ -334,8 +334,16 @@ class MySignature(dspy.Signature):
     adapter = dspy.JSONAdapter()
     messages = adapter.format(MySignature, [], {"document": document_wrapper})

-    expected_doc1_content = {"type": "document", "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world!"}, "citations": {"enabled": True}}
-    expected_doc2_content = {"type": "document", "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world 2!"}, "citations": {"enabled": True}}
+    expected_doc1_content = {
+        "type": "document",
+        "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world!"},
+        "citations": {"enabled": True},
+    }
+    expected_doc2_content = {
+        "type": "document",
+        "source": {"type": "text", "media_type": "text/plain", "data": "Hello, world 2!"},
+        "citations": {"enabled": True},
+    }

     assert expected_doc1_content in messages[1]["content"]
     assert expected_doc2_content in messages[1]["content"]
@@ -643,6 +651,7 @@ class TestSignature(dspy.Signature):
     _, second_call_kwargs = mock_completion.call_args_list[1]
     assert second_call_kwargs.get("response_format") == {"type": "json_object"}

+
 def test_json_adapter_json_mode_no_structured_outputs():
     class TestSignature(dspy.Signature):
         question: str = dspy.InputField()
@@ -651,11 +660,15 @@ class TestSignature(dspy.Signature):
     dspy.configure(lm=dspy.LM(model="openai/gpt-4o", cache=False), adapter=dspy.JSONAdapter())
     program = dspy.Predict(TestSignature)

-    with mock.patch("litellm.completion") as mock_completion, \
-        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params, \
-        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema:
+    with (
+        mock.patch("litellm.completion") as mock_completion,
+        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params,
+        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema,
+    ):
         # Call a model that allows json but not structured outputs
-        mock_completion.return_value = ModelResponse(choices=[Choices(message=Message(content="{'answer': 'Test output'}"))])
+        mock_completion.return_value = ModelResponse(
+            choices=[Choices(message=Message(content="{'answer': 'Test output'}"))]
+        )
         mock_get_supported_openai_params.return_value = ["response_format"]
         mock_supports_response_schema.return_value = False

@@ -676,11 +689,15 @@ class TestSignature(dspy.Signature):

     program = dspy.Predict(TestSignature)

-    with mock.patch("litellm.acompletion") as mock_acompletion, \
-        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params, \
-        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema:
+    with (
+        mock.patch("litellm.acompletion") as mock_acompletion,
+        mock.patch("litellm.get_supported_openai_params") as mock_get_supported_openai_params,
+        mock.patch("litellm.supports_response_schema") as mock_supports_response_schema,
+    ):
         # Call a model that allows json but not structured outputs
-        mock_acompletion.return_value = ModelResponse(choices=[Choices(message=Message(content="{'answer': 'Test output'}"))])
+        mock_acompletion.return_value = ModelResponse(
+            choices=[Choices(message=Message(content="{'answer': 'Test output'}"))]
+        )
         mock_get_supported_openai_params.return_value = ["response_format"]
         mock_supports_response_schema.return_value = False

@@ -890,9 +907,7 @@ class TestSignature(dspy.Signature):
                 "type": "message",
                 "role": "assistant",
                 "status": "completed",
-                "content": [
-                    {"type": "output_text", "text": '{"answer": "Washington, D.C."}', "annotations": []}
-                ],
+                "content": [{"type": "output_text", "text": '{"answer": "Washington, D.C."}', "annotations": []}],
             },
         ),
     ],
diff --git a/tests/clients/test_lm.py b/tests/clients/test_lm.py
index 04238885c7..336ccc41b4 100644
--- a/tests/clients/test_lm.py
+++ b/tests/clients/test_lm.py
@@ -30,7 +30,7 @@ def make_response(output_blocks):
         model="openai/dspy-test-model",
         object="response",
         output=output_blocks,
-        metadata = {},
+        metadata={},
         parallel_tool_calls=False,
         temperature=1.0,
         tool_choice="auto",
@@ -107,9 +109,11 @@ def test_disabled_cache_skips_cache_key(monkeypatch):
     cache = dspy.cache

     try:
-        with mock.patch.object(cache, "cache_key", wraps=cache.cache_key) as cache_key_spy, \
-            mock.patch.object(cache, "get", wraps=cache.get) as cache_get_spy, \
-            mock.patch.object(cache, "put", wraps=cache.put) as cache_put_spy:
+        with (
+            mock.patch.object(cache, "cache_key", wraps=cache.cache_key) as cache_key_spy,
+            mock.patch.object(cache, "get", wraps=cache.get) as cache_get_spy,
+            mock.patch.object(cache, "put", wraps=cache.put) as cache_put_spy,
+        ):

             def fake_completion(*, cache, num_retries, retry_strategy, **request):
                 return ModelResponse(
@@ -315,6 +317,7 @@ def test_reasoning_model_token_parameter():
     assert "max_tokens" in lm.kwargs
     assert lm.kwargs["max_tokens"] == 1000

+
 @pytest.mark.parametrize("model_name", ["openai/o1", "openai/gpt-5-nano"])
"openai/gpt-5-nano"]) def test_reasoning_model_requirements(model_name): # Should raise assertion error if temperature or max_tokens requirements not met @@ -516,6 +519,7 @@ def test_disable_history(): model="openai/gpt-4o-mini", ) + def test_responses_api(): api_response = make_response( output_blocks=[ @@ -562,9 +566,7 @@ def test_responses_api(): def test_lm_replaces_system_with_developer_role(): - with mock.patch( - "dspy.clients.lm.litellm_responses_completion", return_value={"choices": []} - ) as mock_completion: + with mock.patch("dspy.clients.lm.litellm_responses_completion", return_value={"choices": []}) as mock_completion: lm = dspy.LM( "openai/gpt-4o-mini", cache=False, @@ -572,10 +574,7 @@ def test_lm_replaces_system_with_developer_role(): use_developer_role=True, ) lm.forward(messages=[{"role": "system", "content": "hi"}]) - assert ( - mock_completion.call_args.kwargs["request"]["messages"][0]["role"] - == "developer" - ) + assert mock_completion.call_args.kwargs["request"]["messages"][0]["role"] == "developer" def test_responses_api_tool_calls(litellm_test_server):