@@ -211,6 +211,59 @@ def test_conversation_with_same_agent_succeeds():
211211 assert len (new_conversation .state .events ) > 0
212212
213213
def test_agent_resolve_diff_from_deserialized():
    """Test agent's resolve_diff_from_deserialized method.

    Includes tolerance for litellm_extra_body differences injected at CLI load time.
    """
    # NOTE: the previous version wrapped this body in an unused
    # ``tempfile.TemporaryDirectory()`` context manager; nothing in the test
    # touches the filesystem, so it has been removed.

    # Create original agent
    tools = [Tool(name="TerminalTool")]
    llm = LLM(model="gpt-4o-mini", api_key=SecretStr("test-key"), usage_id="test-llm")
    original_agent = Agent(llm=llm, tools=tools)

    # Serialize and deserialize to simulate persistence
    serialized = original_agent.model_dump_json()
    deserialized_agent = AgentBase.model_validate_json(serialized)

    # Create runtime agent with same configuration (fresh LLM instance to
    # mirror how a real process would reconstruct its own agent)
    llm2 = LLM(model="gpt-4o-mini", api_key=SecretStr("test-key"), usage_id="test-llm")
    runtime_agent = Agent(llm=llm2, tools=tools)

    # Should resolve successfully and be indistinguishable from the runtime agent
    resolved = runtime_agent.resolve_diff_from_deserialized(deserialized_agent)
    # Test model_dump equality
    assert resolved.model_dump(mode="json") == runtime_agent.model_dump(mode="json")
    assert resolved.llm.model == runtime_agent.llm.model
    assert resolved.__class__ == runtime_agent.__class__

    # Now simulate CLI injecting dynamic litellm_extra_body metadata at load time
    injected = deserialized_agent.model_copy(
        update={
            "llm": deserialized_agent.llm.model_copy(
                update={
                    "litellm_extra_body": {
                        "metadata": {
                            "session_id": "sess-123",
                            "tags": ["app:openhands", "model:gpt-4o-mini"],
                            "trace_version": "1.2.3",
                        }
                    }
                }
            )
        }
    )

    # Reconcile again: differences in litellm_extra_body should be allowed and
    # the runtime value should be preferred without raising an error.
    resolved2 = runtime_agent.resolve_diff_from_deserialized(injected)
    assert resolved2.llm.litellm_extra_body == runtime_agent.llm.litellm_extra_body
266+
214267@patch ("openhands.sdk.llm.llm.litellm_completion" )
215268def test_conversation_persistence_lifecycle (mock_completion ):
216269 """Test full conversation persistence lifecycle similar to examples/10_persistence.py.""" # noqa: E501
@@ -288,34 +341,6 @@ def test_conversation_persistence_lifecycle(mock_completion):
288341 assert len (new_conversation .state .events ) >= original_event_count + 2
289342
290343
def test_agent_resolve_diff_from_deserialized():
    """Test agent's resolve_diff_from_deserialized method."""
    with tempfile.TemporaryDirectory():
        # Build the agent that will be persisted
        terminal_tools = [Tool(name="TerminalTool")]
        persisted_llm = LLM(
            model="gpt-4o-mini", api_key=SecretStr("test-key"), usage_id="test-llm"
        )
        persisted_agent = Agent(llm=persisted_llm, tools=terminal_tools)

        # Round-trip through JSON to mimic what persistence does
        payload = persisted_agent.model_dump_json()
        restored_agent = AgentBase.model_validate_json(payload)

        # An equivalent agent as the running process would construct it
        live_llm = LLM(
            model="gpt-4o-mini", api_key=SecretStr("test-key"), usage_id="test-llm"
        )
        live_agent = Agent(llm=live_llm, tools=terminal_tools)

        # Reconciliation must succeed and yield an agent equal to the live one
        merged = live_agent.resolve_diff_from_deserialized(restored_agent)
        assert merged.model_dump(mode="json") == live_agent.model_dump(mode="json")
        assert merged.llm.model == live_agent.llm.model
        assert merged.__class__ == live_agent.__class__
318-
319344def test_agent_resolve_diff_allows_security_analyzer_change ():
320345 """Test that security_analyzer can differ between runtime and persisted agents."""
321346 from openhands .sdk .security .llm_analyzer import LLMSecurityAnalyzer
0 commit comments