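"""Tests for LLM retry behavior: APIConnectionError is retried, retries are
capped by num_retries, successful and non-retryable calls are not retried,
retry settings round-trip through LLMConfig, and the retry listener fires."""
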
from unittest.mock import MagicMock, patch

import pytest
from litellm.exceptions import APIConnectionError
from pydantic import SecretStr

from openhands.core.config import LLMConfig
from openhands.core.llm import LLM


def create_mock_response(content: str = "Test response", response_id: str = "test-id"):
    """Build a mock litellm completion response with the structure LLM expects."""
    mock_response = MagicMock()
    mock_response.choices = [MagicMock()]
    mock_response.choices[0].message.content = content

    # Usage mock that answers .get() for token counts and model_extra
    mock_usage = MagicMock()
    mock_usage.get.side_effect = lambda key, default=None: {
        "prompt_tokens": 10,
        "completion_tokens": 5,
        "model_extra": {},
    }.get(key, default)
    mock_usage.prompt_tokens_details = None

    # Mock the response.get() method
    def mock_get(key, default=None):
        if key == "choices":
            return mock_response.choices
        elif key == "usage":
            return mock_usage
        elif key == "id":
            return response_id
        return default

    mock_response.get = mock_get

    # Also support dict-style access (response["usage"], etc.)
    def mock_getitem(self, key):
        return {
            "choices": mock_response.choices,
            "usage": mock_usage,
            "id": response_id,
        }[key]

    mock_response.__getitem__ = mock_getitem

    return mock_response
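
# Illustrative only (not a test): how a response built by the helper above
# is consumed via both access paths.
#   resp = create_mock_response("hi")
#   resp.choices[0].message.content      -> "hi"
#   resp["usage"].get("prompt_tokens")   -> 10
#   resp.get("id")                       -> "test-id"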


@pytest.fixture
def default_config():
    return LLMConfig(
        model="gpt-4o",
        api_key=SecretStr("test_key"),
        num_retries=2,
        retry_min_wait=1,
        retry_max_wait=2,
    )
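
# NOTE: these tests treat num_retries as the total number of completion
# attempts, with the initial call counting toward the limit (see the
# max-retries test below).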


@patch("openhands.core.llm.llm.litellm_completion")
def test_completion_retries_api_connection_error(
    mock_litellm_completion, default_config
):
    """Test that APIConnectionError is properly retried."""
    mock_response = create_mock_response("Retry successful")

    # Mock litellm_completion to first raise an APIConnectionError,
    # then return a successful response
    mock_litellm_completion.side_effect = [
        APIConnectionError(
            message="API connection error",
            llm_provider="test_provider",
            model="test_model",
        ),
        mock_response,
    ]

    # Create an LLM instance and call completion
    llm = LLM(config=default_config, service_id="test-service")
    response = llm.completion(
        messages=[{"role": "user", "content": "Hello!"}],
    )

    # Verify that the retry was successful
    assert response == mock_response
    assert mock_litellm_completion.call_count == 2  # Initial call + 1 retry


@patch("openhands.core.llm.llm.litellm_completion")
def test_completion_max_retries_api_connection_error(
    mock_litellm_completion, default_config
):
    """Test that APIConnectionError respects max retries."""
    # Mock litellm_completion to raise APIConnectionError on every call
    mock_litellm_completion.side_effect = [
        APIConnectionError(
            message="API connection error 1",
            llm_provider="test_provider",
            model="test_model",
        ),
        APIConnectionError(
            message="API connection error 2",
            llm_provider="test_provider",
            model="test_model",
        ),
        APIConnectionError(
            message="API connection error 3",
            llm_provider="test_provider",
            model="test_model",
        ),
    ]

    # Create an LLM instance and call completion
    llm = LLM(config=default_config, service_id="test-service")

    # The completion should raise an APIConnectionError after exhausting all retries
    with pytest.raises(APIConnectionError) as excinfo:
        llm.completion(
            messages=[{"role": "user", "content": "Hello!"}],
        )

    # Verify the expected number of attempts was made: num_retries caps the
    # total number of calls, with the initial attempt counting toward the limit
    assert mock_litellm_completion.call_count == default_config.num_retries

    # The exception should contain connection error information
    assert "API connection error" in str(excinfo.value)


@patch("openhands.core.llm.llm.litellm_completion")
def test_completion_no_retry_on_success(mock_litellm_completion, default_config):
    """Test that successful calls don't trigger retries."""
    mock_response = create_mock_response("Success on first try")
    mock_litellm_completion.return_value = mock_response

    # Create an LLM instance and call completion
    llm = LLM(config=default_config, service_id="test-service")
    response = llm.completion(
        messages=[{"role": "user", "content": "Hello!"}],
    )

    # Verify that no retries were needed
    assert response == mock_response
    assert mock_litellm_completion.call_count == 1  # Only the initial call


@patch("openhands.core.llm.llm.litellm_completion")
def test_completion_no_retry_on_non_retryable_error(
    mock_litellm_completion, default_config
):
    """Test that non-retryable errors don't trigger retries."""
    # Mock a non-retryable error (e.g., ValueError)
    mock_litellm_completion.side_effect = ValueError("Invalid input")

    # Create an LLM instance and call completion
    llm = LLM(config=default_config, service_id="test-service")

    # The completion should raise the ValueError immediately without retries
    with pytest.raises(ValueError) as excinfo:
        llm.completion(
            messages=[{"role": "user", "content": "Hello!"}],
        )

    # Verify that no retries were attempted
    assert mock_litellm_completion.call_count == 1  # Only the initial call
    assert "Invalid input" in str(excinfo.value)


def test_retry_configuration_validation():
    """Test that retry configuration is properly validated."""
    # Test with zero retries
    config_no_retry = LLMConfig(
        model="gpt-4o",
        api_key=SecretStr("test_key"),
        num_retries=0,
    )
    llm_no_retry = LLM(config=config_no_retry, service_id="test-service")
    assert llm_no_retry.config.num_retries == 0

    # Test with custom retry settings
    config_custom = LLMConfig(
        model="gpt-4o",
        api_key=SecretStr("test_key"),
        num_retries=5,
        retry_min_wait=2,
        retry_max_wait=10,
        retry_multiplier=2.0,
    )
    llm_custom = LLM(config=config_custom, service_id="test-service")
    assert llm_custom.config.num_retries == 5
    assert llm_custom.config.retry_min_wait == 2
    assert llm_custom.config.retry_max_wait == 10
    assert llm_custom.config.retry_multiplier == 2.0
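
# Note: the test above only checks that retry settings round-trip through the
# config; the actual wait schedule (presumably exponential backoff bounded by
# retry_min_wait/retry_max_wait and scaled by retry_multiplier) is not asserted.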


@patch("openhands.core.llm.llm.litellm_completion")
def test_retry_listener_callback(mock_litellm_completion, default_config):
    """Test that the retry listener callback is called during retries."""
    retry_calls = []

    def retry_listener(attempt: int, max_attempts: int):
        retry_calls.append((attempt, max_attempts))

    mock_response = create_mock_response("Success after retry")

    mock_litellm_completion.side_effect = [
        APIConnectionError(
            message="Connection failed",
            llm_provider="test_provider",
            model="test_model",
        ),
        mock_response,
    ]

    # Create an LLM instance with a retry listener
    llm = LLM(
        config=default_config, service_id="test-service", retry_listener=retry_listener
    )
    response = llm.completion(
        messages=[{"role": "user", "content": "Hello!"}],
    )

    # Verify that the retry listener was called
    assert response == mock_response
    assert len(retry_calls) >= 1  # At least one retry attempt should be recorded

    # Check that the retry listener received correct parameters
    attempt, max_attempts = retry_calls[0]
    assert isinstance(attempt, int)
    assert isinstance(max_attempts, int)
    assert attempt >= 1
    assert max_attempts == default_config.num_retries
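

# --- Illustrative sketch (not collected by pytest) ---
# How application code might wire a retry listener, using only the API
# exercised in the tests above; the config values are placeholders.
#
#   def log_retry(attempt: int, max_attempts: int) -> None:
#       print(f"LLM retry {attempt}/{max_attempts}")
#
#   llm = LLM(
#       config=LLMConfig(model="gpt-4o", api_key=SecretStr("test_key"), num_retries=3),
#       service_id="my-service",
#       retry_listener=log_retry,
#   )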