
Commit 301a1a9

Author: matdev83 (committed)
Manual Merge PR #630
1 parent 06ecf63 commit 301a1a9

File tree

13 files changed: +276 −31 lines


README.md

Lines changed: 71 additions & 2 deletions
Large diffs are not rendered by default.

data/test_suite_state.json

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,4 +1,4 @@
 {
-    "test_count": 5072,
+    "test_count": 5074,
     "last_updated": "1762168167.0802596"
 }
```

src/connectors/gemini_oauth_base.py

Lines changed: 26 additions & 5 deletions
```diff
@@ -1870,9 +1870,18 @@ async def _chat_completions_code_assist(
             openai_response = self._extract_generated_text_from_response(
                 response_json
             )
+        except BackendError:
+            # Preserve backend-specific error codes/details for graceful handling
+            raise
         except Exception as e:
-            logger.error(f"Failed to process API response: {e}", exc_info=True)
-            raise BackendError(f"Failed to process API response: {e}")
+            message = f"Failed to process API response: {e}"
+            logger.error(message, exc_info=True)
+            raise BackendError(
+                message=message,
+                backend_name=self.backend_type,
+                code="gemini_response_processing_failed",
+                details={"inner_error": str(e)},
+            ) from e

         # Calculate usage (best effort)
         encoding = tiktoken.get_encoding("cl100k_base")
@@ -2899,10 +2908,22 @@ async def _probe_model_recovery(

             logger.debug(f"Model {model} probe {state.probe_success_count}/2 succeeded")

-        except Exception as e:
-            # Probe failed, reset success count
+        except BackendError as error:
             state.probe_success_count = 0
-            logger.debug(f"Model {model} recovery probe failed: {e}")
+            log_message = (
+                f"Model {model} recovery probe failed with backend error: {error}"
+            )
+            if self._is_rate_limit_like_error(error):
+                logger.info(log_message)
+            else:
+                logger.warning(log_message)
+        except Exception as exc:  # pragma: no cover - defensive logging path
+            state.probe_success_count = 0
+            logger.warning(
+                "Model %s recovery probe encountered unexpected error: %s",
+                model,
+                exc,
+            )

         return False
```
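The pattern worth noting in the first hunk is the selective re-raise: exceptions that already carry backend-specific codes pass through untouched, while everything else is wrapped with `raise ... from e` so the original traceback survives. A minimal standalone sketch of that pattern follows; this `BackendError` stand-in only mirrors the constructor keywords visible in the diff, not the project's real class.

```python
import logging

logger = logging.getLogger(__name__)


class BackendError(Exception):
    """Stand-in mirroring the keyword arguments seen in the diff."""

    def __init__(self, message, backend_name=None, code=None, details=None):
        super().__init__(message)
        self.backend_name = backend_name
        self.code = code
        self.details = details


def process_response(parse):
    try:
        return parse()
    except BackendError:
        # Already classified with a code; propagate unchanged.
        raise
    except Exception as e:
        message = f"Failed to process API response: {e}"
        logger.error(message, exc_info=True)
        # "from e" chains the original exception so callers can inspect
        # __cause__ and logs keep the inner traceback.
        raise BackendError(
            message=message,
            backend_name="gemini-oauth",
            code="gemini_response_processing_failed",
            details={"inner_error": str(e)},
        ) from e
```

The second hunk applies the same idea to log levels: rate-limit-like probe failures are expected, so they log at info, while anything else escalates to warning.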

src/connectors/openai.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -679,7 +679,7 @@ async def cancel_stream() -> None:
             await response.aclose()

         async def gen() -> AsyncGenerator[ProcessedResponse, None]:
-            async def text_generator() -> AsyncGenerator[str, None]:
+            async def text_generator() -> AsyncGenerator[dict[Any, Any] | Any, None]:
                 async def iter_sse_messages() -> AsyncGenerator[str, None]:
                     buffer = ""
                     separator = "\n\n"
```
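This change only widens the declared yield type: `text_generator` evidently yields parsed SSE payloads (dicts) rather than plain strings. A small illustrative sketch of an async generator with that shape of annotation (the event data here is made up):

```python
import asyncio
import json
from collections.abc import AsyncGenerator
from typing import Any


async def text_generator() -> AsyncGenerator[dict[Any, Any] | Any, None]:
    # Yield parsed JSON payloads where possible; pass other values through.
    raw_events = ['{"delta": "hel"}', '{"delta": "lo"}', "[DONE]"]
    for event in raw_events:
        try:
            yield json.loads(event)
        except json.JSONDecodeError:
            yield event  # e.g. the "[DONE]" sentinel stays a string


async def main() -> None:
    async for chunk in text_generator():
        print(type(chunk).__name__, chunk)


asyncio.run(main())
```

Since `dict[Any, Any] | Any` effectively collapses to `Any` for type checkers, the new annotation mostly documents intent rather than adding stricter checking.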

src/connectors/qwen_oauth.py

Lines changed: 2 additions & 3 deletions
```diff
@@ -1087,9 +1087,8 @@ async def chat_completions(
         # Handle reasoning_effort by appending " /think" to the last user message
         # Append by default unless explicitly set to "low"
         reasoning_effort = None
-        if hasattr(request_data, "reasoning_effort"):
-            reasoning_effort = request_data.reasoning_effort
-        elif isinstance(request_data, dict):
+        reasoning_effort = getattr(request_data, "reasoning_effort", None)
+        if reasoning_effort is None and isinstance(request_data, dict):
             reasoning_effort = request_data.get("reasoning_effort")

         # Append " /think" unless reasoning_effort is explicitly "low"
```
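The refactor collapses the attribute/dict branching into a single `getattr` with a `None` default, falling back to a dict lookup. A self-contained sketch of the lookup (names here are illustrative, not the connector's API):

```python
from dataclasses import dataclass


@dataclass
class RequestLike:
    """Illustrative stand-in for an object-style chat request."""

    reasoning_effort: str | None = None


def extract_reasoning_effort(request_data: object) -> str | None:
    # getattr handles object-style requests; dicts fall through to .get().
    reasoning_effort = getattr(request_data, "reasoning_effort", None)
    if reasoning_effort is None and isinstance(request_data, dict):
        reasoning_effort = request_data.get("reasoning_effort")
    return reasoning_effort


assert extract_reasoning_effort(RequestLike("low")) == "low"
assert extract_reasoning_effort({"reasoning_effort": "high"}) == "high"
assert extract_reasoning_effort({}) is None
```

One subtlety: `hasattr` on a dict instance is False for its keys, so the old and new versions agree for both shapes of `request_data`.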

src/core/metadata.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -8,6 +8,6 @@ def _load_project_metadata() -> tuple[str, str]:
     try:
         data = tomli.loads(pyproject.read_text())
         meta = data.get("project", {})
-        return meta.get("name", "llm-interactive-proxy"), meta.get("version", "0.0.0")
+        return meta.get("name", "llm-interactive-proxy"), meta.get("version", "0.1.0")
     except Exception:
         return "llm-interactive-proxy", "0.0.0"
```

src/core/services/uri_parameter_validator.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -75,11 +75,11 @@ def validate_and_normalize(
         try:
             # Type conversion and validation
             normalized_value: float | str
-            if param_type == float:
+            if param_type is float:
                 normalized_value = self._validate_float_param(
                     param_name, param_value, rules
                 )
-            elif param_type == str:
+            elif param_type is str:
                 normalized_value = self._validate_string_param(
                     param_name, param_value, rules
                 )
```
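Switching from `==` to `is` is the precise way to compare type objects (it is also what flake8's E721 check asks for): types are singletons, and identity cannot be spoofed by an overridden `__eq__`. A contrived demonstration:

```python
class TrickyMeta(type):
    # A metaclass can make a class compare equal to anything.
    def __eq__(cls, other: object) -> bool:
        return True

    def __hash__(cls) -> int:
        return id(cls)


class Tricky(metaclass=TrickyMeta):
    pass


print(Tricky == float)  # True  -- equality is overridable
print(Tricky is float)  # False -- identity is exact
print(float is float)   # True
```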

tests/integration/test_pytest_compression_e2e.py

Lines changed: 5 additions & 5 deletions
```diff
@@ -11,7 +11,7 @@
 import pytest
 from src.core.app.application_builder import ApplicationBuilder
 from src.core.config.app_config import AppConfig
-from src.core.domain.chat import ToolCall
+from src.core.domain.chat import FunctionCall, ToolCall
 from src.core.domain.processed_result import ProcessedResult
 from src.core.domain.session import Session, SessionState
 from src.core.services.session_service_impl import SessionService
@@ -66,7 +66,7 @@ async def test_pytest_compression_end_to_end_with_real_execution():
     tool_call = ToolCall(
         id="call_pytest_test_123",
         type="function",
-        function={"name": "bash", "arguments": json.dumps(pytest_cmd)},
+        function=FunctionCall(name="bash", arguments=json.dumps(pytest_cmd)),
     )

     logger.info(
@@ -100,7 +100,7 @@ async def test_pytest_compression_end_to_end_with_real_execution():
     # Note: The reactor service returns None because the handler doesn't swallow the tool call,
     # it just sets the compression state. This is the correct behavior.
     assert (
-        updated_session.state.compress_next_tool_call_reply == True
+        updated_session.state.compress_next_tool_call_reply
     ), "Compression state should be set to True"

     # Act - Phase 2: Create mock command result with real pytest output
@@ -314,7 +314,7 @@ async def test_pytest_compression_state_machine_flow():
     await session_service.update_session(session)

     # Verify initial state
-    assert session.state.compress_next_tool_call_reply == False
+    assert not session.state.compress_next_tool_call_reply

     # Act - Process pytest tool call
     from src.core.interfaces.tool_call_reactor_interface import ToolCallContext
@@ -344,7 +344,7 @@ async def test_pytest_compression_state_machine_flow():

     # Assert - State should be set
     updated_session = await session_service.get_session(session_id)
-    assert updated_session.state.compress_next_tool_call_reply == True
+    assert updated_session.state.compress_next_tool_call_reply
     # Note: The reactor service returns None because the handler doesn't swallow the tool call,
     # it just sets the compression state. This is the correct behavior.
     logger.info(
```
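Two cleanups recur in this file: the tool call now carries a typed `FunctionCall` instead of a raw dict, and boolean assertions drop the `== True` / `== False` comparisons that flake8's E712 rule flags. A dataclass-based sketch of both (these stand-ins are not the project's actual models):

```python
import json
from dataclasses import dataclass


@dataclass
class FunctionCall:
    name: str
    arguments: str


@dataclass
class ToolCall:
    id: str
    type: str
    function: FunctionCall


tool_call = ToolCall(
    id="call_pytest_test_123",
    type="function",
    function=FunctionCall(name="bash", arguments=json.dumps({"cmd": "pytest"})),
)

flag_set, flag_clear = True, False
assert flag_set        # preferred over: assert flag_set == True
assert not flag_clear  # preferred over: assert flag_clear == False
```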

tests/unit/connectors/test_gemini_oauth_plan_usage.py

Lines changed: 62 additions & 1 deletion
```diff
@@ -2,10 +2,71 @@

 import pytest
 from src.connectors.gemini_oauth_plan import GeminiOAuthPlanConnector
-from src.core.domain.chat import ChatMessage, ChatRequest
+from src.core.common.exceptions import BackendError
+from src.core.domain.chat import CanonicalChatRequest, ChatMessage, ChatRequest
 from src.core.domain.responses import ResponseEnvelope, StreamingResponseEnvelope


+@pytest.mark.asyncio
+async def test_code_assist_empty_response_preserves_backend_error():
+    mock_client = AsyncMock()
+    mock_config = MagicMock()
+    mock_translation_service = MagicMock()
+
+    connector = GeminiOAuthPlanConnector(
+        client=mock_client,
+        config=mock_config,
+        translation_service=mock_translation_service,
+    )
+
+    connector.gemini_api_base_url = "https://cloudcode-pa.googleapis.com"
+    connector._oauth_credentials = {"access_token": "fake_token"}
+    connector._refresh_token_if_needed = AsyncMock(return_value=True)
+    connector._discover_project_id = AsyncMock(return_value="fake_project")
+
+    mock_translation_service.from_domain_to_gemini_request.return_value = {
+        "contents": [
+            {
+                "role": "user",
+                "parts": [{"text": "Hello"}],
+            }
+        ]
+    }
+
+    class FakeResponse:
+        status_code = 200
+
+        @staticmethod
+        def json() -> dict[str, list[dict[str, object]]]:
+            return {"candidates": []}
+
+    fake_response = FakeResponse()
+
+    mock_auth_session = MagicMock()
+    mock_auth_session.headers = {}
+    mock_auth_session.request.return_value = fake_response
+
+    request = CanonicalChatRequest(
+        model="gemini-2.5-pro",
+        messages=[ChatMessage(role="user", content="Hello")],
+    )
+
+    with (
+        patch(
+            "google.auth.transport.requests.AuthorizedSession",
+            return_value=mock_auth_session,
+        ),
+        pytest.raises(BackendError) as excinfo,
+    ):
+        await connector._chat_completions_code_assist(
+            request_data=request,
+            processed_messages=[ChatMessage(role="user", content="Hello")],
+            effective_model="gemini-2.5-pro",
+        )
+
+    assert excinfo.value.code == "empty_response"
+
+
 @pytest.mark.asyncio
 async def test_chat_completions_with_tiktoken_usage_calculation():
     """
```

tests/unit/core/services/test_content_rewriter_service.py

Lines changed: 4 additions & 3 deletions
```diff
@@ -2,7 +2,6 @@
 import shutil
 import tempfile
 import unittest
-from types import SimpleNamespace

 from src.core.config.app_config import RewritingConfig
 from src.core.domain.replacement_rule import ReplacementMode
@@ -151,7 +150,7 @@ def tearDown(self):
                     self.test_config_dir, ignore_errors=True
                 )
             )
-        except:
+        except Exception:
            pass

    def test_load_rules(self):
@@ -248,7 +247,9 @@ def test_app_config_overrides_default_config_path(self):
        ) as handle:
            handle.write("rewritten custom reply")

-        app_config = SimpleNamespace(
+        from src.core.config.app_config import AppConfig
+
+        app_config = AppConfig(
            rewriting=RewritingConfig(enabled=True, config_path=alternate_dir)
        )
```
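Two small hardening moves here: a bare `except:` becomes `except Exception:`, and the `SimpleNamespace` stand-in config is replaced with a real `AppConfig`, so the test exercises the validated config model. The bare form matters because it also catches `BaseException` subclasses like `SystemExit` and `KeyboardInterrupt`; a short demonstration:

```python
def cleanup_bare() -> None:
    try:
        raise SystemExit(1)
    except:  # noqa: E722 -- also swallows the exit request
        pass


def cleanup_narrow() -> None:
    try:
        raise SystemExit(1)
    except Exception:  # SystemExit is not an Exception subclass
        pass


cleanup_bare()  # returns normally: the SystemExit was eaten

try:
    cleanup_narrow()
except SystemExit:
    print("SystemExit propagated, as intended")
```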
