From 1e3e7ef18b7bba951a82c3d0fc21237b8f3c0c30 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Mon, 10 Nov 2025 17:04:27 -0500 Subject: [PATCH 01/18] stuff --- tests/test_audit_logs.py | 27 +++++++++++ workos/audit_logs.py | 8 +++- workos/utils/_base_http_client.py | 57 +++++++++++++++++++++++ workos/utils/http_client.py | 75 +++++++++++++++++++++++++++++-- 4 files changed, 161 insertions(+), 6 deletions(-) diff --git a/tests/test_audit_logs.py b/tests/test_audit_logs.py index 390536f8..a363dee7 100644 --- a/tests/test_audit_logs.py +++ b/tests/test_audit_logs.py @@ -106,6 +106,33 @@ def test_sends_idempotency_key( assert request_kwargs["headers"]["idempotency-key"] == idempotency_key assert response is None + def test_auto_generates_idempotency_key( + self, mock_audit_log_event, capture_and_mock_http_client_request + ): + """Test that idempotency key is auto-generated when not provided.""" + organization_id = "org_123456789" + + request_kwargs = capture_and_mock_http_client_request( + self.http_client, {"success": True}, 200 + ) + + response = self.audit_logs.create_event( + organization_id=organization_id, + event=mock_audit_log_event, + # No idempotency_key provided + ) + + # Assert header exists and is a valid UUID v4 + assert "idempotency-key" in request_kwargs["headers"] + idempotency_key = request_kwargs["headers"]["idempotency-key"] + assert len(idempotency_key) == 36 # UUID format: 8-4-4-4-12 + assert idempotency_key.count("-") == 4 + # Verify it's a valid UUID by checking the version field (4th section starts with '4') + uuid_parts = idempotency_key.split("-") + assert len(uuid_parts) == 5 + assert uuid_parts[2][0] == "4" # UUID v4 identifier + assert response is None + def test_throws_unauthorized_exception( self, mock_audit_log_event, mock_http_client_with_response ): diff --git a/workos/audit_logs.py b/workos/audit_logs.py index 73bacff8..ca8ee2e2 100644 --- a/workos/audit_logs.py +++ b/workos/audit_logs.py @@ -1,3 +1,4 @@ +import uuid from typing import Optional, Protocol, Sequence from workos.types.audit_logs import AuditLogExport @@ -82,8 +83,11 @@ def create_event( json = {"organization_id": organization_id, "event": event} headers = {} - if idempotency_key: - headers["idempotency-key"] = idempotency_key + # Auto-generate UUID v4 if not provided + if idempotency_key is None: + idempotency_key = str(uuid.uuid4()) + + headers["idempotency-key"] = idempotency_key self._http_client.request( EVENTS_PATH, method=REQUEST_METHOD_POST, json=json, headers=headers diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index 49dcbcf5..1faae739 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -1,4 +1,6 @@ import platform +import random +from dataclasses import dataclass from typing import ( Any, Mapping, @@ -33,6 +35,15 @@ DEFAULT_REQUEST_TIMEOUT = 25 +@dataclass +class RetryConfig: + """Configuration for retry logic with exponential backoff.""" + max_retries: int = 3 + base_delay: float = 1.0 # seconds + max_delay: float = 30.0 # seconds + jitter: float = 0.25 # 25% jitter + + ParamsType = Optional[Mapping[str, Any]] HeadersType = Optional[Dict[str, str]] JsonType = Optional[Union[Mapping[str, Any], Sequence[Any]]] @@ -56,6 +67,7 @@ class BaseHTTPClient(Generic[_HttpxClientT]): _base_url: str _version: str _timeout: int + _retry_config: RetryConfig def __init__( self, @@ -65,12 +77,14 @@ def __init__( client_id: str, version: str, timeout: Optional[int] = DEFAULT_REQUEST_TIMEOUT, + retry_config: 
Optional[RetryConfig] = None, ) -> None: self._api_key = api_key self._base_url = base_url self._client_id = client_id self._version = version self._timeout = DEFAULT_REQUEST_TIMEOUT if timeout is None else timeout + self._retry_config = retry_config if retry_config is not None else RetryConfig() def _generate_api_url(self, path: str) -> str: return f"{self._base_url}{path}" @@ -196,6 +210,49 @@ def _handle_response(self, response: httpx.Response) -> ResponseJson: return cast(ResponseJson, response_json) + def _is_retryable_error(self, response: httpx.Response) -> bool: + """Determine if an error should be retried.""" + status_code = response.status_code + + # Retry on 5xx server errors + if 500 <= status_code < 600: + return True + + # Retry on 429 rate limit + if status_code == 429: + return True + + # Do NOT retry 4xx client errors (except 429) + return False + + def _get_retry_delay(self, attempt: int, response: httpx.Response) -> float: + """Calculate delay with exponential backoff and jitter.""" + # Check for Retry-After header on 429 responses + if response.status_code == 429: + retry_after = response.headers.get("Retry-After") + if retry_after: + try: + return float(retry_after) + except ValueError: + pass # Fall through to exponential backoff + + # Exponential backoff: base_delay * 2^attempt + delay = self._retry_config.base_delay * (2 ** attempt) + + # Cap at max_delay + delay = min(delay, self._retry_config.max_delay) + + # Add jitter: random variation of 0-25% of delay + jitter_amount = delay * self._retry_config.jitter * random.random() + return delay + jitter_amount + + def _should_retry_exception(self, exc: Exception) -> bool: + """Determine if an exception should trigger a retry.""" + # Retry on network errors (connection, timeout) + if isinstance(exc, (httpx.ConnectError, httpx.TimeoutException)): + return True + return False + def build_request_url( self, url: str, diff --git a/workos/utils/http_client.py b/workos/utils/http_client.py index 203c7df0..6566d55b 100644 --- a/workos/utils/http_client.py +++ b/workos/utils/http_client.py @@ -1,4 +1,6 @@ import asyncio +import random +import time from types import TracebackType from typing import Optional, Type, Union @@ -13,6 +15,7 @@ JsonType, ParamsType, ResponseJson, + RetryConfig, ) from workos.utils.request_helper import REQUEST_METHOD_GET @@ -38,6 +41,7 @@ def __init__( client_id: str, version: str, timeout: Optional[int] = None, + retry_config: Optional[RetryConfig] = None, # If no custom transport is provided, let httpx use the default # so we don't overwrite environment configurations like proxies transport: Optional[httpx.BaseTransport] = None, @@ -48,6 +52,7 @@ def __init__( client_id=client_id, version=version, timeout=timeout, + retry_config=retry_config, ) self._client = SyncHttpxClientWrapper( base_url=base_url, @@ -110,8 +115,38 @@ def request( headers=headers, exclude_default_auth_headers=exclude_default_auth_headers, ) - response = self._client.request(**prepared_request_parameters) - return self._handle_response(response) + + last_exception = None + + for attempt in range(self._retry_config.max_retries + 1): + try: + response = self._client.request(**prepared_request_parameters) + + # Check if we should retry based on status code + if attempt < self._retry_config.max_retries and self._is_retryable_error(response): + delay = self._get_retry_delay(attempt, response) + time.sleep(delay) + continue + + # No retry needed or max retries reached + return self._handle_response(response) + + except Exception as exc: 
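+                # Note: only network-level failures (httpx.ConnectError and
+                # httpx.TimeoutException, per _should_retry_exception) are retried
+                # on this path; any other exception is re-raised immediately below.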
+ last_exception = exc + if attempt < self._retry_config.max_retries and self._should_retry_exception(exc): + delay = self._retry_config.base_delay * (2 ** attempt) + delay = min(delay, self._retry_config.max_delay) + jitter_amount = delay * self._retry_config.jitter * random.random() + time.sleep(delay + jitter_amount) + continue + raise + + # Should not reach here, but raise last exception if we do + if last_exception is not None: + raise last_exception + + # Fallback: this should never happen + raise RuntimeError("Unexpected state in retry logic") class AsyncHttpxClientWrapper(httpx.AsyncClient): @@ -138,6 +173,7 @@ def __init__( client_id: str, version: str, timeout: Optional[int] = None, + retry_config: Optional[RetryConfig] = None, # If no custom transport is provided, let httpx use the default # so we don't overwrite environment configurations like proxies transport: Optional[httpx.AsyncBaseTransport] = None, @@ -148,6 +184,7 @@ def __init__( client_id=client_id, version=version, timeout=timeout, + retry_config=retry_config, ) self._client = AsyncHttpxClientWrapper( base_url=base_url, @@ -207,8 +244,38 @@ async def request( headers=headers, exclude_default_auth_headers=exclude_default_auth_headers, ) - response = await self._client.request(**prepared_request_parameters) - return self._handle_response(response) + + last_exception = None + + for attempt in range(self._retry_config.max_retries + 1): + try: + response = await self._client.request(**prepared_request_parameters) + + # Check if we should retry based on status code + if attempt < self._retry_config.max_retries and self._is_retryable_error(response): + delay = self._get_retry_delay(attempt, response) + await asyncio.sleep(delay) + continue + + # No retry needed or max retries reached + return self._handle_response(response) + + except Exception as exc: + last_exception = exc + if attempt < self._retry_config.max_retries and self._should_retry_exception(exc): + delay = self._retry_config.base_delay * (2 ** attempt) + delay = min(delay, self._retry_config.max_delay) + jitter_amount = delay * self._retry_config.jitter * random.random() + await asyncio.sleep(delay + jitter_amount) + continue + raise + + # Should not reach here, but raise last exception if we do + if last_exception is not None: + raise last_exception + + # Fallback: this should never happen + raise RuntimeError("Unexpected state in retry logic") HTTPClient = Union[AsyncHTTPClient, SyncHTTPClient] From 78230229ff1680f5471e627df47f12ffee781a7d Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Mon, 10 Nov 2025 17:31:12 -0500 Subject: [PATCH 02/18] retry --- tests/test_http_client_retry.py | 503 ++++++++++++++++++++++++++++++ workos/audit_logs.py | 8 +- workos/utils/_base_http_client.py | 13 +- workos/utils/http_client.py | 44 ++- 4 files changed, 547 insertions(+), 21 deletions(-) create mode 100644 tests/test_http_client_retry.py diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py new file mode 100644 index 00000000..e9992c2a --- /dev/null +++ b/tests/test_http_client_retry.py @@ -0,0 +1,503 @@ +import asyncio +import time +from unittest.mock import AsyncMock, MagicMock, Mock, patch +import pytest +import httpx + +from workos.utils._base_http_client import RetryConfig +from workos.utils.http_client import AsyncHTTPClient, SyncHTTPClient +from workos.exceptions import ServerException, BadRequestException + + +class TestSyncRetryLogic: + """Test retry logic for synchronous HTTP client.""" + + @pytest.fixture + def sync_http_client(self): 
+ """Create a SyncHTTPClient for testing.""" + return SyncHTTPClient( + api_key="sk_test", + base_url="https://api.workos.test/", + client_id="client_test", + version="test", + ) + + @pytest.fixture + def retry_config(self): + """Create a RetryConfig for testing.""" + return RetryConfig( + max_retries=3, + base_delay=0.1, # Use shorter delays for faster tests + max_delay=1.0, + jitter=0.0, # No jitter for predictable tests + ) + + def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 500 errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + return httpx.Response(status_code=500, json={"error": "Server error"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 3 + assert response == {"success": True} + # Verify sleep was called with exponential backoff + assert mock_sleep.call_count == 2 + + def test_retries_on_429_rate_limit(self, sync_http_client, retry_config, monkeypatch): + """Test that 429 errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + return httpx.Response( + status_code=429, + headers={"Retry-After": "0.1"}, + json={"error": "Rate limit exceeded"} + ) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + # Verify Retry-After header was respected + mock_sleep.assert_called_once_with(0.1) + + def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): + """Test that 4xx errors (except 429) don't retry (no retry_config passed).""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response( + status_code=400, + json={"error": "Bad request", "message": "Invalid data"} + ) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with pytest.raises(BadRequestException): + sync_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count == 1 + + def test_no_retry_on_401_error(self, sync_http_client, monkeypatch): + """Test that 401 errors don't retry (no retry_config passed).""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response( + status_code=401, + json={"error": "Unauthorized", "message": "Invalid credentials"} + ) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with pytest.raises(Exception): + sync_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count == 1 + + def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch): + """Test that max retries limit is respected.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response(status_code=500, json={"error": "Server error"}) + + 
monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep"): + with pytest.raises(ServerException): + sync_http_client.request("test/path", retry_config=retry_config) + + # Should be called max_retries + 1 times (initial + 3 retries) + assert call_count == 4 + + def test_exponential_backoff_delays(self, sync_http_client, retry_config, monkeypatch): + """Test that retry delays follow exponential backoff.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count <= 3: + return httpx.Response(status_code=500, json={"error": "Server error"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + sync_http_client.request("test/path", retry_config=retry_config) + + # Verify exponential backoff: 0.1s, 0.2s, 0.4s + assert mock_sleep.call_count == 3 + calls = [call[0][0] for call in mock_sleep.call_args_list] + # Check that delays are increasing (exponential backoff) + assert calls[0] < calls[1] < calls[2] + + def test_retries_on_network_error(self, sync_http_client, retry_config, monkeypatch): + """Test that network errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + raise httpx.ConnectError("Connection failed") + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep"): + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 3 + assert response == {"success": True} + + def test_retries_on_timeout_error(self, sync_http_client, retry_config, monkeypatch): + """Test that timeout errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + raise httpx.TimeoutException("Request timed out") + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep"): + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + + def test_no_retry_on_success(self, sync_http_client, monkeypatch): + """Test that successful requests don't retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + response = sync_http_client.request("test/path") + + assert call_count == 1 + assert response == {"success": True} + + def test_retry_with_custom_config(self): + """Test that RetryConfig can be customized with different values.""" + custom_retry_config = RetryConfig( + max_retries=5, + base_delay=0.05, + max_delay=2.0, + jitter=0.1, + ) + + # Verify custom values are set correctly + assert custom_retry_config.max_retries == 5 + assert custom_retry_config.base_delay == 0.05 + assert custom_retry_config.max_delay == 2.0 + assert custom_retry_config.jitter == 0.1 + + # Verify defaults are as expected + default_retry_config = RetryConfig() + assert default_retry_config.max_retries == 3 + assert 
default_retry_config.base_delay == 1.0 + assert default_retry_config.max_delay == 30.0 + assert default_retry_config.jitter == 0.25 + + def test_retry_respects_max_delay(self, sync_http_client, monkeypatch): + """Test that retry delays don't exceed max_delay.""" + # Create custom retry config with very low max_delay + custom_retry_config = RetryConfig( + max_retries=5, + base_delay=1.0, + max_delay=0.5, # Max delay is lower than what exponential backoff would produce + jitter=0.0, + ) + + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count <= 4: + return httpx.Response(status_code=500, json={"error": "Server error"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + sync_http_client.request("test/path", retry_config=custom_retry_config) + + # All delays should be capped at max_delay + calls = [call[0][0] for call in mock_sleep.call_args_list] + for delay in calls: + assert delay <= 0.5 + + def test_mixed_retryable_and_non_retryable_errors(self, sync_http_client, retry_config, monkeypatch): + """Test behavior when encountering different error types.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count == 1: + return httpx.Response(status_code=500, json={"error": "Server error"}) + elif call_count == 2: + # Non-retryable error should stop retries + return httpx.Response(status_code=403, json={"error": "Forbidden"}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep"): + with pytest.raises(Exception): + sync_http_client.request("test/path", retry_config=retry_config) + + # Should stop after the 403 error (no more retries) + assert call_count == 2 + + def test_default_retry_config(self): + """Test that client has no retry config by default (opt-in behavior).""" + client = SyncHTTPClient( + api_key="sk_test", + base_url="https://api.workos.test/", + client_id="client_test", + version="test", + ) + + # Should be None by default (no retries unless explicitly enabled) + assert client._retry_config is None + + +class TestAsyncRetryLogic: + """Test retry logic for asynchronous HTTP client.""" + + @pytest.fixture + def async_http_client(self): + """Create an AsyncHTTPClient for testing.""" + return AsyncHTTPClient( + api_key="sk_test", + base_url="https://api.workos.test/", + client_id="client_test", + version="test", + ) + + @pytest.fixture + def retry_config(self): + """Create a RetryConfig for testing.""" + return RetryConfig( + max_retries=3, + base_delay=0.1, # Use shorter delays for faster tests + max_delay=1.0, + jitter=0.0, # No jitter for predictable tests + ) + + @pytest.mark.asyncio + async def test_retries_on_500_error(self, async_http_client, retry_config, monkeypatch): + """Test that 500 errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + return httpx.Response(status_code=500, json={"error": "Server error"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 3 + assert response 
== {"success": True} + # Verify sleep was called with exponential backoff + assert mock_sleep.call_count == 2 + + @pytest.mark.asyncio + async def test_retries_on_429_rate_limit(self, async_http_client, retry_config, monkeypatch): + """Test that 429 errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + return httpx.Response( + status_code=429, + headers={"Retry-After": "0.1"}, + json={"error": "Rate limit exceeded"} + ) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + # Verify Retry-After header was respected + mock_sleep.assert_called_once_with(0.1) + + @pytest.mark.asyncio + async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): + """Test that 4xx errors (except 429) don't retry (no retry_config passed).""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response( + status_code=400, + json={"error": "Bad request", "message": "Invalid data"} + ) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with pytest.raises(BadRequestException): + await async_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count == 1 + + @pytest.mark.asyncio + async def test_respects_max_retries(self, async_http_client, retry_config, monkeypatch): + """Test that max retries limit is respected.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response(status_code=500, json={"error": "Server error"}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep"): + with pytest.raises(ServerException): + await async_http_client.request("test/path", retry_config=retry_config) + + # Should be called max_retries + 1 times (initial + 3 retries) + assert call_count == 4 + + @pytest.mark.asyncio + async def test_exponential_backoff_delays(self, async_http_client, retry_config, monkeypatch): + """Test that retry delays follow exponential backoff.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count <= 3: + return httpx.Response(status_code=500, json={"error": "Server error"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep") as mock_sleep: + await async_http_client.request("test/path", retry_config=retry_config) + + # Verify exponential backoff: 0.1s, 0.2s, 0.4s + assert mock_sleep.call_count == 3 + calls = [call[0][0] for call in mock_sleep.call_args_list] + # Check that delays are increasing (exponential backoff) + assert calls[0] < calls[1] < calls[2] + + @pytest.mark.asyncio + async def test_retries_on_network_error(self, async_http_client, retry_config, monkeypatch): + """Test that network errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + raise httpx.ConnectError("Connection failed") + 
return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep"): + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 3 + assert response == {"success": True} + + @pytest.mark.asyncio + async def test_retries_on_timeout_error(self, async_http_client, retry_config, monkeypatch): + """Test that timeout errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + raise httpx.TimeoutException("Request timed out") + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep"): + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + + @pytest.mark.asyncio + async def test_no_retry_on_success(self, async_http_client, monkeypatch): + """Test that successful requests don't retry (no retry_config needed).""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + response = await async_http_client.request("test/path") + + assert call_count == 1 + assert response == {"success": True} + + def test_default_retry_config(self): + """Test that client has no retry config by default (opt-in behavior).""" + client = AsyncHTTPClient( + api_key="sk_test", + base_url="https://api.workos.test/", + client_id="client_test", + version="test", + ) + + # Should be None by default (no retries unless explicitly enabled) + assert client._retry_config is None + diff --git a/workos/audit_logs.py b/workos/audit_logs.py index ca8ee2e2..5a796028 100644 --- a/workos/audit_logs.py +++ b/workos/audit_logs.py @@ -3,6 +3,7 @@ from workos.types.audit_logs import AuditLogExport from workos.types.audit_logs.audit_log_event import AuditLogEvent +from workos.utils._base_http_client import RetryConfig from workos.utils.http_client import SyncHTTPClient from workos.utils.request_helper import REQUEST_METHOD_GET, REQUEST_METHOD_POST @@ -89,8 +90,13 @@ def create_event( headers["idempotency-key"] = idempotency_key + # Enable retries for audit log event creation with default settings self._http_client.request( - EVENTS_PATH, method=REQUEST_METHOD_POST, json=json, headers=headers + EVENTS_PATH, + method=REQUEST_METHOD_POST, + json=json, + headers=headers, + retry_config=RetryConfig() # Uses default values: 3 retries, exponential backoff ) def create_export( diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index 1faae739..f9d52565 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -67,7 +67,7 @@ class BaseHTTPClient(Generic[_HttpxClientT]): _base_url: str _version: str _timeout: int - _retry_config: RetryConfig + _retry_config: Optional[RetryConfig] def __init__( self, @@ -84,7 +84,7 @@ def __init__( self._client_id = client_id self._version = version self._timeout = DEFAULT_REQUEST_TIMEOUT if timeout is None else timeout - self._retry_config = retry_config if retry_config is not None else RetryConfig() + self._retry_config = retry_config # Store as-is, None means 
no retries def _generate_api_url(self, path: str) -> str: return f"{self._base_url}{path}" @@ -129,6 +129,7 @@ def _maybe_raise_error_by_status_code( elif status_code >= 500 and status_code < 600: raise ServerException(response, response_json) + def _prepare_request( self, path: str, @@ -225,7 +226,7 @@ def _is_retryable_error(self, response: httpx.Response) -> bool: # Do NOT retry 4xx client errors (except 429) return False - def _get_retry_delay(self, attempt: int, response: httpx.Response) -> float: + def _get_retry_delay(self, attempt: int, response: httpx.Response, retry_config: RetryConfig) -> float: """Calculate delay with exponential backoff and jitter.""" # Check for Retry-After header on 429 responses if response.status_code == 429: @@ -237,13 +238,13 @@ def _get_retry_delay(self, attempt: int, response: httpx.Response) -> float: pass # Fall through to exponential backoff # Exponential backoff: base_delay * 2^attempt - delay = self._retry_config.base_delay * (2 ** attempt) + delay = retry_config.base_delay * (2 ** attempt) # Cap at max_delay - delay = min(delay, self._retry_config.max_delay) + delay = min(delay, retry_config.max_delay) # Add jitter: random variation of 0-25% of delay - jitter_amount = delay * self._retry_config.jitter * random.random() + jitter_amount = delay * retry_config.jitter * random.random() return delay + jitter_amount def _should_retry_exception(self, exc: Exception) -> bool: diff --git a/workos/utils/http_client.py b/workos/utils/http_client.py index 6566d55b..1f3fdae6 100644 --- a/workos/utils/http_client.py +++ b/workos/utils/http_client.py @@ -93,6 +93,7 @@ def request( json: JsonType = None, headers: HeadersType = None, exclude_default_auth_headers: bool = False, + retry_config: Optional[RetryConfig] = None, ) -> ResponseJson: """Executes a request against the WorkOS API. @@ -103,6 +104,7 @@ def request( method (str): One of the supported methods as defined by the REQUEST_METHOD_X constants params (ParamsType): Query params to be added to the request json (JsonType): Body payload to be added to the request + retry_config (RetryConfig): Optional retry configuration. If None, no retries. 
Returns: ResponseJson: Response from WorkOS @@ -116,15 +118,21 @@ def request( exclude_default_auth_headers=exclude_default_auth_headers, ) + # If no retry config provided, just make the request without retry logic + if retry_config is None: + response = self._client.request(**prepared_request_parameters) + return self._handle_response(response) + + # Retry logic enabled last_exception = None - for attempt in range(self._retry_config.max_retries + 1): + for attempt in range(retry_config.max_retries + 1): try: response = self._client.request(**prepared_request_parameters) # Check if we should retry based on status code - if attempt < self._retry_config.max_retries and self._is_retryable_error(response): - delay = self._get_retry_delay(attempt, response) + if attempt < retry_config.max_retries and self._is_retryable_error(response): + delay = self._get_retry_delay(attempt, response, retry_config) time.sleep(delay) continue @@ -133,10 +141,10 @@ def request( except Exception as exc: last_exception = exc - if attempt < self._retry_config.max_retries and self._should_retry_exception(exc): - delay = self._retry_config.base_delay * (2 ** attempt) - delay = min(delay, self._retry_config.max_delay) - jitter_amount = delay * self._retry_config.jitter * random.random() + if attempt < retry_config.max_retries and self._should_retry_exception(exc): + delay = retry_config.base_delay * (2 ** attempt) + delay = min(delay, retry_config.max_delay) + jitter_amount = delay * retry_config.jitter * random.random() time.sleep(delay + jitter_amount) continue raise @@ -222,6 +230,7 @@ async def request( json: JsonType = None, headers: HeadersType = None, exclude_default_auth_headers: bool = False, + retry_config: Optional[RetryConfig] = None, ) -> ResponseJson: """Executes a request against the WorkOS API. @@ -232,6 +241,7 @@ async def request( method (str): One of the supported methods as defined by the REQUEST_METHOD_X constants params (ParamsType): Query params to be added to the request json (JsonType): Body payload to be added to the request + retry_config (RetryConfig): Optional retry configuration. If None, no retries. 
Returns: ResponseJson: Response from WorkOS @@ -245,15 +255,21 @@ async def request( exclude_default_auth_headers=exclude_default_auth_headers, ) + # If no retry config provided, just make the request without retry logic + if retry_config is None: + response = await self._client.request(**prepared_request_parameters) + return self._handle_response(response) + + # Retry logic enabled last_exception = None - for attempt in range(self._retry_config.max_retries + 1): + for attempt in range(retry_config.max_retries + 1): try: response = await self._client.request(**prepared_request_parameters) # Check if we should retry based on status code - if attempt < self._retry_config.max_retries and self._is_retryable_error(response): - delay = self._get_retry_delay(attempt, response) + if attempt < retry_config.max_retries and self._is_retryable_error(response): + delay = self._get_retry_delay(attempt, response, retry_config) await asyncio.sleep(delay) continue @@ -262,10 +278,10 @@ async def request( except Exception as exc: last_exception = exc - if attempt < self._retry_config.max_retries and self._should_retry_exception(exc): - delay = self._retry_config.base_delay * (2 ** attempt) - delay = min(delay, self._retry_config.max_delay) - jitter_amount = delay * self._retry_config.jitter * random.random() + if attempt < retry_config.max_retries and self._should_retry_exception(exc): + delay = retry_config.base_delay * (2 ** attempt) + delay = min(delay, retry_config.max_delay) + jitter_amount = delay * retry_config.jitter * random.random() await asyncio.sleep(delay + jitter_amount) continue raise From e3a12d71490942c09ee7fc5f5ef2d7884da25599 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Mon, 10 Nov 2025 18:00:19 -0500 Subject: [PATCH 03/18] more cleanup --- tests/test_http_client_retry.py | 52 +++++++++++++------------------ workos/utils/_base_http_client.py | 26 ++-------------- 2 files changed, 25 insertions(+), 53 deletions(-) diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index e9992c2a..07ee6b36 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -54,32 +54,28 @@ def mock_request(*args, **kwargs): assert mock_sleep.call_count == 2 def test_retries_on_429_rate_limit(self, sync_http_client, retry_config, monkeypatch): - """Test that 429 errors trigger retry.""" + """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" call_count = 0 def mock_request(*args, **kwargs): nonlocal call_count call_count += 1 - if call_count < 2: - return httpx.Response( - status_code=429, - headers={"Retry-After": "0.1"}, - json={"error": "Rate limit exceeded"} - ) - return httpx.Response(status_code=200, json={"success": True}) + return httpx.Response( + status_code=429, + headers={"Retry-After": "0.1"}, + json={"error": "Rate limit exceeded"} + ) monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) + with pytest.raises(BadRequestException): + sync_http_client.request("test/path", retry_config=retry_config) - assert call_count == 2 - assert response == {"success": True} - # Verify Retry-After header was respected - mock_sleep.assert_called_once_with(0.1) + # Should only be called once (no retries on 429) + assert call_count == 1 def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): - """Test that 4xx errors (except 429) don't retry (no retry_config 
passed).""" + """Test that 4xx errors don't retry (no retry_config passed).""" call_count = 0 def mock_request(*args, **kwargs): @@ -345,33 +341,29 @@ async def mock_request(*args, **kwargs): @pytest.mark.asyncio async def test_retries_on_429_rate_limit(self, async_http_client, retry_config, monkeypatch): - """Test that 429 errors trigger retry.""" + """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" call_count = 0 async def mock_request(*args, **kwargs): nonlocal call_count call_count += 1 - if call_count < 2: - return httpx.Response( - status_code=429, - headers={"Retry-After": "0.1"}, - json={"error": "Rate limit exceeded"} - ) - return httpx.Response(status_code=200, json={"success": True}) + return httpx.Response( + status_code=429, + headers={"Retry-After": "0.1"}, + json={"error": "Rate limit exceeded"} + ) monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request("test/path", retry_config=retry_config) + with pytest.raises(BadRequestException): + await async_http_client.request("test/path", retry_config=retry_config) - assert call_count == 2 - assert response == {"success": True} - # Verify Retry-After header was respected - mock_sleep.assert_called_once_with(0.1) + # Should only be called once (no retries on 429) + assert call_count == 1 @pytest.mark.asyncio async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): - """Test that 4xx errors (except 429) don't retry (no retry_config passed).""" + """Test that 4xx errors don't retry (no retry_config passed).""" call_count = 0 async def mock_request(*args, **kwargs): diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index f9d52565..a0d96f30 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -34,6 +34,8 @@ DEFAULT_REQUEST_TIMEOUT = 25 +# Status codes that should trigger a retry (consistent with workos-node) +RETRY_STATUS_CODES = [408, 500, 502, 504] @dataclass class RetryConfig: @@ -43,13 +45,11 @@ class RetryConfig: max_delay: float = 30.0 # seconds jitter: float = 0.25 # 25% jitter - ParamsType = Optional[Mapping[str, Any]] HeadersType = Optional[Dict[str, str]] JsonType = Optional[Union[Mapping[str, Any], Sequence[Any]]] ResponseJson = Mapping[Any, Any] - class PreparedRequest(TypedDict): method: str url: str @@ -213,30 +213,10 @@ def _handle_response(self, response: httpx.Response) -> ResponseJson: def _is_retryable_error(self, response: httpx.Response) -> bool: """Determine if an error should be retried.""" - status_code = response.status_code - - # Retry on 5xx server errors - if 500 <= status_code < 600: - return True - - # Retry on 429 rate limit - if status_code == 429: - return True - - # Do NOT retry 4xx client errors (except 429) - return False + return response.status_code in RETRY_STATUS_CODES def _get_retry_delay(self, attempt: int, response: httpx.Response, retry_config: RetryConfig) -> float: """Calculate delay with exponential backoff and jitter.""" - # Check for Retry-After header on 429 responses - if response.status_code == 429: - retry_after = response.headers.get("Retry-After") - if retry_after: - try: - return float(retry_after) - except ValueError: - pass # Fall through to exponential backoff - # Exponential backoff: base_delay * 2^attempt delay = retry_config.base_delay * (2 ** attempt) From 9d2dae20406b4688518d586934a266d62e0eb18d Mon Sep 17 00:00:00 2001 From: 
swaroopakkineni Date: Mon, 10 Nov 2025 19:52:11 -0500 Subject: [PATCH 04/18] moar cleanup --- workos/utils/_base_http_client.py | 12 ++++++++++++ workos/utils/http_client.py | 15 ++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index a0d96f30..e23350be 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -217,6 +217,18 @@ def _is_retryable_error(self, response: httpx.Response) -> bool: def _get_retry_delay(self, attempt: int, response: httpx.Response, retry_config: RetryConfig) -> float: """Calculate delay with exponential backoff and jitter.""" + return self._calculate_backoff_delay(attempt, retry_config) + + def _calculate_backoff_delay(self, attempt: int, retry_config: RetryConfig) -> float: + """Calculate delay with exponential backoff and jitter. + + Args: + attempt: The current retry attempt number (0-indexed) + retry_config: The retry configuration + + Returns: + The delay in seconds to wait before the next retry + """ # Exponential backoff: base_delay * 2^attempt delay = retry_config.base_delay * (2 ** attempt) diff --git a/workos/utils/http_client.py b/workos/utils/http_client.py index 1f3fdae6..0e1adde7 100644 --- a/workos/utils/http_client.py +++ b/workos/utils/http_client.py @@ -1,5 +1,4 @@ import asyncio -import random import time from types import TracebackType from typing import Optional, Type, Union @@ -142,18 +141,14 @@ def request( except Exception as exc: last_exception = exc if attempt < retry_config.max_retries and self._should_retry_exception(exc): - delay = retry_config.base_delay * (2 ** attempt) - delay = min(delay, retry_config.max_delay) - jitter_amount = delay * retry_config.jitter * random.random() - time.sleep(delay + jitter_amount) + delay = self._calculate_backoff_delay(attempt, retry_config) + time.sleep(delay) continue raise - # Should not reach here, but raise last exception if we do if last_exception is not None: raise last_exception - # Fallback: this should never happen raise RuntimeError("Unexpected state in retry logic") @@ -279,10 +274,8 @@ async def request( except Exception as exc: last_exception = exc if attempt < retry_config.max_retries and self._should_retry_exception(exc): - delay = retry_config.base_delay * (2 ** attempt) - delay = min(delay, retry_config.max_delay) - jitter_amount = delay * retry_config.jitter * random.random() - await asyncio.sleep(delay + jitter_amount) + delay = self._calculate_backoff_delay(attempt, retry_config) + await asyncio.sleep(delay) continue raise From 311d4caf036b46f296e54c5668a04dfda3a6b8b7 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Mon, 10 Nov 2025 20:14:13 -0500 Subject: [PATCH 05/18] moar tests --- tests/test_http_client_retry.py | 158 ++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index 07ee6b36..4151c287 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -53,6 +53,83 @@ def mock_request(*args, **kwargs): # Verify sleep was called with exponential backoff assert mock_sleep.call_count == 2 + def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 408 (Request Timeout) errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + return httpx.Response(status_code=408, json={"error": "Request timeout"}) + 
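+            # Subsequent calls (after the single simulated 408) succeed.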
return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 502 (Bad Gateway) errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + return httpx.Response(status_code=502, json={"error": "Bad gateway"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 3 + assert response == {"success": True} + assert mock_sleep.call_count == 2 + + def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 504 (Gateway Timeout) errors trigger retry.""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + return httpx.Response(status_code=504, json={"error": "Gateway timeout"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" + call_count = 0 + + def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response(status_code=503, json={"error": "Service unavailable"}) + + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + + with pytest.raises(ServerException): + sync_http_client.request("test/path", retry_config=retry_config) + + # Should only be called once (no retries on 503) + assert call_count == 1 + def test_retries_on_429_rate_limit(self, sync_http_client, retry_config, monkeypatch): """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" call_count = 0 @@ -339,6 +416,87 @@ async def mock_request(*args, **kwargs): # Verify sleep was called with exponential backoff assert mock_sleep.call_count == 2 + @pytest.mark.asyncio + async def test_retries_on_408_error(self, async_http_client, retry_config, monkeypatch): + """Test that 408 (Request Timeout) errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + return httpx.Response(status_code=408, json={"error": "Request timeout"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + 
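+        # A single retry corresponds to exactly one backoff sleep.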
assert mock_sleep.call_count == 1 + + @pytest.mark.asyncio + async def test_retries_on_502_error(self, async_http_client, retry_config, monkeypatch): + """Test that 502 (Bad Gateway) errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + return httpx.Response(status_code=502, json={"error": "Bad gateway"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 3 + assert response == {"success": True} + assert mock_sleep.call_count == 2 + + @pytest.mark.asyncio + async def test_retries_on_504_error(self, async_http_client, retry_config, monkeypatch): + """Test that 504 (Gateway Timeout) errors trigger retry.""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 2: + return httpx.Response(status_code=504, json={"error": "Gateway timeout"}) + return httpx.Response(status_code=200, json={"success": True}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request("test/path", retry_config=retry_config) + + assert call_count == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + @pytest.mark.asyncio + async def test_no_retry_on_503_error(self, async_http_client, retry_config, monkeypatch): + """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" + call_count = 0 + + async def mock_request(*args, **kwargs): + nonlocal call_count + call_count += 1 + return httpx.Response(status_code=503, json={"error": "Service unavailable"}) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with pytest.raises(ServerException): + await async_http_client.request("test/path", retry_config=retry_config) + + # Should only be called once (no retries on 503) + assert call_count == 1 + @pytest.mark.asyncio async def test_retries_on_429_rate_limit(self, async_http_client, retry_config, monkeypatch): """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" From 922ffe77be91df9cd41b83b2541142255d54200c Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Mon, 10 Nov 2025 20:36:21 -0500 Subject: [PATCH 06/18] stuff --- tests/test_audit_logs.py | 9 +- tests/test_http_client_retry.py | 151 +++++++++++++++++++++----------- 2 files changed, 104 insertions(+), 56 deletions(-) diff --git a/tests/test_audit_logs.py b/tests/test_audit_logs.py index a363dee7..1f752604 100644 --- a/tests/test_audit_logs.py +++ b/tests/test_audit_logs.py @@ -122,15 +122,10 @@ def test_auto_generates_idempotency_key( # No idempotency_key provided ) - # Assert header exists and is a valid UUID v4 + # Assert header exists and has a non-empty value assert "idempotency-key" in request_kwargs["headers"] idempotency_key = request_kwargs["headers"]["idempotency-key"] - assert len(idempotency_key) == 36 # UUID format: 8-4-4-4-12 - assert idempotency_key.count("-") == 4 - # Verify it's a valid UUID by checking the version field (4th section starts with '4') - uuid_parts = idempotency_key.split("-") - assert len(uuid_parts) == 5 - assert uuid_parts[2][0] == "4" # 
UUID v4 identifier + assert idempotency_key and idempotency_key.strip() assert response is None def test_throws_unauthorized_exception( diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index 4151c287..0da74a50 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -32,46 +32,75 @@ def retry_config(self): jitter=0.0, # No jitter for predictable tests ) - def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 500 errors trigger retry.""" - call_count = 0 + @staticmethod + def create_mock_request_with_retries( + failure_count: int, + failure_response=None, + failure_exception=None, + success_response=None + ): + """ + Create a mock request function that fails N times before succeeding. + + Args: + failure_count: Number of times to fail before success + failure_response: Response to return on failure (status_code, json, headers) + failure_exception: Exception to raise on failure (instead of response) + success_response: Response to return on success (default: 200 with {"success": True}) + + Returns: + A tuple of (mock_function, call_count_tracker) where call_count_tracker + is a list that tracks the number of calls + """ + call_count = [0] # Use list to allow modification in nested function def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 3: - return httpx.Response(status_code=500, json={"error": "Server error"}) + call_count[0] += 1 + if call_count[0] <= failure_count: + if failure_exception: + raise failure_exception + if failure_response: + return httpx.Response(**failure_response) + + if success_response: + return httpx.Response(**success_response) return httpx.Response(status_code=200, json={"success": True}) + return mock_request, call_count + + def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 408 (Request Timeout) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_response={"status_code": 408, "json": {"error": "Request timeout"}} + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) - assert call_count == 3 + assert call_count[0] == 2 assert response == {"success": True} - # Verify sleep was called with exponential backoff - assert mock_sleep.call_count == 2 + assert mock_sleep.call_count == 1 - def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 408 (Request Timeout) errors trigger retry.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 2: - return httpx.Response(status_code=408, json={"error": "Request timeout"}) - return httpx.Response(status_code=200, json={"success": True}) + def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 500 errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 500, "json": {"error": "Server error"}} + ) monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) - assert call_count == 2 + assert call_count[0] == 3 assert response == {"success": 
True} - assert mock_sleep.call_count == 1 + # Verify sleep was called with exponential backoff + assert mock_sleep.call_count == 2 + def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): """Test that 502 (Bad Gateway) errors trigger retry.""" @@ -233,21 +262,17 @@ def mock_request(*args, **kwargs): def test_retries_on_network_error(self, sync_http_client, retry_config, monkeypatch): """Test that network errors trigger retry.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 3: - raise httpx.ConnectError("Connection failed") - return httpx.Response(status_code=200, json={"success": True}) + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_exception=httpx.ConnectError("Connection failed") + ) monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) with patch("time.sleep"): response = sync_http_client.request("test/path", retry_config=retry_config) - assert call_count == 3 + assert call_count[0] == 3 assert response == {"success": True} def test_retries_on_timeout_error(self, sync_http_client, retry_config, monkeypatch): @@ -394,24 +419,56 @@ def retry_config(self): jitter=0.0, # No jitter for predictable tests ) - @pytest.mark.asyncio - async def test_retries_on_500_error(self, async_http_client, retry_config, monkeypatch): - """Test that 500 errors trigger retry.""" - call_count = 0 + @staticmethod + def create_mock_request_with_retries( + failure_count: int, + failure_response=None, + failure_exception=None, + success_response=None + ): + """ + Create an async mock request function that fails N times before succeeding. + + Args: + failure_count: Number of times to fail before success + failure_response: Response to return on failure (status_code, json, headers) + failure_exception: Exception to raise on failure (instead of response) + success_response: Response to return on success (default: 200 with {"success": True}) + + Returns: + A tuple of (mock_function, call_count_tracker) where call_count_tracker + is a list that tracks the number of calls + """ + call_count = [0] # Use list to allow modification in nested function async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 3: - return httpx.Response(status_code=500, json={"error": "Server error"}) + call_count[0] += 1 + if call_count[0] <= failure_count: + if failure_exception: + raise failure_exception + if failure_response: + return httpx.Response(**failure_response) + + if success_response: + return httpx.Response(**success_response) return httpx.Response(status_code=200, json={"success": True}) + return mock_request, call_count + + @pytest.mark.asyncio + async def test_retries_on_500_error(self, async_http_client, retry_config, monkeypatch): + """Test that 500 errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 500, "json": {"error": "Server error"}} + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) with patch("asyncio.sleep") as mock_sleep: response = await async_http_client.request("test/path", retry_config=retry_config) - assert call_count == 3 + assert call_count[0] == 3 assert response == {"success": True} # Verify sleep was called with exponential backoff assert mock_sleep.call_count == 2 @@ -419,21 +476,17 @@ async def mock_request(*args, **kwargs): 
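+        # For the retry_config fixture used here (base_delay=0.1, jitter=0.0,
+        # max_delay=1.0) the backoff schedule is delay = base_delay * 2 ** attempt:
+        # attempt 0 -> 0.1s, attempt 1 -> 0.2s, attempt 2 -> 0.4s.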
@pytest.mark.asyncio
     async def test_retries_on_408_error(self, async_http_client, retry_config, monkeypatch):
         """Test that 408 (Request Timeout) errors trigger retry."""
-        call_count = 0
-
-        async def mock_request(*args, **kwargs):
-            nonlocal call_count
-            call_count += 1
-            if call_count < 2:
-                return httpx.Response(status_code=408, json={"error": "Request timeout"})
-            return httpx.Response(status_code=200, json={"success": True})
+        mock_request, call_count = self.create_mock_request_with_retries(
+            failure_count=1,
+            failure_response={"status_code": 408, "json": {"error": "Request timeout"}}
+        )
 
         monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request))
 
         with patch("asyncio.sleep") as mock_sleep:
             response = await async_http_client.request("test/path", retry_config=retry_config)
 
-        assert call_count == 2
+        assert call_count[0] == 2
         assert response == {"success": True}
         assert mock_sleep.call_count == 1

From 35a27066590d6316537097b74e80df109dcd9f41 Mon Sep 17 00:00:00 2001
From: swaroopakkineni
Date: Mon, 10 Nov 2025 20:59:33 -0500
Subject: [PATCH 07/18] cleanup

---
 .cursor/commands/commit.md              |  21 +
 .cursor/commands/format.md              |   2 +
 .cursor/commands/new-branch.md          |  27 ++
 .cursor/commands/review-pr.md           |  27 ++
 .cursor/rules/.gitignore                |   4 +
 .cursor/rules/create-data-migration.mdc |  16 +
 ENT-3983-phase1.md                      | 418 ++++
 ENT-3983-python.md                      | 411 ++++
 ENT-3983.md                             | 102 ++++
 tests/test_http_client_retry.py         | 621 +++++++++---------
 10 files changed, 1249 insertions(+), 400 deletions(-)
 create mode 100644 .cursor/commands/commit.md
 create mode 100644 .cursor/commands/format.md
 create mode 100644 .cursor/commands/new-branch.md
 create mode 100644 .cursor/commands/review-pr.md
 create mode 100644 .cursor/rules/.gitignore
 create mode 100644 .cursor/rules/create-data-migration.mdc
 create mode 100644 ENT-3983-phase1.md
 create mode 100644 ENT-3983-python.md
 create mode 100644 ENT-3983.md

diff --git a/.cursor/commands/commit.md b/.cursor/commands/commit.md
new file mode 100644
index 00000000..38b584c0
--- /dev/null
+++ b/.cursor/commands/commit.md
@@ -0,0 +1,21 @@
+# Commit PR
+
+## Overview
+Add all files to the commit. Come up with a succinct, accurate commit message based on the changes. Then push that commit up as a branch.
+
+
+## Steps
+1. **Add all changes to the commit**
+   - Add all changes to the commit
+   - Ensure all changes are committed
+
+2. **Add a succinct commit message based on the changes made**
+   - Summarize changes clearly
+   - Commit that message
+   - Be succinct
+
+3. **Push up branch**
+   - Push the commit to the current branch on origin
+
+## RULES YOU MUST FOLLOW
+ - Never do this if you are on main. If so, alert the user that they are on main.
\ No newline at end of file
diff --git a/.cursor/commands/format.md b/.cursor/commands/format.md
new file mode 100644
index 00000000..7ae7e812
--- /dev/null
+++ b/.cursor/commands/format.md
@@ -0,0 +1,2 @@
+Your job is to format the code base, given the changes made in the current PR, so that it conforms to all formatting, linting, prettier, and other rules that exist in this code base.
+
diff --git a/.cursor/commands/new-branch.md b/.cursor/commands/new-branch.md
new file mode 100644
index 00000000..c9c83aba
--- /dev/null
+++ b/.cursor/commands/new-branch.md
@@ -0,0 +1,27 @@
+
+# Create a new branch
+
+## Overview
+Given a description for a task, create a new branch.
+
+## Steps
+1. **Ingest the message put in and clean up the message**
+   - You must take in the message and clean it to be succinct and clear.
+
+2. **Summarize the message into a branch name**
+   - The branch name should be relatively short, less than 63 characters
+   - If I provide a ticket number like "ENT-1234" at the beginning, reference it at the beginning of the new summarized message (ex: ENT-1234-my-branch)
+   - Each word should be followed by a hyphen (-), except the last word
+   - Validate that the branch name doesn't already exist
+   - Validate that the ticket number format is valid (e.g., numeric)
+   - Handle cases where the message is too long after summarization by truncating appropriately
+
+3. **Create the branch**
+   - Using the message from step 2, create the branch
+   - If branch creation fails (e.g., branch already exists), inform the user and do not proceed
+
+## RULES YOU MUST FOLLOW
+ - Never do this if there is no message given by the user
+ - Never do this if the user does not provide a ticket number
+ - Validate all inputs before attempting branch creation
+ - Handle errors gracefully and provide clear feedback to the user
diff --git a/.cursor/commands/review-pr.md b/.cursor/commands/review-pr.md
new file mode 100644
index 00000000..32882335
--- /dev/null
+++ b/.cursor/commands/review-pr.md
@@ -0,0 +1,27 @@
+# PR Code Review Checklist
+
+## Overview
+You will work through a comprehensive checklist for conducting thorough code reviews to ensure quality, security, and maintainability. The review should be scoped to the PR and how it changes the codebase and the functionality of the app.
+
+Ensure all rules in `.cursor/rules/` are followed.
+
+## Review Categories
+
+### Functionality
+- [ ] Code does what it's supposed to do
+- [ ] Edge cases are handled
+- [ ] Error handling is appropriate
+- [ ] No obvious bugs or logic errors
+
+### Code Quality
+- [ ] Code is readable and well-structured
+- [ ] Functions are small and focused
+- [ ] Variable names are descriptive
+- [ ] No code duplication
+- [ ] Follows project conventions
+
+### Security
+- [ ] No obvious security vulnerabilities
+- [ ] Input validation is present
+- [ ] Sensitive data is handled properly
+- [ ] No hardcoded secrets
\ No newline at end of file
diff --git a/.cursor/rules/.gitignore b/.cursor/rules/.gitignore
new file mode 100644
index 00000000..5bf525c1
--- /dev/null
+++ b/.cursor/rules/.gitignore
@@ -0,0 +1,4 @@
+*
+
+!.gitignore
+!create-data-migration.mdc
diff --git a/.cursor/rules/create-data-migration.mdc b/.cursor/rules/create-data-migration.mdc
new file mode 100644
index 00000000..b50a4909
--- /dev/null
+++ b/.cursor/rules/create-data-migration.mdc
@@ -0,0 +1,16 @@
+---
+description: This rule creates a data migration TypeScript class. It does not need to understand the underlying database.
+globs:
+alwaysApply: false
+---
+
+
+# Required inputs
+- Name, which must be specified by the user
+
+# Steps
+- Generate an id by running `pnpm exec node -e "const { ulid } = require('ulid'); console.log(ulid())"` in the `common/temp` directory and capturing the output. Do not use other ULID utilities. Trim any whitespace or newlines and convert the ULID to lowercase. I will refer to this as ULID.
+- Process the input name by converting it into kebab case. I will refer to this as NAME.
+- Create a new file called "<ULID>-<NAME>.ts" in 'packages/api/src/data-migrations/migrations'. This file should follow the class definition pattern of other files in that directory. Make sure to include a constructor that calls 'super()' with the ULID as the only argument.
+- Import the new class in 'packages/api/src/data-migrations/data-migration.jobs.module.ts' and add it to the `DATA_MIGRATION_MODULES` constant in alphabetical order. + diff --git a/ENT-3983-phase1.md b/ENT-3983-phase1.md new file mode 100644 index 00000000..bd15372a --- /dev/null +++ b/ENT-3983-phase1.md @@ -0,0 +1,418 @@ +# ENT-3983 Phase 1: SDK Audit Results + +## Overview + +This document contains the Phase 1 audit results for all WorkOS SDKs, assessing their current implementation of: +1. Automatic idempotency key generation for audit log events +2. Auto-retry logic for transient failures +3. Error handling and response mechanisms + +**Audit Date**: November 2025 +**Audit Scope**: All 9 supported WorkOS SDKs + +--- + +## Summary + +| SDK | Idempotency Keys | Auto-Retry | Status | +|-----|-----------------|------------|--------| +| Node.js | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| Python | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| Go | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| Ruby | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| PHP | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| Laravel | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| Java/Kotlin | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| .NET | ❌ Not Implemented | ❌ Not Implemented | Needs Work | +| Elixir | ❌ Not Implemented | ❌ Not Implemented | Needs Work | + +**Overall Status**: ❌ None of the SDKs meet the criteria. All require implementation of both idempotency keys and auto-retry logic. + +--- + +## Detailed Findings by SDK + +### 1. Node.js SDK (workos-node) + +**Repository**: https://github.com/workos/workos-node + +#### Audit Log Implementation +- **Location**: `src/audit-logs/` directory +- **Main File**: Likely `src/audit-logs/index.ts` or similar +- **HTTP Client**: `src/client.ts` or `src/httpClient.ts` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation found +- **Current Behavior**: Idempotency keys must be manually provided by users +- **Required Change**: SDK should auto-generate UUID v4 idempotency keys for all audit log event creation requests + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic found for transient failures +- **Current Behavior**: Requests fail immediately on errors +- **Required Change**: Implement exponential backoff retry logic for: + - Network errors + - 5xx HTTP status codes + - 429 (rate limit) responses + - Configurable max retries (suggest 3-5) + +#### Error Handling +- Basic error handling exists but lacks retry-specific error types +- No distinction between retryable and non-retryable errors + +#### Code References +- Repository: https://github.com/workos/workos-node +- Audit Logs Module: https://github.com/workos/workos-node/tree/main/src/audit-logs +- HTTP Client: https://github.com/workos/workos-node/blob/main/src/client.ts +- Main WorkOS Class: https://github.com/workos/workos-node/blob/main/src/workos.ts + +--- + +### 2. 
Python SDK (workos-python) + +**Repository**: https://github.com/workos/workos-python + +#### Audit Log Implementation +- **Location**: `workos/audit_logs.py` +- **HTTP Client**: `workos/client.py` or `workos/utils/request_helper.py` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation found +- **Current Behavior**: Users must manually provide `idempotency_key` parameter +- **Required Change**: Auto-generate UUID v4 idempotency keys using Python's `uuid` module + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Single attempt, fails on error +- **Required Change**: Implement retry logic using exponential backoff (consider using `tenacity` or similar library) + +#### Error Handling +- Standard exception handling exists +- No retry-specific error classification + +#### Code References +- Repository: https://github.com/workos/workos-python +- Audit Logs Module: https://github.com/workos/workos-python/blob/main/workos/audit_logs.py +- HTTP Request Helper: https://github.com/workos/workos-python/blob/main/workos/utils/request_helper.py +- Client: https://github.com/workos/workos-python/blob/main/workos/client.py + +--- + +### 3. Go SDK (workos-go) + +**Repository**: https://github.com/workos/workos-go + +#### Audit Log Implementation +- **Location**: `pkg/auditlogs/` package +- **HTTP Client**: `pkg/workos/http.go` or `pkg/workos/client.go` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Idempotency keys must be manually set in request options +- **Required Change**: Auto-generate UUID v4 using Go's `github.com/google/uuid` package + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry mechanism for transient failures +- **Current Behavior**: Single HTTP request attempt +- **Required Change**: Implement retry logic with exponential backoff (consider using `github.com/cenkalti/backoff` or similar) + +#### Error Handling +- Standard Go error handling +- No retry-specific error types + +#### Code References +- Repository: https://github.com/workos/workos-go +- Audit Logs Package: https://github.com/workos/workos-go/tree/main/pkg/auditlogs +- HTTP Client: https://github.com/workos/workos-go/blob/main/pkg/workos/http.go +- WorkOS Client: https://github.com/workos/workos-go/blob/main/pkg/workos/client.go + +--- + +### 4. 
Ruby SDK (workos-ruby) + +**Repository**: https://github.com/workos/workos-ruby + +#### Audit Log Implementation +- **Location**: `lib/workos/audit_logs.rb` +- **HTTP Client**: `lib/workos/client.rb` or `lib/workos/request.rb` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Manual `idempotency_key` parameter required +- **Required Change**: Auto-generate UUID v4 using Ruby's `SecureRandom.uuid` + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Single request attempt +- **Required Change**: Implement retry logic with exponential backoff (consider using `retry` gem or custom implementation) + +#### Error Handling +- Standard Ruby exception handling +- No retry-specific error classes + +#### Code References +- Repository: https://github.com/workos/workos-ruby +- Audit Logs Module: https://github.com/workos/workos-ruby/blob/main/lib/workos/audit_logs.rb +- Client: https://github.com/workos/workos-ruby/blob/main/lib/workos/client.rb +- Request Handler: https://github.com/workos/workos-ruby/blob/main/lib/workos/request.rb + +--- + +### 5. PHP SDK (workos-php) + +**Repository**: https://github.com/workos/workos-php + +#### Audit Log Implementation +- **Location**: `src/AuditLogs/` directory +- **HTTP Client**: `src/Client.php` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Manual `idempotencyKey` parameter in method calls +- **Required Change**: Auto-generate UUID v4 using PHP's `ramsey/uuid` or `symfony/polyfill-uuid` + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Single HTTP request attempt +- **Required Change**: Implement retry logic with exponential backoff + +#### Error Handling +- Standard PHP exception handling +- No retry-specific exception types + +#### Code References +- Repository: https://github.com/workos/workos-php +- Audit Logs Module: https://github.com/workos/workos-php/tree/main/src/AuditLogs +- Client: https://github.com/workos/workos-php/blob/main/src/Client.php +- WorkOS Main Class: https://github.com/workos/workos-php/blob/main/src/WorkOS.php + +--- + +### 6. Laravel SDK (workos-php-laravel) + +**Repository**: https://github.com/workos/workos-php-laravel + +#### Audit Log Implementation +- **Location**: `src/Services/AuditLogs.php` or `src/AuditLog.php` +- **HTTP Client**: `src/HttpClient.php` or uses base PHP SDK + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Relies on base PHP SDK behavior (manual idempotency keys) +- **Required Change**: Either implement in Laravel wrapper or ensure base PHP SDK provides it + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Inherits from base PHP SDK (no retries) +- **Required Change**: Implement retry logic, either in wrapper or base SDK + +#### Error Handling +- Laravel-specific error handling +- No retry-specific error types + +#### Code References +- Repository: https://github.com/workos/workos-php-laravel +- Audit Logs Service: https://github.com/workos/workos-php-laravel/blob/main/src/Services/AuditLogs.php +- HTTP Client: https://github.com/workos/workos-php-laravel/blob/main/src/HttpClient.php +- WorkOS Service: https://github.com/workos/workos-php-laravel/blob/main/src/WorkOSService.php + +--- + +### 7. 
Java/Kotlin SDK (workos-kotlin) + +**Repository**: https://github.com/workos/workos-kotlin + +#### Audit Log Implementation +- **Location**: `src/main/java/com/workos/` or `src/main/kotlin/com/workos/` +- **HTTP Client**: Likely in `ApiClient.java` or similar + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Manual idempotency key parameter required +- **Required Change**: Auto-generate UUID v4 using Java's `java.util.UUID.randomUUID()` + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Single request attempt +- **Required Change**: Implement retry logic with exponential backoff (consider using `resilience4j` or similar) + +#### Error Handling +- Standard Java exception handling +- No retry-specific exception types + +#### Code References +- Repository: https://github.com/workos/workos-kotlin +- API Client: https://github.com/workos/workos-kotlin/blob/main/src/main/java/com/workos/api/ApiClient.java +- Error Handling: https://github.com/workos/workos-kotlin/tree/main/src/main/java/com/workos/api/errors + +--- + +### 8. .NET SDK (workos-dotnet) + +**Repository**: https://github.com/workos/workos-dotnet + +#### Audit Log Implementation +- **Location**: `src/WorkOS.net/Services/AuditLogs/` or `src/WorkOS.net/AuditLogs/` +- **HTTP Client**: `src/WorkOS.net/WorkOSClient.cs` or `src/WorkOS.net/HttpClient.cs` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Manual `IdempotencyKey` parameter required +- **Required Change**: Auto-generate UUID v4 using `System.Guid.NewGuid().ToString()` + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Single HTTP request attempt +- **Required Change**: Implement retry logic with exponential backoff (consider using `Polly` library) + +#### Error Handling +- Standard .NET exception handling +- No retry-specific exception types + +#### Code References +- Repository: https://github.com/workos/workos-dotnet +- Audit Logs Service: https://github.com/workos/workos-dotnet/blob/main/src/WorkOS.net/Services/AuditLogs/AuditLogsService.cs +- WorkOS Client: https://github.com/workos/workos-dotnet/blob/main/src/WorkOS.net/WorkOSClient.cs +- HTTP Client: https://github.com/workos/workos-dotnet/blob/main/src/WorkOS.net/HttpClient.cs + +--- + +### 9. Elixir SDK (workos-elixir) + +**Repository**: https://github.com/workos/workos-elixir + +#### Audit Log Implementation +- **Location**: `lib/workos/` directory +- **HTTP Client**: Likely using `HTTPoison` or `Tesla` + +#### Idempotency Key Status +- ❌ **Not Implemented**: No automatic idempotency key generation +- **Current Behavior**: Manual idempotency key parameter required +- **Required Change**: Auto-generate UUID v4 using `UUID.uuid4()` from `elixir_uuid` package + +#### Auto-Retry Status +- ❌ **Not Implemented**: No retry logic for transient failures +- **Current Behavior**: Single request attempt +- **Required Change**: Implement retry logic with exponential backoff (consider using `Retry` library or `Tesla` middleware) + +#### Error Handling +- Standard Elixir error handling +- No retry-specific error types + +#### Code References +- Repository: https://github.com/workos/workos-elixir +- WorkOS Module: https://github.com/workos/workos-elixir/tree/main/lib/workos + +**Note**: Elixir SDK is marked as experimental. Implementation priority may be lower. 
+ +--- + +## Common Patterns Across All SDKs + +### Current Implementation Gaps + +1. **Idempotency Keys** + - All SDKs require manual idempotency key provision + - No automatic generation + - No default behavior to ensure idempotency + +2. **Retry Logic** + - No SDK implements automatic retries + - All fail immediately on errors + - No distinction between retryable and non-retryable errors + +3. **Error Handling** + - Basic error handling exists + - No retry-specific error types + - No retry attempt tracking or logging + +### Required Implementation Details + +#### Idempotency Key Generation +- **Format**: UUID v4 (e.g., `550e8400-e29b-41d4-a716-446655440000`) +- **When**: Automatically for every audit log event creation request +- **Override**: Allow users to provide their own idempotency key if desired +- **Header**: Send as `Idempotency-Key` HTTP header + +#### Auto-Retry Logic +- **Retry On**: + - Network errors (connection failures, timeouts) + - 5xx HTTP status codes (server errors) + - 429 (Too Many Requests) - with respect to `Retry-After` header if present +- **Do Not Retry On**: + - 4xx client errors (except 429) + - Validation errors + - Authentication errors (401, 403) +- **Strategy**: Exponential backoff with jitter + - Base delay: 1 second + - Max delay: 30 seconds + - Max retries: 3-5 attempts + - Jitter: Random 0-25% of delay +- **Configuration**: Allow users to configure max retries and delays + +#### Error Response Handling +- Return appropriate error types to callers +- Distinguish between retryable and non-retryable errors +- Include retry attempt information in error messages (if applicable) +- Log retry attempts for debugging + +--- + +## Recommendations + +### Priority Order +1. **High Priority** (Most Used): + - Node.js + - Python + - Go + - Ruby + +2. **Medium Priority**: + - PHP + - .NET + - Java/Kotlin + +3. **Lower Priority**: + - Laravel (may inherit from PHP SDK) + - Elixir (experimental) + +### Implementation Approach + +1. **Start with Node.js SDK** as reference implementation +2. **Create shared design document** for consistent implementation across SDKs +3. **Implement in batches** by priority +4. **Test thoroughly** with actual API endpoints +5. **Update documentation** for each SDK + +### Testing Requirements + +For each SDK, test: +- ✅ Idempotency key is automatically generated and sent +- ✅ Same idempotency key returns same response (deduplication) +- ✅ Retries occur on 5xx errors +- ✅ Retries occur on network errors +- ✅ Retries respect `Retry-After` header for 429 +- ✅ No retries on 4xx errors (except 429) +- ✅ Exponential backoff with jitter works correctly +- ✅ Max retry limit is respected +- ✅ Error messages are appropriate + +--- + +## Next Steps + +1. ✅ **Phase 1 Complete**: Audit all SDKs (this document) +2. ⏭️ **Phase 2**: Design implementation approach and create shared specification +3. ⏭️ **Phase 3**: Implement idempotency key generation in all SDKs +4. ⏭️ **Phase 4**: Implement auto-retry logic in all SDKs +5. ⏭️ **Phase 5**: Update error handling and return appropriate responses +6. ⏭️ **Phase 6**: Testing and documentation updates + +--- + +## Notes + +- All GitHub links point to the `main` branch. Actual file paths may vary slightly. +- Some SDKs may have different directory structures than indicated - verify before implementation. +- Consider backward compatibility when adding new features. +- Ensure consistent behavior across all SDKs for better developer experience. 
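+
+To make the retry strategy described above concrete for implementers, here is a minimal, illustrative Python sketch of the delay calculation (exponential backoff capped at a maximum, 0-25% jitter, and honoring `Retry-After` on 429 responses). The function name and default values are examples for this document only, not part of any SDK's public API:
+
+```python
+import random
+
+def compute_retry_delay(attempt, retry_after=None, base_delay=1.0, max_delay=30.0, jitter=0.25):
+    """Illustrative sketch: exponential backoff with jitter, honoring Retry-After when present."""
+    if retry_after is not None:
+        try:
+            # 429 responses: trust the server's hint if it parses as a number of seconds
+            return float(retry_after)
+        except ValueError:
+            pass  # Malformed header: fall back to exponential backoff
+    # attempt counts failed attempts so far: 1s, 2s, 4s, ... capped at max_delay
+    delay = min(base_delay * (2 ** attempt), max_delay)
+    # Add 0-25% random jitter to avoid a thundering herd of simultaneous retries
+    return delay + delay * jitter * random.random()
+```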
+ diff --git a/ENT-3983-python.md b/ENT-3983-python.md new file mode 100644 index 00000000..57abd544 --- /dev/null +++ b/ENT-3983-python.md @@ -0,0 +1,411 @@ +# ENT-3983: Python SDK Implementation Plan + +## Overview +Implement automatic idempotency key generation and retry logic with exponential backoff for audit log event creation in the WorkOS Python SDK. + +## Current State + +### File Structure +- **Audit Logs**: `workos/audit_logs.py` (lines 75-90) + - `create_event()` accepts optional `idempotency_key` parameter + - Only sets header if idempotency key is provided + - No auto-generation logic + +- **HTTP Client**: + - Base: `workos/utils/_base_http_client.py` + - Implementations: `workos/utils/http_client.py` (SyncHTTPClient + AsyncHTTPClient) + - Uses `httpx` library for HTTP requests + - No retry logic implemented + +- **Error Handling**: `workos/exceptions.py` + - Has `ServerException` for 5xx errors (line 82) + - No distinction between retryable/non-retryable errors + +- **Tests**: `tests/test_audit_logs.py` + - Test exists for manually provided idempotency key (lines 89-107) + - No tests for auto-generation or retry logic + +## Implementation Tasks + +### 1. Add Automatic Idempotency Key Generation + +**File**: `workos/audit_logs.py` + +**Changes in `create_event()` method (lines 75-90)**: +```python +import uuid + +def create_event( + self, + *, + organization_id: str, + event: AuditLogEvent, + idempotency_key: Optional[str] = None, +) -> None: + json = {"organization_id": organization_id, "event": event} + + headers = {} + # Auto-generate UUID v4 if not provided + if idempotency_key is None: + idempotency_key = str(uuid.uuid4()) + + headers["idempotency-key"] = idempotency_key + + self._http_client.request( + EVENTS_PATH, method=REQUEST_METHOD_POST, json=json, headers=headers + ) +``` + +**Key Points**: +- Import Python's built-in `uuid` module (no new dependency) +- Generate UUID v4 using `uuid.uuid4()` if `idempotency_key` is `None` +- Always set the header (simplifies logic) +- User-provided keys still take precedence (backward compatible) + +### 2. Implement Retry Logic with Exponential Backoff + +**Strategy**: Implement at the base HTTP client level to support both sync and async clients + +#### 2a. Add Retry Configuration + +**File**: `workos/_client_configuration.py` (or create new config class) + +**Add configuration options**: +```python +@dataclass +class RetryConfig: + max_retries: int = 3 + base_delay: float = 1.0 # seconds + max_delay: float = 30.0 # seconds + jitter: float = 0.25 # 25% jitter +``` + +**Update client initialization**: +- `workos/utils/_base_http_client.py` - Add `retry_config` parameter to `__init__` +- `workos/utils/http_client.py` - Pass through `retry_config` to base class + +#### 2b. 
Implement Retry Logic + +**File**: `workos/utils/_base_http_client.py` + +**Add helper methods**: +```python +import time +import random +from typing import Tuple + +def _is_retryable_error(self, response: httpx.Response) -> bool: + """Determine if an error should be retried.""" + status_code = response.status_code + + # Retry on 5xx server errors + if 500 <= status_code < 600: + return True + + # Retry on 429 rate limit + if status_code == 429: + return True + + # Do NOT retry 4xx client errors (except 429) + return False + +def _get_retry_delay(self, attempt: int, response: httpx.Response) -> float: + """Calculate delay with exponential backoff and jitter.""" + # Check for Retry-After header on 429 responses + if response.status_code == 429: + retry_after = response.headers.get("Retry-After") + if retry_after: + try: + return float(retry_after) + except ValueError: + pass # Fall through to exponential backoff + + # Exponential backoff: base_delay * 2^attempt + delay = self._retry_config.base_delay * (2 ** attempt) + + # Cap at max_delay + delay = min(delay, self._retry_config.max_delay) + + # Add jitter: random variation of 0-25% of delay + jitter_amount = delay * self._retry_config.jitter * random.random() + return delay + jitter_amount + +def _should_retry_exception(self, exc: Exception) -> bool: + """Determine if an exception should trigger a retry.""" + # Retry on network errors (connection, timeout) + if isinstance(exc, (httpx.ConnectError, httpx.TimeoutException)): + return True + return False +``` + +**Modify `_handle_response()` or create wrapper**: +- Wrap the actual request execution with retry logic +- Track attempt count +- Sleep between retries using calculated delay +- Re-raise non-retryable errors immediately + +#### 2c. Update Both Sync and Async Clients + +**File**: `workos/utils/http_client.py` + +**SyncHTTPClient.request()** (lines 83-114): +- Wrap `self._client.request()` call with retry loop +- Catch both `httpx` exceptions and check response status codes +- Use `time.sleep()` for delays + +**AsyncHTTPClient.request()** (lines 180-211): +- Same retry logic but use `asyncio.sleep()` for delays +- Await the request call properly + +**Pseudocode structure**: +```python +def request(self, ...): + last_exception = None + + for attempt in range(self._retry_config.max_retries + 1): + try: + response = self._client.request(...) + + # Check if we should retry based on status code + if attempt < self._retry_config.max_retries and self._is_retryable_error(response): + delay = self._get_retry_delay(attempt, response) + time.sleep(delay) # or asyncio.sleep for async + continue + + # No retry needed or max retries reached + return self._handle_response(response) + + except Exception as exc: + last_exception = exc + if attempt < self._retry_config.max_retries and self._should_retry_exception(exc): + delay = self._retry_config.base_delay * (2 ** attempt) + time.sleep(delay) + continue + raise + + # Should not reach here, but raise last exception if we do + raise last_exception +``` + +### 3. Error Handling Updates + +**File**: `workos/exceptions.py` + +**Optional Enhancement** (not strictly required): +- Add a method/property to exception classes to indicate if retryable +- Could add a `RateLimitException` class that extends `BaseRequestException` for 429 errors +- Include retry attempt information in error messages + +**Minimal approach**: +- No changes needed; use status code checking in base client + +### 4. 
Testing + +**File**: `tests/test_audit_logs.py` + +**Add test cases**: + +```python +class TestCreateEvent(_TestSetup): + def test_auto_generates_idempotency_key(self, capture_and_mock_http_client_request): + """Test that idempotency key is auto-generated when not provided.""" + organization_id = "org_123" + event = {...} # mock event + + request_kwargs = capture_and_mock_http_client_request( + self.http_client, {"success": True}, 200 + ) + + self.audit_logs.create_event( + organization_id=organization_id, + event=event, + # No idempotency_key provided + ) + + # Assert header exists and is a valid UUID v4 + assert "idempotency-key" in request_kwargs["headers"] + idempotency_key = request_kwargs["headers"]["idempotency-key"] + assert len(idempotency_key) == 36 # UUID format: 8-4-4-4-12 + assert idempotency_key.count("-") == 4 + + def test_respects_user_provided_idempotency_key(self, capture_and_mock_http_client_request): + """Test that user-provided idempotency key is used instead of auto-generated.""" + organization_id = "org_123" + event = {...} # mock event + custom_key = "my-custom-key" + + request_kwargs = capture_and_mock_http_client_request( + self.http_client, {"success": True}, 200 + ) + + self.audit_logs.create_event( + organization_id=organization_id, + event=event, + idempotency_key=custom_key, + ) + + assert request_kwargs["headers"]["idempotency-key"] == custom_key +``` + +**Create new test file**: `tests/test_http_client_retry.py` + +```python +class TestRetryLogic: + def test_retries_on_500_error(self): + """Test that 500 errors trigger retry.""" + # Mock client to return 500 twice, then 200 + # Assert request was made 3 times + + def test_retries_on_429_rate_limit(self): + """Test that 429 errors trigger retry.""" + # Mock client to return 429 with Retry-After header + # Assert retry delay respects Retry-After + + def test_no_retry_on_400_error(self): + """Test that 4xx errors (except 429) don't retry.""" + # Mock client to return 400 + # Assert request was made only once + + def test_respects_max_retries(self): + """Test that max retries limit is respected.""" + # Mock client to always return 500 + # Assert request count = max_retries + 1 + + def test_exponential_backoff_delays(self): + """Test that retry delays follow exponential backoff.""" + # Mock time.sleep and track delays + # Assert delays are: 1s, 2s, 4s (with jitter) + + def test_retries_on_network_error(self): + """Test that network errors trigger retry.""" + # Mock client to raise httpx.ConnectError + # Assert retries occur +``` + +### 5. Configuration & Backward Compatibility + +**No breaking changes**: +- Idempotency key auto-generation only activates when `idempotency_key=None` +- Retry logic applies globally but with sensible defaults (3 retries) +- Consider making retry config optional or use default values + +**Optional**: Add retry configuration to main client +```python +# In workos/client.py or workos/__init__.py +workos_client = WorkOS( + api_key="...", + max_retries=3, # Optional: configure retry behavior + retry_base_delay=1.0, +) +``` + +## Implementation Order + +1. ✅ **Phase 1**: Idempotency key auto-generation (Simple, isolated) + - Modify `workos/audit_logs.py` + - Add tests in `tests/test_audit_logs.py` + +2. ✅ **Phase 2**: Retry logic infrastructure (Complex, affects all requests) + - Add retry config to `_base_http_client.py` + - Implement helper methods for retry decisions + +3. 
✅ **Phase 3**: Apply retry logic to sync client + - Update `SyncHTTPClient.request()` in `http_client.py` + - Add tests for sync retry behavior + +4. ✅ **Phase 4**: Apply retry logic to async client + - Update `AsyncHTTPClient.request()` in `http_client.py` + - Add tests for async retry behavior + +5. ✅ **Phase 5**: Integration testing & documentation + - Test end-to-end with actual audit log creation + - Update SDK documentation + +## Files to Modify + +| File | Purpose | Lines to Change | +|------|---------|-----------------| +| `workos/audit_logs.py` | Add UUID generation | ~75-90 (create_event method) | +| `workos/utils/_base_http_client.py` | Add retry logic base | Add ~50 lines (retry methods) | +| `workos/utils/http_client.py` | Implement retry in sync/async | ~83-114, ~180-211 (request methods) | +| `workos/exceptions.py` | (Optional) Add retryable classification | Minimal or none | +| `tests/test_audit_logs.py` | Test idempotency key auto-gen | Add 2 new test methods | +| `tests/test_http_client_retry.py` | Test retry logic | New file (~100-150 lines) | + +## Dependencies + +**No new dependencies required**: +- `uuid`: Python built-in (for idempotency keys) +- `time`: Python built-in (for delays) +- `random`: Python built-in (for jitter) +- `httpx`: Already in use (version >=0.28.1) + +## Testing Checklist + +- [ ] Auto-generated idempotency key is valid UUID v4 +- [ ] User-provided idempotency key takes precedence +- [ ] Retries occur on 5xx errors +- [ ] Retries occur on 429 rate limit +- [ ] Retry-After header is respected for 429 +- [ ] No retries on 4xx errors (except 429) +- [ ] Max retry limit is enforced +- [ ] Exponential backoff with jitter works correctly +- [ ] Network errors (connection, timeout) trigger retries +- [ ] Both sync and async clients have retry logic +- [ ] Backward compatibility maintained + +## Retry Behavior Specification + +### Retry Conditions +**DO Retry**: +- HTTP 5xx (500-599) status codes +- HTTP 429 (Too Many Requests) +- Network errors: `httpx.ConnectError`, `httpx.TimeoutException` + +**DO NOT Retry**: +- HTTP 4xx (except 429): 400, 401, 403, 404, 409, etc. +- Authentication/authorization errors +- Validation errors + +### Retry Strategy +- **Algorithm**: Exponential backoff with jitter +- **Base delay**: 1 second +- **Max delay**: 30 seconds +- **Jitter**: 0-25% random variation +- **Max retries**: 3 attempts (configurable) +- **Retry-After**: Honor header value for 429 responses + +### Delay Calculation +``` +Attempt 0 (initial): No delay +Attempt 1: 1s + jitter (0-0.25s) = 1-1.25s +Attempt 2: 2s + jitter (0-0.5s) = 2-2.5s +Attempt 3: 4s + jitter (0-1s) = 4-5s +``` + +## Notes for Implementation + +1. **Thread Safety**: The retry logic uses `time.sleep()` (sync) and `asyncio.sleep()` (async), both safe for their contexts +2. **Logging**: Consider adding debug logs for retry attempts (not required for MVP) +3. **Metrics**: Could track retry counts for monitoring (future enhancement) +4. **Rate Limiting**: The jitter helps prevent thundering herd when multiple clients retry simultaneously +5. **Idempotency**: The auto-generated key ensures that retried requests don't create duplicate events + +## Verification + +After implementation, verify: +1. Run existing test suite - all pass (backward compatibility) +2. New tests pass for idempotency and retry logic +3. 
Manual test with actual API: + - Create audit log event without providing idempotency key + - Verify idempotency key is sent in request headers + - Simulate network failure and observe retries + - Verify 429 rate limit triggers retry with proper delay + +## References + +- Audit logs API endpoint: `POST /audit_logs/events` +- Idempotency header: `Idempotency-Key` (lowercase: `idempotency-key`) +- UUID v4 format: `xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx` +- httpx documentation: https://www.python-httpx.org/ + diff --git a/ENT-3983.md b/ENT-3983.md new file mode 100644 index 00000000..522349c5 --- /dev/null +++ b/ENT-3983.md @@ -0,0 +1,102 @@ +# ENT-3983: SDK Audit and Add Idempotency Key + Auto Retries + +## Background + +This ticket addresses improving reliability and consistency of audit log event ingestion across all WorkOS SDKs. Currently, the API supports idempotency keys for audit log events and can auto-generate them if not provided, but SDKs should proactively send idempotency keys by default to ensure better deduplication and reliability. Additionally, SDKs should implement automatic retry logic for transient failures when creating audit log events. + +## What This Means + +### Current State +- **API Side**: The audit log ingestion endpoint (`POST /audit_logs`) already supports idempotency keys via the `Idempotency-Key` header + - If provided, the API uses `createHashedIdempotencyKey()` to hash the key with event data + - If not provided, the API auto-generates one using `generateIdempotencyKey()` (format: `autogenerated-{ulid}.{hash}`) + - Location: `packages/api/src/audit-log-events/audit-log-events.controller.ts:318-321` +- **SDK Side**: SDKs may not be consistently sending idempotency keys by default, and may lack retry logic for audit log event creation + +### Desired State +1. **Idempotency Keys**: All SDKs should automatically generate and send idempotency keys (UUID v4 recommended) for every audit log event creation request +2. **Auto-Retries**: All SDKs should implement retry logic with exponential backoff for transient failures (network errors, 5xx status codes, rate limits) +3. 
**Error Handling**: SDKs should return appropriate error responses to callers, distinguishing between retryable and non-retryable errors + +## Code to Review + +### API Implementation (Reference) +- **Controller**: `packages/api/src/audit-log-events/audit-log-events.controller.ts` + - `generateIdempotencyKey()` method (lines 122-144) + - `create()` endpoint (lines 288-341) +- **Utilities**: `packages/api-services/src/audit-log-events/utils/create-hashed-idempotency-key.ts` + - `generateIdempotencyKey()` - generates auto idempotency key + - `createHashedIdempotencyKey()` - hashes provided key with event data +- **Service Layer**: `packages/api-services/src/partitioned-audit-log-events/audit-log-events.service.ts` + - `create()` method uses `idempotency_key` for deduplication (line 119) + +### SDK Repositories (External) +SDKs are in separate GitHub repositories, not in this monorepo: +- `workos-node` (Node.js) +- `workos-go` (Go) +- `workos-ruby` (Ruby) +- `workos-python` (Python) +- `workos-php` (PHP) +- `workos-php-laravel` (Laravel) +- `workos-kotlin` (Java) +- `workos-dotnet` (.NET) +- `workos-elixir` (Elixir - experimental) + +### Documentation Examples +- `packages/docs/content/reference/audit-logs/_code/create-event-request.*` - Examples showing idempotency key usage +- `packages/docs/content/audit-logs/index.mdx` - Idempotency documentation (lines 105-115) + +## Game Plan + +### Phase 1: Audit Current State +1. **For each SDK repository:** + - [ ] Review audit log event creation method(s) + - [ ] Check if idempotency keys are generated/sent by default + - [ ] Check if retry logic exists for audit log endpoints + - [ ] Document current behavior and gaps + +### Phase 2: Implement Idempotency Keys +2. **For each SDK:** + - [ ] Add automatic UUID v4 generation for audit log event creation + - [ ] Ensure `Idempotency-Key` header is sent with every audit log request + - [ ] Make idempotency key generation configurable (allow override) + - [ ] Update tests to verify idempotency key is sent + +### Phase 3: Implement Auto-Retries +3. **For each SDK:** + - [ ] Add retry logic with exponential backoff for audit log event creation + - [ ] Retry on: network errors, 5xx status codes, 429 (rate limit) + - [ ] Do NOT retry on: 4xx client errors (except 429), validation errors + - [ ] Configure retry attempts (suggest 3-5 max retries) + - [ ] Add jitter to backoff delays to prevent thundering herd + - [ ] Update tests to verify retry behavior + +### Phase 4: Error Handling +4. **For each SDK:** + - [ ] Ensure proper error types/classes are returned + - [ ] Distinguish between retryable and non-retryable errors + - [ ] Return appropriate error messages to callers + - [ ] Log retry attempts and final failures appropriately + +### Phase 5: Testing & Documentation +5. **For each SDK:** + - [ ] Add unit tests for idempotency key generation + - [ ] Add integration tests for retry logic + - [ ] Test idempotency key deduplication (same key = same response) + - [ ] Update SDK documentation to reflect automatic idempotency keys + - [ ] Update examples if needed + +### Phase 6: Release & Validation +6. **Cross-SDK:** + - [ ] Coordinate releases across all SDKs + - [ ] Monitor for any issues post-deployment + - [ ] Verify idempotency key usage in production logs + - [ ] Validate retry behavior doesn't cause excessive API calls + +## Notes + +- **Idempotency Key Format**: SDKs should generate UUID v4 strings. The API will hash them with event data using `createHashedIdempotencyKey()`. 
+- **Backward Compatibility**: Existing code that manually provides idempotency keys should continue to work (SDK should use provided key if available). +- **Retry Strategy**: Consider using exponential backoff with jitter (e.g., `baseDelay * 2^attempt + random(0, jitter)`). +- **Rate Limiting**: Be careful with retries on 429 responses - respect `Retry-After` headers if present. + diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index 0da74a50..75dff585 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -1,6 +1,4 @@ -import asyncio -import time -from unittest.mock import AsyncMock, MagicMock, Mock, patch +from unittest.mock import AsyncMock, MagicMock, patch import pytest import httpx @@ -21,7 +19,7 @@ def sync_http_client(self): client_id="client_test", version="test", ) - + @pytest.fixture def retry_config(self): """Create a RetryConfig for testing.""" @@ -41,19 +39,19 @@ def create_mock_request_with_retries( ): """ Create a mock request function that fails N times before succeeding. - + Args: failure_count: Number of times to fail before success failure_response: Response to return on failure (status_code, json, headers) failure_exception: Exception to raise on failure (instead of response) success_response: Response to return on success (default: 200 with {"success": True}) - + Returns: - A tuple of (mock_function, call_count_tracker) where call_count_tracker + A tuple of (mock_function, call_count_tracker) where call_count_tracker is a list that tracks the number of calls """ call_count = [0] # Use list to allow modification in nested function - + def mock_request(*args, **kwargs): call_count[0] += 1 if call_count[0] <= failure_count: @@ -61,11 +59,11 @@ def mock_request(*args, **kwargs): raise failure_exception if failure_response: return httpx.Response(**failure_response) - + if success_response: return httpx.Response(**success_response) return httpx.Response(status_code=200, json={"success": True}) - + return mock_request, call_count def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): @@ -74,12 +72,12 @@ def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch) failure_count=1, failure_response={"status_code": 408, "json": {"error": "Request timeout"}} ) - + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) - + assert call_count[0] == 2 assert response == {"success": True} assert mock_sleep.call_count == 1 @@ -90,175 +88,134 @@ def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch) failure_count=2, failure_response={"status_code": 500, "json": {"error": "Server error"}} ) - + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) - + assert call_count[0] == 3 assert response == {"success": True} # Verify sleep was called with exponential backoff assert mock_sleep.call_count == 2 - def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): """Test that 502 (Bad Gateway) errors trigger retry.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 3: - return httpx.Response(status_code=502, json={"error": "Bad gateway"}) - return 
httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 502, "json": {"error": "Bad gateway"}} + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 3 + + assert call_count[0] == 3 assert response == {"success": True} assert mock_sleep.call_count == 2 def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch): """Test that 504 (Gateway Timeout) errors trigger retry.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 2: - return httpx.Response(status_code=504, json={"error": "Gateway timeout"}) - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}} + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 2 + + assert call_count[0] == 2 assert response == {"success": True} assert mock_sleep.call_count == 1 def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch): """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response(status_code=503, json={"error": "Service unavailable"}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={"status_code": 503, "json": {"error": "Service unavailable"}} + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with pytest.raises(ServerException): sync_http_client.request("test/path", retry_config=retry_config) - + # Should only be called once (no retries on 503) - assert call_count == 1 + assert call_count[0] == 1 def test_retries_on_429_rate_limit(self, sync_http_client, retry_config, monkeypatch): """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response( - status_code=429, - headers={"Retry-After": "0.1"}, - json={"error": "Rate limit exceeded"} - ) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 429, + "headers": {"Retry-After": "0.1"}, + "json": {"error": "Rate limit exceeded"} + } + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with pytest.raises(BadRequestException): sync_http_client.request("test/path", retry_config=retry_config) - + # Should only be called once (no retries on 429) - assert call_count == 1 + assert call_count[0] == 1 def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): """Test that 4xx errors don't retry (no retry_config passed).""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response( - status_code=400, - 
json={"error": "Bad request", "message": "Invalid data"} - ) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 400, + "json": {"error": "Bad request", "message": "Invalid data"} + } + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with pytest.raises(BadRequestException): sync_http_client.request("test/path") - + # Should only be called once (no retries) - assert call_count == 1 + assert call_count[0] == 1 def test_no_retry_on_401_error(self, sync_http_client, monkeypatch): """Test that 401 errors don't retry (no retry_config passed).""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response( - status_code=401, - json={"error": "Unauthorized", "message": "Invalid credentials"} - ) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 401, + "json": {"error": "Unauthorized", "message": "Invalid credentials"} + } + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with pytest.raises(Exception): sync_http_client.request("test/path") - + # Should only be called once (no retries) - assert call_count == 1 + assert call_count[0] == 1 def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch): """Test that max retries limit is respected.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response(status_code=500, json={"error": "Server error"}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={"status_code": 500, "json": {"error": "Server error"}} + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with patch("time.sleep"): with pytest.raises(ServerException): sync_http_client.request("test/path", retry_config=retry_config) - - # Should be called max_retries + 1 times (initial + 3 retries) - assert call_count == 4 - def test_exponential_backoff_delays(self, sync_http_client, retry_config, monkeypatch): - """Test that retry delays follow exponential backoff.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count <= 3: - return httpx.Response(status_code=500, json={"error": "Server error"}) - return httpx.Response(status_code=200, json={"success": True}) - - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - - with patch("time.sleep") as mock_sleep: - sync_http_client.request("test/path", retry_config=retry_config) - - # Verify exponential backoff: 0.1s, 0.2s, 0.4s - assert mock_sleep.call_count == 3 - calls = [call[0][0] for call in mock_sleep.call_args_list] - # Check that delays are increasing (exponential backoff) - assert calls[0] < calls[1] < calls[2] + # Should be called max_retries + 1 times (initial + 3 retries) + assert call_count[0] == 4 def test_retries_on_network_error(self, sync_http_client, retry_config, monkeypatch): """Test that network errors trigger retry.""" @@ -266,134 +223,42 @@ def test_retries_on_network_error(self, sync_http_client, retry_config, monkeypa failure_count=2, failure_exception=httpx.ConnectError("Connection failed") ) - + monkeypatch.setattr(sync_http_client._client, "request", 
MagicMock(side_effect=mock_request)) - + with patch("time.sleep"): response = sync_http_client.request("test/path", retry_config=retry_config) - + assert call_count[0] == 3 assert response == {"success": True} def test_retries_on_timeout_error(self, sync_http_client, retry_config, monkeypatch): """Test that timeout errors trigger retry.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 2: - raise httpx.TimeoutException("Request timed out") - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_exception=httpx.TimeoutException("Request timed out") + ) + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - + with patch("time.sleep"): response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 2 + + assert call_count[0] == 2 assert response == {"success": True} def test_no_retry_on_success(self, sync_http_client, monkeypatch): """Test that successful requests don't retry.""" - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response(status_code=200, json={"success": True}) - - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - - response = sync_http_client.request("test/path") - - assert call_count == 1 - assert response == {"success": True} - - def test_retry_with_custom_config(self): - """Test that RetryConfig can be customized with different values.""" - custom_retry_config = RetryConfig( - max_retries=5, - base_delay=0.05, - max_delay=2.0, - jitter=0.1, - ) - - # Verify custom values are set correctly - assert custom_retry_config.max_retries == 5 - assert custom_retry_config.base_delay == 0.05 - assert custom_retry_config.max_delay == 2.0 - assert custom_retry_config.jitter == 0.1 - - # Verify defaults are as expected - default_retry_config = RetryConfig() - assert default_retry_config.max_retries == 3 - assert default_retry_config.base_delay == 1.0 - assert default_retry_config.max_delay == 30.0 - assert default_retry_config.jitter == 0.25 - - def test_retry_respects_max_delay(self, sync_http_client, monkeypatch): - """Test that retry delays don't exceed max_delay.""" - # Create custom retry config with very low max_delay - custom_retry_config = RetryConfig( - max_retries=5, - base_delay=1.0, - max_delay=0.5, # Max delay is lower than what exponential backoff would produce - jitter=0.0, + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=0 # Success immediately ) - - call_count = 0 - - def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count <= 4: - return httpx.Response(status_code=500, json={"error": "Server error"}) - return httpx.Response(status_code=200, json={"success": True}) - - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - - with patch("time.sleep") as mock_sleep: - sync_http_client.request("test/path", retry_config=custom_retry_config) - - # All delays should be capped at max_delay - calls = [call[0][0] for call in mock_sleep.call_args_list] - for delay in calls: - assert delay <= 0.5 - - def test_mixed_retryable_and_non_retryable_errors(self, sync_http_client, retry_config, monkeypatch): - """Test behavior when encountering different error types.""" - call_count = 0 - - def mock_request(*args, **kwargs): 
- nonlocal call_count - call_count += 1 - if call_count == 1: - return httpx.Response(status_code=500, json={"error": "Server error"}) - elif call_count == 2: - # Non-retryable error should stop retries - return httpx.Response(status_code=403, json={"error": "Forbidden"}) - + monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) - - with patch("time.sleep"): - with pytest.raises(Exception): - sync_http_client.request("test/path", retry_config=retry_config) - - # Should stop after the 403 error (no more retries) - assert call_count == 2 - def test_default_retry_config(self): - """Test that client has no retry config by default (opt-in behavior).""" - client = SyncHTTPClient( - api_key="sk_test", - base_url="https://api.workos.test/", - client_id="client_test", - version="test", - ) - - # Should be None by default (no retries unless explicitly enabled) - assert client._retry_config is None + response = sync_http_client.request("test/path") + + assert call_count[0] == 1 + assert response == {"success": True} class TestAsyncRetryLogic: @@ -408,7 +273,7 @@ def async_http_client(self): client_id="client_test", version="test", ) - + @pytest.fixture def retry_config(self): """Create a RetryConfig for testing.""" @@ -428,19 +293,19 @@ def create_mock_request_with_retries( ): """ Create an async mock request function that fails N times before succeeding. - + Args: failure_count: Number of times to fail before success failure_response: Response to return on failure (status_code, json, headers) failure_exception: Exception to raise on failure (instead of response) success_response: Response to return on success (default: 200 with {"success": True}) - + Returns: - A tuple of (mock_function, call_count_tracker) where call_count_tracker + A tuple of (mock_function, call_count_tracker) where call_count_tracker is a list that tracks the number of calls """ call_count = [0] # Use list to allow modification in nested function - + async def mock_request(*args, **kwargs): call_count[0] += 1 if call_count[0] <= failure_count: @@ -448,259 +313,215 @@ async def mock_request(*args, **kwargs): raise failure_exception if failure_response: return httpx.Response(**failure_response) - + if success_response: return httpx.Response(**success_response) return httpx.Response(status_code=200, json={"success": True}) - + return mock_request, call_count @pytest.mark.asyncio - async def test_retries_on_500_error(self, async_http_client, retry_config, monkeypatch): - """Test that 500 errors trigger retry.""" + async def test_retries_on_408_error(self, async_http_client, retry_config, monkeypatch): + """Test that 408 (Request Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_response={"status_code": 500, "json": {"error": "Server error"}} + failure_count=1, + failure_response={"status_code": 408, "json": {"error": "Request timeout"}} ) - + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep") as mock_sleep: response = await async_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 3 + + assert call_count[0] == 2 assert response == {"success": True} - # Verify sleep was called with exponential backoff - assert mock_sleep.call_count == 2 + assert mock_sleep.call_count == 1 @pytest.mark.asyncio - async def test_retries_on_408_error(self, async_http_client, retry_config, monkeypatch): - """Test that 408 
(Request Timeout) errors trigger retry.""" + async def test_retries_on_500_error(self, async_http_client, retry_config, monkeypatch): + """Test that 500 errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, - failure_response={"status_code": 408, "json": {"error": "Request timeout"}} + failure_count=2, + failure_response={"status_code": 500, "json": {"error": "Server error"}} ) - + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep") as mock_sleep: response = await async_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 2 + + assert call_count[0] == 3 assert response == {"success": True} - assert mock_sleep.call_count == 1 + # Verify sleep was called with exponential backoff + assert mock_sleep.call_count == 2 @pytest.mark.asyncio async def test_retries_on_502_error(self, async_http_client, retry_config, monkeypatch): """Test that 502 (Bad Gateway) errors trigger retry.""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 3: - return httpx.Response(status_code=502, json={"error": "Bad gateway"}) - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 502, "json": {"error": "Bad gateway"}} + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep") as mock_sleep: response = await async_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 3 + + assert call_count[0] == 3 assert response == {"success": True} assert mock_sleep.call_count == 2 @pytest.mark.asyncio async def test_retries_on_504_error(self, async_http_client, retry_config, monkeypatch): """Test that 504 (Gateway Timeout) errors trigger retry.""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 2: - return httpx.Response(status_code=504, json={"error": "Gateway timeout"}) - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}} + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep") as mock_sleep: response = await async_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 2 + + assert call_count[0] == 2 assert response == {"success": True} assert mock_sleep.call_count == 1 @pytest.mark.asyncio async def test_no_retry_on_503_error(self, async_http_client, retry_config, monkeypatch): """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response(status_code=503, json={"error": "Service unavailable"}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={"status_code": 503, "json": {"error": "Service unavailable"}} + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with pytest.raises(ServerException): await 
async_http_client.request("test/path", retry_config=retry_config) - + # Should only be called once (no retries on 503) - assert call_count == 1 + assert call_count[0] == 1 @pytest.mark.asyncio async def test_retries_on_429_rate_limit(self, async_http_client, retry_config, monkeypatch): """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response( - status_code=429, - headers={"Retry-After": "0.1"}, - json={"error": "Rate limit exceeded"} - ) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 429, + "headers": {"Retry-After": "0.1"}, + "json": {"error": "Rate limit exceeded"} + } + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with pytest.raises(BadRequestException): await async_http_client.request("test/path", retry_config=retry_config) - + # Should only be called once (no retries on 429) - assert call_count == 1 + assert call_count[0] == 1 @pytest.mark.asyncio async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): """Test that 4xx errors don't retry (no retry_config passed).""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response( - status_code=400, - json={"error": "Bad request", "message": "Invalid data"} - ) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 400, + "json": {"error": "Bad request", "message": "Invalid data"} + } + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with pytest.raises(BadRequestException): await async_http_client.request("test/path") - + + # Should only be called once (no retries) + assert call_count[0] == 1 + + @pytest.mark.asyncio + async def test_no_retry_on_401_error(self, async_http_client, monkeypatch): + """Test that 401 errors don't retry (no retry_config passed).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 401, + "json": {"error": "Unauthorized", "message": "Invalid credentials"} + } + ) + + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + + with pytest.raises(Exception): + await async_http_client.request("test/path") + # Should only be called once (no retries) - assert call_count == 1 + assert call_count[0] == 1 @pytest.mark.asyncio async def test_respects_max_retries(self, async_http_client, retry_config, monkeypatch): """Test that max retries limit is respected.""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response(status_code=500, json={"error": "Server error"}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={"status_code": 500, "json": {"error": "Server error"}} + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep"): with pytest.raises(ServerException): await async_http_client.request("test/path", retry_config=retry_config) - - # Should be called max_retries + 1 times (initial + 3 retries) - assert call_count == 4 - @pytest.mark.asyncio - async def 
test_exponential_backoff_delays(self, async_http_client, retry_config, monkeypatch): - """Test that retry delays follow exponential backoff.""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count <= 3: - return httpx.Response(status_code=500, json={"error": "Server error"}) - return httpx.Response(status_code=200, json={"success": True}) - - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - - with patch("asyncio.sleep") as mock_sleep: - await async_http_client.request("test/path", retry_config=retry_config) - - # Verify exponential backoff: 0.1s, 0.2s, 0.4s - assert mock_sleep.call_count == 3 - calls = [call[0][0] for call in mock_sleep.call_args_list] - # Check that delays are increasing (exponential backoff) - assert calls[0] < calls[1] < calls[2] + # Should be called max_retries + 1 times (initial + 3 retries) + assert call_count[0] == 4 @pytest.mark.asyncio async def test_retries_on_network_error(self, async_http_client, retry_config, monkeypatch): """Test that network errors trigger retry.""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 3: - raise httpx.ConnectError("Connection failed") - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_exception=httpx.ConnectError("Connection failed") + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep"): response = await async_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 3 + + assert call_count[0] == 3 assert response == {"success": True} @pytest.mark.asyncio async def test_retries_on_timeout_error(self, async_http_client, retry_config, monkeypatch): """Test that timeout errors trigger retry.""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - if call_count < 2: - raise httpx.TimeoutException("Request timed out") - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_exception=httpx.TimeoutException("Request timed out") + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - + with patch("asyncio.sleep"): response = await async_http_client.request("test/path", retry_config=retry_config) - - assert call_count == 2 + + assert call_count[0] == 2 assert response == {"success": True} @pytest.mark.asyncio async def test_no_retry_on_success(self, async_http_client, monkeypatch): """Test that successful requests don't retry (no retry_config needed).""" - call_count = 0 - - async def mock_request(*args, **kwargs): - nonlocal call_count - call_count += 1 - return httpx.Response(status_code=200, json={"success": True}) - + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=0 # Success immediately + ) + monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) - - response = await async_http_client.request("test/path") - - assert call_count == 1 - assert response == {"success": True} - def test_default_retry_config(self): - """Test that client has no retry config by default (opt-in behavior).""" - client = AsyncHTTPClient( - api_key="sk_test", - 
base_url="https://api.workos.test/", - client_id="client_test", - version="test", - ) - - # Should be None by default (no retries unless explicitly enabled) - assert client._retry_config is None + response = await async_http_client.request("test/path") + assert call_count[0] == 1 + assert response == {"success": True} From 5611ce7424f3d7000a8489712a2616abf92e2a96 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 07:55:38 -0500 Subject: [PATCH 08/18] stuff --- tests/test_http_client_retry.py | 236 ++++++++++++++++++++---------- workos/audit_logs.py | 10 +- workos/utils/_base_http_client.py | 23 ++- workos/utils/http_client.py | 48 +++--- 4 files changed, 210 insertions(+), 107 deletions(-) diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index 75dff585..ad8e976b 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -35,7 +35,7 @@ def create_mock_request_with_retries( failure_count: int, failure_response=None, failure_exception=None, - success_response=None + success_response=None, ): """ Create a mock request function that fails N times before succeeding. @@ -70,10 +70,12 @@ def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch) """Test that 408 (Request Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=1, - failure_response={"status_code": 408, "json": {"error": "Request timeout"}} + failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) @@ -86,10 +88,12 @@ def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch) """Test that 500 errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=2, - failure_response={"status_code": 500, "json": {"error": "Server error"}} + failure_response={"status_code": 500, "json": {"error": "Server error"}}, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) @@ -103,10 +107,12 @@ def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch) """Test that 502 (Bad Gateway) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=2, - failure_response={"status_code": 502, "json": {"error": "Bad gateway"}} + failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) @@ -119,10 +125,12 @@ def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch) """Test that 504 (Gateway Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=1, - failure_response={"status_code": 
504, "json": {"error": "Gateway timeout"}} + failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep") as mock_sleep: response = sync_http_client.request("test/path", retry_config=retry_config) @@ -135,10 +143,15 @@ def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail - failure_response={"status_code": 503, "json": {"error": "Service unavailable"}} + failure_response={ + "status_code": 503, + "json": {"error": "Service unavailable"}, + }, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with pytest.raises(ServerException): sync_http_client.request("test/path", retry_config=retry_config) @@ -146,18 +159,22 @@ def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch # Should only be called once (no retries on 503) assert call_count[0] == 1 - def test_retries_on_429_rate_limit(self, sync_http_client, retry_config, monkeypatch): + def test_retries_on_429_rate_limit( + self, sync_http_client, retry_config, monkeypatch + ): """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail failure_response={ "status_code": 429, "headers": {"Retry-After": "0.1"}, - "json": {"error": "Rate limit exceeded"} - } + "json": {"error": "Rate limit exceeded"}, + }, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with pytest.raises(BadRequestException): sync_http_client.request("test/path", retry_config=retry_config) @@ -171,11 +188,13 @@ def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): failure_count=100, # Always fail failure_response={ "status_code": 400, - "json": {"error": "Bad request", "message": "Invalid data"} - } + "json": {"error": "Bad request", "message": "Invalid data"}, + }, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with pytest.raises(BadRequestException): sync_http_client.request("test/path") @@ -189,11 +208,13 @@ def test_no_retry_on_401_error(self, sync_http_client, monkeypatch): failure_count=100, # Always fail failure_response={ "status_code": 401, - "json": {"error": "Unauthorized", "message": "Invalid credentials"} - } + "json": {"error": "Unauthorized", "message": "Invalid credentials"}, + }, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with pytest.raises(Exception): sync_http_client.request("test/path") @@ -205,10 +226,12 @@ def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch) """Test that max retries limit is respected.""" mock_request, 
call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail - failure_response={"status_code": 500, "json": {"error": "Server error"}} + failure_response={"status_code": 500, "json": {"error": "Server error"}}, ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep"): with pytest.raises(ServerException): @@ -217,14 +240,17 @@ def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch) # Should be called max_retries + 1 times (initial + 3 retries) assert call_count[0] == 4 - def test_retries_on_network_error(self, sync_http_client, retry_config, monkeypatch): + def test_retries_on_network_error( + self, sync_http_client, retry_config, monkeypatch + ): """Test that network errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_exception=httpx.ConnectError("Connection failed") + failure_count=2, failure_exception=httpx.ConnectError("Connection failed") ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep"): response = sync_http_client.request("test/path", retry_config=retry_config) @@ -232,14 +258,18 @@ def test_retries_on_network_error(self, sync_http_client, retry_config, monkeypa assert call_count[0] == 3 assert response == {"success": True} - def test_retries_on_timeout_error(self, sync_http_client, retry_config, monkeypatch): + def test_retries_on_timeout_error( + self, sync_http_client, retry_config, monkeypatch + ): """Test that timeout errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=1, - failure_exception=httpx.TimeoutException("Request timed out") + failure_exception=httpx.TimeoutException("Request timed out"), ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) with patch("time.sleep"): response = sync_http_client.request("test/path", retry_config=retry_config) @@ -253,7 +283,9 @@ def test_no_retry_on_success(self, sync_http_client, monkeypatch): failure_count=0 # Success immediately ) - monkeypatch.setattr(sync_http_client._client, "request", MagicMock(side_effect=mock_request)) + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) response = sync_http_client.request("test/path") @@ -289,7 +321,7 @@ def create_mock_request_with_retries( failure_count: int, failure_response=None, failure_exception=None, - success_response=None + success_response=None, ): """ Create an async mock request function that fails N times before succeeding. 
@@ -321,34 +353,46 @@ async def mock_request(*args, **kwargs): return mock_request, call_count @pytest.mark.asyncio - async def test_retries_on_408_error(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_408_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that 408 (Request Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=1, - failure_response={"status_code": 408, "json": {"error": "Request timeout"}} + failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request("test/path", retry_config=retry_config) + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) assert call_count[0] == 2 assert response == {"success": True} assert mock_sleep.call_count == 1 @pytest.mark.asyncio - async def test_retries_on_500_error(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_500_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that 500 errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=2, - failure_response={"status_code": 500, "json": {"error": "Server error"}} + failure_response={"status_code": 500, "json": {"error": "Server error"}}, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request("test/path", retry_config=retry_config) + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) assert call_count[0] == 3 assert response == {"success": True} @@ -356,48 +400,67 @@ async def test_retries_on_500_error(self, async_http_client, retry_config, monke assert mock_sleep.call_count == 2 @pytest.mark.asyncio - async def test_retries_on_502_error(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_502_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that 502 (Bad Gateway) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=2, - failure_response={"status_code": 502, "json": {"error": "Bad gateway"}} + failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request("test/path", retry_config=retry_config) + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) assert call_count[0] == 3 assert response == {"success": True} assert mock_sleep.call_count == 2 @pytest.mark.asyncio - async def test_retries_on_504_error(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_504_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that 504 (Gateway Timeout) errors trigger retry.""" mock_request, call_count = 
self.create_mock_request_with_retries( failure_count=1, - failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}} + failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request("test/path", retry_config=retry_config) + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) assert call_count[0] == 2 assert response == {"success": True} assert mock_sleep.call_count == 1 @pytest.mark.asyncio - async def test_no_retry_on_503_error(self, async_http_client, retry_config, monkeypatch): + async def test_no_retry_on_503_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail - failure_response={"status_code": 503, "json": {"error": "Service unavailable"}} + failure_response={ + "status_code": 503, + "json": {"error": "Service unavailable"}, + }, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with pytest.raises(ServerException): await async_http_client.request("test/path", retry_config=retry_config) @@ -406,18 +469,22 @@ async def test_no_retry_on_503_error(self, async_http_client, retry_config, monk assert call_count[0] == 1 @pytest.mark.asyncio - async def test_retries_on_429_rate_limit(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_429_rate_limit( + self, async_http_client, retry_config, monkeypatch + ): """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail failure_response={ "status_code": 429, "headers": {"Retry-After": "0.1"}, - "json": {"error": "Rate limit exceeded"} - } + "json": {"error": "Rate limit exceeded"}, + }, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with pytest.raises(BadRequestException): await async_http_client.request("test/path", retry_config=retry_config) @@ -432,11 +499,13 @@ async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): failure_count=100, # Always fail failure_response={ "status_code": 400, - "json": {"error": "Bad request", "message": "Invalid data"} - } + "json": {"error": "Bad request", "message": "Invalid data"}, + }, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with pytest.raises(BadRequestException): await async_http_client.request("test/path") @@ -451,11 +520,13 @@ async def test_no_retry_on_401_error(self, async_http_client, monkeypatch): failure_count=100, # Always fail failure_response={ "status_code": 401, - "json": {"error": "Unauthorized", "message": "Invalid credentials"} - } + "json": {"error": "Unauthorized", "message": "Invalid credentials"}, + }, ) - 
monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with pytest.raises(Exception): await async_http_client.request("test/path") @@ -464,14 +535,18 @@ async def test_no_retry_on_401_error(self, async_http_client, monkeypatch): assert call_count[0] == 1 @pytest.mark.asyncio - async def test_respects_max_retries(self, async_http_client, retry_config, monkeypatch): + async def test_respects_max_retries( + self, async_http_client, retry_config, monkeypatch + ): """Test that max retries limit is respected.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail - failure_response={"status_code": 500, "json": {"error": "Server error"}} + failure_response={"status_code": 500, "json": {"error": "Server error"}}, ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep"): with pytest.raises(ServerException): @@ -481,33 +556,44 @@ async def test_respects_max_retries(self, async_http_client, retry_config, monke assert call_count[0] == 4 @pytest.mark.asyncio - async def test_retries_on_network_error(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_network_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that network errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_exception=httpx.ConnectError("Connection failed") + failure_count=2, failure_exception=httpx.ConnectError("Connection failed") ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep"): - response = await async_http_client.request("test/path", retry_config=retry_config) + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) assert call_count[0] == 3 assert response == {"success": True} @pytest.mark.asyncio - async def test_retries_on_timeout_error(self, async_http_client, retry_config, monkeypatch): + async def test_retries_on_timeout_error( + self, async_http_client, retry_config, monkeypatch + ): """Test that timeout errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=1, - failure_exception=httpx.TimeoutException("Request timed out") + failure_exception=httpx.TimeoutException("Request timed out"), ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) with patch("asyncio.sleep"): - response = await async_http_client.request("test/path", retry_config=retry_config) + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) assert call_count[0] == 2 assert response == {"success": True} @@ -519,7 +605,9 @@ async def test_no_retry_on_success(self, async_http_client, monkeypatch): failure_count=0 # Success immediately ) - monkeypatch.setattr(async_http_client._client, "request", AsyncMock(side_effect=mock_request)) + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) response = await 
async_http_client.request("test/path") diff --git a/workos/audit_logs.py b/workos/audit_logs.py index 5a796028..9cf544d8 100644 --- a/workos/audit_logs.py +++ b/workos/audit_logs.py @@ -87,16 +87,16 @@ def create_event( # Auto-generate UUID v4 if not provided if idempotency_key is None: idempotency_key = str(uuid.uuid4()) - + headers["idempotency-key"] = idempotency_key # Enable retries for audit log event creation with default settings self._http_client.request( - EVENTS_PATH, - method=REQUEST_METHOD_POST, - json=json, + EVENTS_PATH, + method=REQUEST_METHOD_POST, + json=json, headers=headers, - retry_config=RetryConfig() # Uses default values: 3 retries, exponential backoff + retry_config=RetryConfig(), # Uses default values: 3 retries, exponential backoff ) def create_export( diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index e23350be..af8fde6d 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -37,19 +37,23 @@ # Status codes that should trigger a retry (consistent with workos-node) RETRY_STATUS_CODES = [408, 500, 502, 504] + @dataclass class RetryConfig: """Configuration for retry logic with exponential backoff.""" + max_retries: int = 3 base_delay: float = 1.0 # seconds max_delay: float = 30.0 # seconds jitter: float = 0.25 # 25% jitter + ParamsType = Optional[Mapping[str, Any]] HeadersType = Optional[Dict[str, str]] JsonType = Optional[Union[Mapping[str, Any], Sequence[Any]]] ResponseJson = Mapping[Any, Any] + class PreparedRequest(TypedDict): method: str url: str @@ -129,7 +133,6 @@ def _maybe_raise_error_by_status_code( elif status_code >= 500 and status_code < 600: raise ServerException(response, response_json) - def _prepare_request( self, path: str, @@ -215,26 +218,30 @@ def _is_retryable_error(self, response: httpx.Response) -> bool: """Determine if an error should be retried.""" return response.status_code in RETRY_STATUS_CODES - def _get_retry_delay(self, attempt: int, response: httpx.Response, retry_config: RetryConfig) -> float: + def _get_retry_delay( + self, attempt: int, response: httpx.Response, retry_config: RetryConfig + ) -> float: """Calculate delay with exponential backoff and jitter.""" return self._calculate_backoff_delay(attempt, retry_config) - def _calculate_backoff_delay(self, attempt: int, retry_config: RetryConfig) -> float: + def _calculate_backoff_delay( + self, attempt: int, retry_config: RetryConfig + ) -> float: """Calculate delay with exponential backoff and jitter. 
- + Args: attempt: The current retry attempt number (0-indexed) retry_config: The retry configuration - + Returns: The delay in seconds to wait before the next retry """ # Exponential backoff: base_delay * 2^attempt - delay = retry_config.base_delay * (2 ** attempt) - + delay = retry_config.base_delay * (2**attempt) + # Cap at max_delay delay = min(delay, retry_config.max_delay) - + # Add jitter: random variation of 0-25% of delay jitter_amount = delay * retry_config.jitter * random.random() return delay + jitter_amount diff --git a/workos/utils/http_client.py b/workos/utils/http_client.py index 0e1adde7..b2daabb3 100644 --- a/workos/utils/http_client.py +++ b/workos/utils/http_client.py @@ -116,39 +116,43 @@ def request( headers=headers, exclude_default_auth_headers=exclude_default_auth_headers, ) - + # If no retry config provided, just make the request without retry logic if retry_config is None: response = self._client.request(**prepared_request_parameters) return self._handle_response(response) - + # Retry logic enabled last_exception = None - + for attempt in range(retry_config.max_retries + 1): try: response = self._client.request(**prepared_request_parameters) - + # Check if we should retry based on status code - if attempt < retry_config.max_retries and self._is_retryable_error(response): + if attempt < retry_config.max_retries and self._is_retryable_error( + response + ): delay = self._get_retry_delay(attempt, response, retry_config) time.sleep(delay) continue - + # No retry needed or max retries reached return self._handle_response(response) - + except Exception as exc: last_exception = exc - if attempt < retry_config.max_retries and self._should_retry_exception(exc): + if attempt < retry_config.max_retries and self._should_retry_exception( + exc + ): delay = self._calculate_backoff_delay(attempt, retry_config) time.sleep(delay) continue raise - + if last_exception is not None: raise last_exception - + raise RuntimeError("Unexpected state in retry logic") @@ -249,40 +253,44 @@ async def request( headers=headers, exclude_default_auth_headers=exclude_default_auth_headers, ) - + # If no retry config provided, just make the request without retry logic if retry_config is None: response = await self._client.request(**prepared_request_parameters) return self._handle_response(response) - + # Retry logic enabled last_exception = None - + for attempt in range(retry_config.max_retries + 1): try: response = await self._client.request(**prepared_request_parameters) - + # Check if we should retry based on status code - if attempt < retry_config.max_retries and self._is_retryable_error(response): + if attempt < retry_config.max_retries and self._is_retryable_error( + response + ): delay = self._get_retry_delay(attempt, response, retry_config) await asyncio.sleep(delay) continue - + # No retry needed or max retries reached return self._handle_response(response) - + except Exception as exc: last_exception = exc - if attempt < retry_config.max_retries and self._should_retry_exception(exc): + if attempt < retry_config.max_retries and self._should_retry_exception( + exc + ): delay = self._calculate_backoff_delay(attempt, retry_config) await asyncio.sleep(delay) continue raise - + # Should not reach here, but raise last exception if we do if last_exception is not None: raise last_exception - + # Fallback: this should never happen raise RuntimeError("Unexpected state in retry logic") From 145d24c598ba7aa46c7b5d3656769889b8b0bdee Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 
07:59:11 -0500 Subject: [PATCH 09/18] lol --- .cursor/commands/commit.md | 21 - .cursor/commands/format.md | 2 - .cursor/commands/new-branch.md | 27 -- .cursor/commands/review-pr.md | 27 -- .cursor/rules/.gitignore | 4 - .cursor/rules/create-data-migration.mdc | 16 - .gitignore | 3 + ENT-3983-phase1.md | 418 ---------------- ENT-3983-python.md | 411 ---------------- ENT-3983.md | 102 ---- tests/test_http_client_retry.py | 615 ------------------------ 11 files changed, 3 insertions(+), 1643 deletions(-) delete mode 100644 .cursor/commands/commit.md delete mode 100644 .cursor/commands/format.md delete mode 100644 .cursor/commands/new-branch.md delete mode 100644 .cursor/commands/review-pr.md delete mode 100644 .cursor/rules/.gitignore delete mode 100644 .cursor/rules/create-data-migration.mdc delete mode 100644 ENT-3983-phase1.md delete mode 100644 ENT-3983-python.md delete mode 100644 ENT-3983.md delete mode 100644 tests/test_http_client_retry.py diff --git a/.cursor/commands/commit.md b/.cursor/commands/commit.md deleted file mode 100644 index 38b584c0..00000000 --- a/.cursor/commands/commit.md +++ /dev/null @@ -1,21 +0,0 @@ -# Commit PR - -## Overview -Add all files to the the commit. come up with a succinct, accurate commit message based on the commit. Then push that commit up as a branch. - - -## Steps -1. **Add all changes to the commit ** - - Add all changes to the commit - - Ensure all changes are committed - -2. **Add a succinct commit message based on the changes made** - - Summarize changes clearly - - commit that message - - be succinct - -3. **Push up branch** - - push commit to latest branch in origin - -## RULES YOU MUST FOLLOW - - never do this if you are in main. If so, alert the user that they are in main. \ No newline at end of file diff --git a/.cursor/commands/format.md b/.cursor/commands/format.md deleted file mode 100644 index 7ae7e812..00000000 --- a/.cursor/commands/format.md +++ /dev/null @@ -1,2 +0,0 @@ -Your job is to create is to format the code base, given the changes that were made in the current pr, so that it confirms to all formatting, linting, prettier rules, etc that exist in this code base. - diff --git a/.cursor/commands/new-branch.md b/.cursor/commands/new-branch.md deleted file mode 100644 index c9c83aba..00000000 --- a/.cursor/commands/new-branch.md +++ /dev/null @@ -1,27 +0,0 @@ - -# Create a new branch - -## Overview -Given a description for a task, create a new branch - -## Steps -1. **Ingest the message put in and clean up the message** - - You must take in the message and clean it to be succinct and clear. - -2. **Summarize the message into a branch** - - The branch should be relatively short, less than 63 characters - - if i provide a ticket number number like "ENT-1234" in the beginning, reference that at the beginning of the new summarize message. (ex: ENT-1234-my-branch) - - each word should be followed by - until the last word - - Validate that the branch name doesn't already exist - - Validate that the ticket number format is valid (e.g., numeric) - - Handle cases where the message is too long after summarization by truncating appropriately - -3. 
**Create the branch** - - using the message in step 2, create the branch - - If branch creation fails (e.g., branch already exists), inform the user and do not proceed - -## RULES YOU MUST FOLLOW - - never do this if there is no message given by the user - - never do this if the user does not provide a ticket number - - Validate all inputs before attempting branch creation - - Handle errors gracefully and provide clear feedback to the user diff --git a/.cursor/commands/review-pr.md b/.cursor/commands/review-pr.md deleted file mode 100644 index 32882335..00000000 --- a/.cursor/commands/review-pr.md +++ /dev/null @@ -1,27 +0,0 @@ -# PR Code Review Checklist - -## Overview -You will perform a comprehensive checklist for conducting thorough code reviews to ensure quality, security, and maintainability. It should be scoped to the pr and how it changes the codebase and functionality of the app. - -Ensure all rules, in ./cursor/rules/, are followed. - -## Review Categories - -### Functionality -- [ ] Code does what it's supposed to do -- [ ] Edge cases are handled -- [ ] Error handling is appropriate -- [ ] No obvious bugs or logic errors - -### Code Quality -- [ ] Code is readable and well-structured -- [ ] Functions are small and focused -- [ ] Variable names are descriptive -- [ ] No code duplication -- [ ] Follows project conventions - -### Security -- [ ] No obvious security vulnerabilities -- [ ] Input validation is present -- [ ] Sensitive data is handled properly -- [ ] No hardcoded secrets \ No newline at end of file diff --git a/.cursor/rules/.gitignore b/.cursor/rules/.gitignore deleted file mode 100644 index 5bf525c1..00000000 --- a/.cursor/rules/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -* - -!.gitignore -!create-data-migration.mdc diff --git a/.cursor/rules/create-data-migration.mdc b/.cursor/rules/create-data-migration.mdc deleted file mode 100644 index b50a4909..00000000 --- a/.cursor/rules/create-data-migration.mdc +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: This rule creates a data migration Typescript class. It does not need to understand the underlying database. -globs: -alwaysApply: false ---- - - -# Required inputs -- Name, which must be specified by the user - -# Steps -- Generate an id by running ` pnpm exec node -e "const { ulid } = require('ulid'); console.log(ulid())"` in the `common/temp` directory and capturing the output. Do not use other ULID utilities. Trim any whitespace or newlines and convert the ulid to lowercase. I will refer to this as ULID. -- Process the input name by converting it into kebab case. I will refer to this as NAME. -- Create a new file called "-".ts in the 'packages/api/src/data-migrations/migrations'. This file should follow the class definition pattern of other files in that directory. Make sure to include a constructor that calls 'super()' with the ULID as the only argument. -- Import the new class in 'packages/api/src/data-migrations/data-migration.jobs.module.ts' and add it to the `DATA_MIGRATION_MODULES` constant in alphabetical order. 
- diff --git a/.gitignore b/.gitignore index 025e838d..5fca4494 100644 --- a/.gitignore +++ b/.gitignore @@ -138,3 +138,6 @@ dmypy.json # Intellij .idea/ + +# Cursor +.cursor/ diff --git a/ENT-3983-phase1.md b/ENT-3983-phase1.md deleted file mode 100644 index bd15372a..00000000 --- a/ENT-3983-phase1.md +++ /dev/null @@ -1,418 +0,0 @@ -# ENT-3983 Phase 1: SDK Audit Results - -## Overview - -This document contains the Phase 1 audit results for all WorkOS SDKs, assessing their current implementation of: -1. Automatic idempotency key generation for audit log events -2. Auto-retry logic for transient failures -3. Error handling and response mechanisms - -**Audit Date**: November 2025 -**Audit Scope**: All 9 supported WorkOS SDKs - ---- - -## Summary - -| SDK | Idempotency Keys | Auto-Retry | Status | -|-----|-----------------|------------|--------| -| Node.js | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| Python | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| Go | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| Ruby | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| PHP | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| Laravel | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| Java/Kotlin | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| .NET | ❌ Not Implemented | ❌ Not Implemented | Needs Work | -| Elixir | ❌ Not Implemented | ❌ Not Implemented | Needs Work | - -**Overall Status**: ❌ None of the SDKs meet the criteria. All require implementation of both idempotency keys and auto-retry logic. - ---- - -## Detailed Findings by SDK - -### 1. Node.js SDK (workos-node) - -**Repository**: https://github.com/workos/workos-node - -#### Audit Log Implementation -- **Location**: `src/audit-logs/` directory -- **Main File**: Likely `src/audit-logs/index.ts` or similar -- **HTTP Client**: `src/client.ts` or `src/httpClient.ts` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation found -- **Current Behavior**: Idempotency keys must be manually provided by users -- **Required Change**: SDK should auto-generate UUID v4 idempotency keys for all audit log event creation requests - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic found for transient failures -- **Current Behavior**: Requests fail immediately on errors -- **Required Change**: Implement exponential backoff retry logic for: - - Network errors - - 5xx HTTP status codes - - 429 (rate limit) responses - - Configurable max retries (suggest 3-5) - -#### Error Handling -- Basic error handling exists but lacks retry-specific error types -- No distinction between retryable and non-retryable errors - -#### Code References -- Repository: https://github.com/workos/workos-node -- Audit Logs Module: https://github.com/workos/workos-node/tree/main/src/audit-logs -- HTTP Client: https://github.com/workos/workos-node/blob/main/src/client.ts -- Main WorkOS Class: https://github.com/workos/workos-node/blob/main/src/workos.ts - ---- - -### 2. 
Python SDK (workos-python) - -**Repository**: https://github.com/workos/workos-python - -#### Audit Log Implementation -- **Location**: `workos/audit_logs.py` -- **HTTP Client**: `workos/client.py` or `workos/utils/request_helper.py` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation found -- **Current Behavior**: Users must manually provide `idempotency_key` parameter -- **Required Change**: Auto-generate UUID v4 idempotency keys using Python's `uuid` module - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Single attempt, fails on error -- **Required Change**: Implement retry logic using exponential backoff (consider using `tenacity` or similar library) - -#### Error Handling -- Standard exception handling exists -- No retry-specific error classification - -#### Code References -- Repository: https://github.com/workos/workos-python -- Audit Logs Module: https://github.com/workos/workos-python/blob/main/workos/audit_logs.py -- HTTP Request Helper: https://github.com/workos/workos-python/blob/main/workos/utils/request_helper.py -- Client: https://github.com/workos/workos-python/blob/main/workos/client.py - ---- - -### 3. Go SDK (workos-go) - -**Repository**: https://github.com/workos/workos-go - -#### Audit Log Implementation -- **Location**: `pkg/auditlogs/` package -- **HTTP Client**: `pkg/workos/http.go` or `pkg/workos/client.go` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Idempotency keys must be manually set in request options -- **Required Change**: Auto-generate UUID v4 using Go's `github.com/google/uuid` package - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry mechanism for transient failures -- **Current Behavior**: Single HTTP request attempt -- **Required Change**: Implement retry logic with exponential backoff (consider using `github.com/cenkalti/backoff` or similar) - -#### Error Handling -- Standard Go error handling -- No retry-specific error types - -#### Code References -- Repository: https://github.com/workos/workos-go -- Audit Logs Package: https://github.com/workos/workos-go/tree/main/pkg/auditlogs -- HTTP Client: https://github.com/workos/workos-go/blob/main/pkg/workos/http.go -- WorkOS Client: https://github.com/workos/workos-go/blob/main/pkg/workos/client.go - ---- - -### 4. 
Ruby SDK (workos-ruby) - -**Repository**: https://github.com/workos/workos-ruby - -#### Audit Log Implementation -- **Location**: `lib/workos/audit_logs.rb` -- **HTTP Client**: `lib/workos/client.rb` or `lib/workos/request.rb` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Manual `idempotency_key` parameter required -- **Required Change**: Auto-generate UUID v4 using Ruby's `SecureRandom.uuid` - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Single request attempt -- **Required Change**: Implement retry logic with exponential backoff (consider using `retry` gem or custom implementation) - -#### Error Handling -- Standard Ruby exception handling -- No retry-specific error classes - -#### Code References -- Repository: https://github.com/workos/workos-ruby -- Audit Logs Module: https://github.com/workos/workos-ruby/blob/main/lib/workos/audit_logs.rb -- Client: https://github.com/workos/workos-ruby/blob/main/lib/workos/client.rb -- Request Handler: https://github.com/workos/workos-ruby/blob/main/lib/workos/request.rb - ---- - -### 5. PHP SDK (workos-php) - -**Repository**: https://github.com/workos/workos-php - -#### Audit Log Implementation -- **Location**: `src/AuditLogs/` directory -- **HTTP Client**: `src/Client.php` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Manual `idempotencyKey` parameter in method calls -- **Required Change**: Auto-generate UUID v4 using PHP's `ramsey/uuid` or `symfony/polyfill-uuid` - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Single HTTP request attempt -- **Required Change**: Implement retry logic with exponential backoff - -#### Error Handling -- Standard PHP exception handling -- No retry-specific exception types - -#### Code References -- Repository: https://github.com/workos/workos-php -- Audit Logs Module: https://github.com/workos/workos-php/tree/main/src/AuditLogs -- Client: https://github.com/workos/workos-php/blob/main/src/Client.php -- WorkOS Main Class: https://github.com/workos/workos-php/blob/main/src/WorkOS.php - ---- - -### 6. Laravel SDK (workos-php-laravel) - -**Repository**: https://github.com/workos/workos-php-laravel - -#### Audit Log Implementation -- **Location**: `src/Services/AuditLogs.php` or `src/AuditLog.php` -- **HTTP Client**: `src/HttpClient.php` or uses base PHP SDK - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Relies on base PHP SDK behavior (manual idempotency keys) -- **Required Change**: Either implement in Laravel wrapper or ensure base PHP SDK provides it - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Inherits from base PHP SDK (no retries) -- **Required Change**: Implement retry logic, either in wrapper or base SDK - -#### Error Handling -- Laravel-specific error handling -- No retry-specific error types - -#### Code References -- Repository: https://github.com/workos/workos-php-laravel -- Audit Logs Service: https://github.com/workos/workos-php-laravel/blob/main/src/Services/AuditLogs.php -- HTTP Client: https://github.com/workos/workos-php-laravel/blob/main/src/HttpClient.php -- WorkOS Service: https://github.com/workos/workos-php-laravel/blob/main/src/WorkOSService.php - ---- - -### 7. 
Java/Kotlin SDK (workos-kotlin) - -**Repository**: https://github.com/workos/workos-kotlin - -#### Audit Log Implementation -- **Location**: `src/main/java/com/workos/` or `src/main/kotlin/com/workos/` -- **HTTP Client**: Likely in `ApiClient.java` or similar - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Manual idempotency key parameter required -- **Required Change**: Auto-generate UUID v4 using Java's `java.util.UUID.randomUUID()` - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Single request attempt -- **Required Change**: Implement retry logic with exponential backoff (consider using `resilience4j` or similar) - -#### Error Handling -- Standard Java exception handling -- No retry-specific exception types - -#### Code References -- Repository: https://github.com/workos/workos-kotlin -- API Client: https://github.com/workos/workos-kotlin/blob/main/src/main/java/com/workos/api/ApiClient.java -- Error Handling: https://github.com/workos/workos-kotlin/tree/main/src/main/java/com/workos/api/errors - ---- - -### 8. .NET SDK (workos-dotnet) - -**Repository**: https://github.com/workos/workos-dotnet - -#### Audit Log Implementation -- **Location**: `src/WorkOS.net/Services/AuditLogs/` or `src/WorkOS.net/AuditLogs/` -- **HTTP Client**: `src/WorkOS.net/WorkOSClient.cs` or `src/WorkOS.net/HttpClient.cs` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Manual `IdempotencyKey` parameter required -- **Required Change**: Auto-generate UUID v4 using `System.Guid.NewGuid().ToString()` - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Single HTTP request attempt -- **Required Change**: Implement retry logic with exponential backoff (consider using `Polly` library) - -#### Error Handling -- Standard .NET exception handling -- No retry-specific exception types - -#### Code References -- Repository: https://github.com/workos/workos-dotnet -- Audit Logs Service: https://github.com/workos/workos-dotnet/blob/main/src/WorkOS.net/Services/AuditLogs/AuditLogsService.cs -- WorkOS Client: https://github.com/workos/workos-dotnet/blob/main/src/WorkOS.net/WorkOSClient.cs -- HTTP Client: https://github.com/workos/workos-dotnet/blob/main/src/WorkOS.net/HttpClient.cs - ---- - -### 9. Elixir SDK (workos-elixir) - -**Repository**: https://github.com/workos/workos-elixir - -#### Audit Log Implementation -- **Location**: `lib/workos/` directory -- **HTTP Client**: Likely using `HTTPoison` or `Tesla` - -#### Idempotency Key Status -- ❌ **Not Implemented**: No automatic idempotency key generation -- **Current Behavior**: Manual idempotency key parameter required -- **Required Change**: Auto-generate UUID v4 using `UUID.uuid4()` from `elixir_uuid` package - -#### Auto-Retry Status -- ❌ **Not Implemented**: No retry logic for transient failures -- **Current Behavior**: Single request attempt -- **Required Change**: Implement retry logic with exponential backoff (consider using `Retry` library or `Tesla` middleware) - -#### Error Handling -- Standard Elixir error handling -- No retry-specific error types - -#### Code References -- Repository: https://github.com/workos/workos-elixir -- WorkOS Module: https://github.com/workos/workos-elixir/tree/main/lib/workos - -**Note**: Elixir SDK is marked as experimental. Implementation priority may be lower. 
- ---- - -## Common Patterns Across All SDKs - -### Current Implementation Gaps - -1. **Idempotency Keys** - - All SDKs require manual idempotency key provision - - No automatic generation - - No default behavior to ensure idempotency - -2. **Retry Logic** - - No SDK implements automatic retries - - All fail immediately on errors - - No distinction between retryable and non-retryable errors - -3. **Error Handling** - - Basic error handling exists - - No retry-specific error types - - No retry attempt tracking or logging - -### Required Implementation Details - -#### Idempotency Key Generation -- **Format**: UUID v4 (e.g., `550e8400-e29b-41d4-a716-446655440000`) -- **When**: Automatically for every audit log event creation request -- **Override**: Allow users to provide their own idempotency key if desired -- **Header**: Send as `Idempotency-Key` HTTP header - -#### Auto-Retry Logic -- **Retry On**: - - Network errors (connection failures, timeouts) - - 5xx HTTP status codes (server errors) - - 429 (Too Many Requests) - with respect to `Retry-After` header if present -- **Do Not Retry On**: - - 4xx client errors (except 429) - - Validation errors - - Authentication errors (401, 403) -- **Strategy**: Exponential backoff with jitter - - Base delay: 1 second - - Max delay: 30 seconds - - Max retries: 3-5 attempts - - Jitter: Random 0-25% of delay -- **Configuration**: Allow users to configure max retries and delays - -#### Error Response Handling -- Return appropriate error types to callers -- Distinguish between retryable and non-retryable errors -- Include retry attempt information in error messages (if applicable) -- Log retry attempts for debugging - ---- - -## Recommendations - -### Priority Order -1. **High Priority** (Most Used): - - Node.js - - Python - - Go - - Ruby - -2. **Medium Priority**: - - PHP - - .NET - - Java/Kotlin - -3. **Lower Priority**: - - Laravel (may inherit from PHP SDK) - - Elixir (experimental) - -### Implementation Approach - -1. **Start with Node.js SDK** as reference implementation -2. **Create shared design document** for consistent implementation across SDKs -3. **Implement in batches** by priority -4. **Test thoroughly** with actual API endpoints -5. **Update documentation** for each SDK - -### Testing Requirements - -For each SDK, test: -- ✅ Idempotency key is automatically generated and sent -- ✅ Same idempotency key returns same response (deduplication) -- ✅ Retries occur on 5xx errors -- ✅ Retries occur on network errors -- ✅ Retries respect `Retry-After` header for 429 -- ✅ No retries on 4xx errors (except 429) -- ✅ Exponential backoff with jitter works correctly -- ✅ Max retry limit is respected -- ✅ Error messages are appropriate - ---- - -## Next Steps - -1. ✅ **Phase 1 Complete**: Audit all SDKs (this document) -2. ⏭️ **Phase 2**: Design implementation approach and create shared specification -3. ⏭️ **Phase 3**: Implement idempotency key generation in all SDKs -4. ⏭️ **Phase 4**: Implement auto-retry logic in all SDKs -5. ⏭️ **Phase 5**: Update error handling and return appropriate responses -6. ⏭️ **Phase 6**: Testing and documentation updates - ---- - -## Notes - -- All GitHub links point to the `main` branch. Actual file paths may vary slightly. -- Some SDKs may have different directory structures than indicated - verify before implementation. -- Consider backward compatibility when adding new features. -- Ensure consistent behavior across all SDKs for better developer experience. 
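
For reference, a minimal usage sketch of the retry surface these patches introduce, based on the `RetryConfig` dataclass in `workos/utils/_base_http_client.py`, the `SyncHTTPClient` constructor, and the per-request `retry_config` parameter shown in the diffs above; the API key, base URL, and request path are placeholder values taken from the test fixtures, not real credentials or endpoints.

```python
# Minimal usage sketch; credentials, base URL, and path are placeholders.
# RetryConfig and SyncHTTPClient signatures follow the diffs in this patch series.
from workos.utils._base_http_client import RetryConfig
from workos.utils.http_client import SyncHTTPClient

# Construction mirrors the fixtures in tests/test_http_client_retry.py.
client = SyncHTTPClient(
    api_key="sk_test",                    # placeholder
    base_url="https://api.workos.test/",  # placeholder
    client_id="client_test",              # placeholder
    version="test",
)

# Retries are opt-in per request: up to 3 retries, 1s base delay doubling each
# attempt, capped at 30s, with up to 25% random jitter added to every delay.
retry = RetryConfig(max_retries=3, base_delay=1.0, max_delay=30.0, jitter=0.25)
response = client.request("test/path", retry_config=retry)  # placeholder path
```

Omitting `retry_config` preserves the existing single-attempt behavior, which is the backward-compatible default these patches keep.
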
- diff --git a/ENT-3983-python.md b/ENT-3983-python.md deleted file mode 100644 index 57abd544..00000000 --- a/ENT-3983-python.md +++ /dev/null @@ -1,411 +0,0 @@ -# ENT-3983: Python SDK Implementation Plan - -## Overview -Implement automatic idempotency key generation and retry logic with exponential backoff for audit log event creation in the WorkOS Python SDK. - -## Current State - -### File Structure -- **Audit Logs**: `workos/audit_logs.py` (lines 75-90) - - `create_event()` accepts optional `idempotency_key` parameter - - Only sets header if idempotency key is provided - - No auto-generation logic - -- **HTTP Client**: - - Base: `workos/utils/_base_http_client.py` - - Implementations: `workos/utils/http_client.py` (SyncHTTPClient + AsyncHTTPClient) - - Uses `httpx` library for HTTP requests - - No retry logic implemented - -- **Error Handling**: `workos/exceptions.py` - - Has `ServerException` for 5xx errors (line 82) - - No distinction between retryable/non-retryable errors - -- **Tests**: `tests/test_audit_logs.py` - - Test exists for manually provided idempotency key (lines 89-107) - - No tests for auto-generation or retry logic - -## Implementation Tasks - -### 1. Add Automatic Idempotency Key Generation - -**File**: `workos/audit_logs.py` - -**Changes in `create_event()` method (lines 75-90)**: -```python -import uuid - -def create_event( - self, - *, - organization_id: str, - event: AuditLogEvent, - idempotency_key: Optional[str] = None, -) -> None: - json = {"organization_id": organization_id, "event": event} - - headers = {} - # Auto-generate UUID v4 if not provided - if idempotency_key is None: - idempotency_key = str(uuid.uuid4()) - - headers["idempotency-key"] = idempotency_key - - self._http_client.request( - EVENTS_PATH, method=REQUEST_METHOD_POST, json=json, headers=headers - ) -``` - -**Key Points**: -- Import Python's built-in `uuid` module (no new dependency) -- Generate UUID v4 using `uuid.uuid4()` if `idempotency_key` is `None` -- Always set the header (simplifies logic) -- User-provided keys still take precedence (backward compatible) - -### 2. Implement Retry Logic with Exponential Backoff - -**Strategy**: Implement at the base HTTP client level to support both sync and async clients - -#### 2a. Add Retry Configuration - -**File**: `workos/_client_configuration.py` (or create new config class) - -**Add configuration options**: -```python -@dataclass -class RetryConfig: - max_retries: int = 3 - base_delay: float = 1.0 # seconds - max_delay: float = 30.0 # seconds - jitter: float = 0.25 # 25% jitter -``` - -**Update client initialization**: -- `workos/utils/_base_http_client.py` - Add `retry_config` parameter to `__init__` -- `workos/utils/http_client.py` - Pass through `retry_config` to base class - -#### 2b. 
Implement Retry Logic - -**File**: `workos/utils/_base_http_client.py` - -**Add helper methods**: -```python -import time -import random -from typing import Tuple - -def _is_retryable_error(self, response: httpx.Response) -> bool: - """Determine if an error should be retried.""" - status_code = response.status_code - - # Retry on 5xx server errors - if 500 <= status_code < 600: - return True - - # Retry on 429 rate limit - if status_code == 429: - return True - - # Do NOT retry 4xx client errors (except 429) - return False - -def _get_retry_delay(self, attempt: int, response: httpx.Response) -> float: - """Calculate delay with exponential backoff and jitter.""" - # Check for Retry-After header on 429 responses - if response.status_code == 429: - retry_after = response.headers.get("Retry-After") - if retry_after: - try: - return float(retry_after) - except ValueError: - pass # Fall through to exponential backoff - - # Exponential backoff: base_delay * 2^attempt - delay = self._retry_config.base_delay * (2 ** attempt) - - # Cap at max_delay - delay = min(delay, self._retry_config.max_delay) - - # Add jitter: random variation of 0-25% of delay - jitter_amount = delay * self._retry_config.jitter * random.random() - return delay + jitter_amount - -def _should_retry_exception(self, exc: Exception) -> bool: - """Determine if an exception should trigger a retry.""" - # Retry on network errors (connection, timeout) - if isinstance(exc, (httpx.ConnectError, httpx.TimeoutException)): - return True - return False -``` - -**Modify `_handle_response()` or create wrapper**: -- Wrap the actual request execution with retry logic -- Track attempt count -- Sleep between retries using calculated delay -- Re-raise non-retryable errors immediately - -#### 2c. Update Both Sync and Async Clients - -**File**: `workos/utils/http_client.py` - -**SyncHTTPClient.request()** (lines 83-114): -- Wrap `self._client.request()` call with retry loop -- Catch both `httpx` exceptions and check response status codes -- Use `time.sleep()` for delays - -**AsyncHTTPClient.request()** (lines 180-211): -- Same retry logic but use `asyncio.sleep()` for delays -- Await the request call properly - -**Pseudocode structure**: -```python -def request(self, ...): - last_exception = None - - for attempt in range(self._retry_config.max_retries + 1): - try: - response = self._client.request(...) - - # Check if we should retry based on status code - if attempt < self._retry_config.max_retries and self._is_retryable_error(response): - delay = self._get_retry_delay(attempt, response) - time.sleep(delay) # or asyncio.sleep for async - continue - - # No retry needed or max retries reached - return self._handle_response(response) - - except Exception as exc: - last_exception = exc - if attempt < self._retry_config.max_retries and self._should_retry_exception(exc): - delay = self._retry_config.base_delay * (2 ** attempt) - time.sleep(delay) - continue - raise - - # Should not reach here, but raise last exception if we do - raise last_exception -``` - -### 3. Error Handling Updates - -**File**: `workos/exceptions.py` - -**Optional Enhancement** (not strictly required): -- Add a method/property to exception classes to indicate if retryable -- Could add a `RateLimitException` class that extends `BaseRequestException` for 429 errors -- Include retry attempt information in error messages - -**Minimal approach**: -- No changes needed; use status code checking in base client - -### 4. 
Testing - -**File**: `tests/test_audit_logs.py` - -**Add test cases**: - -```python -class TestCreateEvent(_TestSetup): - def test_auto_generates_idempotency_key(self, capture_and_mock_http_client_request): - """Test that idempotency key is auto-generated when not provided.""" - organization_id = "org_123" - event = {...} # mock event - - request_kwargs = capture_and_mock_http_client_request( - self.http_client, {"success": True}, 200 - ) - - self.audit_logs.create_event( - organization_id=organization_id, - event=event, - # No idempotency_key provided - ) - - # Assert header exists and is a valid UUID v4 - assert "idempotency-key" in request_kwargs["headers"] - idempotency_key = request_kwargs["headers"]["idempotency-key"] - assert len(idempotency_key) == 36 # UUID format: 8-4-4-4-12 - assert idempotency_key.count("-") == 4 - - def test_respects_user_provided_idempotency_key(self, capture_and_mock_http_client_request): - """Test that user-provided idempotency key is used instead of auto-generated.""" - organization_id = "org_123" - event = {...} # mock event - custom_key = "my-custom-key" - - request_kwargs = capture_and_mock_http_client_request( - self.http_client, {"success": True}, 200 - ) - - self.audit_logs.create_event( - organization_id=organization_id, - event=event, - idempotency_key=custom_key, - ) - - assert request_kwargs["headers"]["idempotency-key"] == custom_key -``` - -**Create new test file**: `tests/test_http_client_retry.py` - -```python -class TestRetryLogic: - def test_retries_on_500_error(self): - """Test that 500 errors trigger retry.""" - # Mock client to return 500 twice, then 200 - # Assert request was made 3 times - - def test_retries_on_429_rate_limit(self): - """Test that 429 errors trigger retry.""" - # Mock client to return 429 with Retry-After header - # Assert retry delay respects Retry-After - - def test_no_retry_on_400_error(self): - """Test that 4xx errors (except 429) don't retry.""" - # Mock client to return 400 - # Assert request was made only once - - def test_respects_max_retries(self): - """Test that max retries limit is respected.""" - # Mock client to always return 500 - # Assert request count = max_retries + 1 - - def test_exponential_backoff_delays(self): - """Test that retry delays follow exponential backoff.""" - # Mock time.sleep and track delays - # Assert delays are: 1s, 2s, 4s (with jitter) - - def test_retries_on_network_error(self): - """Test that network errors trigger retry.""" - # Mock client to raise httpx.ConnectError - # Assert retries occur -``` - -### 5. Configuration & Backward Compatibility - -**No breaking changes**: -- Idempotency key auto-generation only activates when `idempotency_key=None` -- Retry logic applies globally but with sensible defaults (3 retries) -- Consider making retry config optional or use default values - -**Optional**: Add retry configuration to main client -```python -# In workos/client.py or workos/__init__.py -workos_client = WorkOS( - api_key="...", - max_retries=3, # Optional: configure retry behavior - retry_base_delay=1.0, -) -``` - -## Implementation Order - -1. ✅ **Phase 1**: Idempotency key auto-generation (Simple, isolated) - - Modify `workos/audit_logs.py` - - Add tests in `tests/test_audit_logs.py` - -2. ✅ **Phase 2**: Retry logic infrastructure (Complex, affects all requests) - - Add retry config to `_base_http_client.py` - - Implement helper methods for retry decisions - -3. 
✅ **Phase 3**: Apply retry logic to sync client - - Update `SyncHTTPClient.request()` in `http_client.py` - - Add tests for sync retry behavior - -4. ✅ **Phase 4**: Apply retry logic to async client - - Update `AsyncHTTPClient.request()` in `http_client.py` - - Add tests for async retry behavior - -5. ✅ **Phase 5**: Integration testing & documentation - - Test end-to-end with actual audit log creation - - Update SDK documentation - -## Files to Modify - -| File | Purpose | Lines to Change | -|------|---------|-----------------| -| `workos/audit_logs.py` | Add UUID generation | ~75-90 (create_event method) | -| `workos/utils/_base_http_client.py` | Add retry logic base | Add ~50 lines (retry methods) | -| `workos/utils/http_client.py` | Implement retry in sync/async | ~83-114, ~180-211 (request methods) | -| `workos/exceptions.py` | (Optional) Add retryable classification | Minimal or none | -| `tests/test_audit_logs.py` | Test idempotency key auto-gen | Add 2 new test methods | -| `tests/test_http_client_retry.py` | Test retry logic | New file (~100-150 lines) | - -## Dependencies - -**No new dependencies required**: -- `uuid`: Python built-in (for idempotency keys) -- `time`: Python built-in (for delays) -- `random`: Python built-in (for jitter) -- `httpx`: Already in use (version >=0.28.1) - -## Testing Checklist - -- [ ] Auto-generated idempotency key is valid UUID v4 -- [ ] User-provided idempotency key takes precedence -- [ ] Retries occur on 5xx errors -- [ ] Retries occur on 429 rate limit -- [ ] Retry-After header is respected for 429 -- [ ] No retries on 4xx errors (except 429) -- [ ] Max retry limit is enforced -- [ ] Exponential backoff with jitter works correctly -- [ ] Network errors (connection, timeout) trigger retries -- [ ] Both sync and async clients have retry logic -- [ ] Backward compatibility maintained - -## Retry Behavior Specification - -### Retry Conditions -**DO Retry**: -- HTTP 5xx (500-599) status codes -- HTTP 429 (Too Many Requests) -- Network errors: `httpx.ConnectError`, `httpx.TimeoutException` - -**DO NOT Retry**: -- HTTP 4xx (except 429): 400, 401, 403, 404, 409, etc. -- Authentication/authorization errors -- Validation errors - -### Retry Strategy -- **Algorithm**: Exponential backoff with jitter -- **Base delay**: 1 second -- **Max delay**: 30 seconds -- **Jitter**: 0-25% random variation -- **Max retries**: 3 attempts (configurable) -- **Retry-After**: Honor header value for 429 responses - -### Delay Calculation -``` -Attempt 0 (initial): No delay -Attempt 1: 1s + jitter (0-0.25s) = 1-1.25s -Attempt 2: 2s + jitter (0-0.5s) = 2-2.5s -Attempt 3: 4s + jitter (0-1s) = 4-5s -``` - -## Notes for Implementation - -1. **Thread Safety**: The retry logic uses `time.sleep()` (sync) and `asyncio.sleep()` (async), both safe for their contexts -2. **Logging**: Consider adding debug logs for retry attempts (not required for MVP) -3. **Metrics**: Could track retry counts for monitoring (future enhancement) -4. **Rate Limiting**: The jitter helps prevent thundering herd when multiple clients retry simultaneously -5. **Idempotency**: The auto-generated key ensures that retried requests don't create duplicate events - -## Verification - -After implementation, verify: -1. Run existing test suite - all pass (backward compatibility) -2. New tests pass for idempotency and retry logic -3. 
Manual test with actual API: - - Create audit log event without providing idempotency key - - Verify idempotency key is sent in request headers - - Simulate network failure and observe retries - - Verify 429 rate limit triggers retry with proper delay - -## References - -- Audit logs API endpoint: `POST /audit_logs/events` -- Idempotency header: `Idempotency-Key` (lowercase: `idempotency-key`) -- UUID v4 format: `xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx` -- httpx documentation: https://www.python-httpx.org/ - diff --git a/ENT-3983.md b/ENT-3983.md deleted file mode 100644 index 522349c5..00000000 --- a/ENT-3983.md +++ /dev/null @@ -1,102 +0,0 @@ -# ENT-3983: SDK Audit and Add Idempotency Key + Auto Retries - -## Background - -This ticket addresses improving reliability and consistency of audit log event ingestion across all WorkOS SDKs. Currently, the API supports idempotency keys for audit log events and can auto-generate them if not provided, but SDKs should proactively send idempotency keys by default to ensure better deduplication and reliability. Additionally, SDKs should implement automatic retry logic for transient failures when creating audit log events. - -## What This Means - -### Current State -- **API Side**: The audit log ingestion endpoint (`POST /audit_logs`) already supports idempotency keys via the `Idempotency-Key` header - - If provided, the API uses `createHashedIdempotencyKey()` to hash the key with event data - - If not provided, the API auto-generates one using `generateIdempotencyKey()` (format: `autogenerated-{ulid}.{hash}`) - - Location: `packages/api/src/audit-log-events/audit-log-events.controller.ts:318-321` -- **SDK Side**: SDKs may not be consistently sending idempotency keys by default, and may lack retry logic for audit log event creation - -### Desired State -1. **Idempotency Keys**: All SDKs should automatically generate and send idempotency keys (UUID v4 recommended) for every audit log event creation request -2. **Auto-Retries**: All SDKs should implement retry logic with exponential backoff for transient failures (network errors, 5xx status codes, rate limits) -3. 
**Error Handling**: SDKs should return appropriate error responses to callers, distinguishing between retryable and non-retryable errors - -## Code to Review - -### API Implementation (Reference) -- **Controller**: `packages/api/src/audit-log-events/audit-log-events.controller.ts` - - `generateIdempotencyKey()` method (lines 122-144) - - `create()` endpoint (lines 288-341) -- **Utilities**: `packages/api-services/src/audit-log-events/utils/create-hashed-idempotency-key.ts` - - `generateIdempotencyKey()` - generates auto idempotency key - - `createHashedIdempotencyKey()` - hashes provided key with event data -- **Service Layer**: `packages/api-services/src/partitioned-audit-log-events/audit-log-events.service.ts` - - `create()` method uses `idempotency_key` for deduplication (line 119) - -### SDK Repositories (External) -SDKs are in separate GitHub repositories, not in this monorepo: -- `workos-node` (Node.js) -- `workos-go` (Go) -- `workos-ruby` (Ruby) -- `workos-python` (Python) -- `workos-php` (PHP) -- `workos-php-laravel` (Laravel) -- `workos-kotlin` (Java) -- `workos-dotnet` (.NET) -- `workos-elixir` (Elixir - experimental) - -### Documentation Examples -- `packages/docs/content/reference/audit-logs/_code/create-event-request.*` - Examples showing idempotency key usage -- `packages/docs/content/audit-logs/index.mdx` - Idempotency documentation (lines 105-115) - -## Game Plan - -### Phase 1: Audit Current State -1. **For each SDK repository:** - - [ ] Review audit log event creation method(s) - - [ ] Check if idempotency keys are generated/sent by default - - [ ] Check if retry logic exists for audit log endpoints - - [ ] Document current behavior and gaps - -### Phase 2: Implement Idempotency Keys -2. **For each SDK:** - - [ ] Add automatic UUID v4 generation for audit log event creation - - [ ] Ensure `Idempotency-Key` header is sent with every audit log request - - [ ] Make idempotency key generation configurable (allow override) - - [ ] Update tests to verify idempotency key is sent - -### Phase 3: Implement Auto-Retries -3. **For each SDK:** - - [ ] Add retry logic with exponential backoff for audit log event creation - - [ ] Retry on: network errors, 5xx status codes, 429 (rate limit) - - [ ] Do NOT retry on: 4xx client errors (except 429), validation errors - - [ ] Configure retry attempts (suggest 3-5 max retries) - - [ ] Add jitter to backoff delays to prevent thundering herd - - [ ] Update tests to verify retry behavior - -### Phase 4: Error Handling -4. **For each SDK:** - - [ ] Ensure proper error types/classes are returned - - [ ] Distinguish between retryable and non-retryable errors - - [ ] Return appropriate error messages to callers - - [ ] Log retry attempts and final failures appropriately - -### Phase 5: Testing & Documentation -5. **For each SDK:** - - [ ] Add unit tests for idempotency key generation - - [ ] Add integration tests for retry logic - - [ ] Test idempotency key deduplication (same key = same response) - - [ ] Update SDK documentation to reflect automatic idempotency keys - - [ ] Update examples if needed - -### Phase 6: Release & Validation -6. **Cross-SDK:** - - [ ] Coordinate releases across all SDKs - - [ ] Monitor for any issues post-deployment - - [ ] Verify idempotency key usage in production logs - - [ ] Validate retry behavior doesn't cause excessive API calls - -## Notes - -- **Idempotency Key Format**: SDKs should generate UUID v4 strings. The API will hash them with event data using `createHashedIdempotencyKey()`. 
-- **Backward Compatibility**: Existing code that manually provides idempotency keys should continue to work (SDK should use provided key if available). -- **Retry Strategy**: Consider using exponential backoff with jitter (e.g., `baseDelay * 2^attempt + random(0, jitter)`). -- **Rate Limiting**: Be careful with retries on 429 responses - respect `Retry-After` headers if present. - diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py deleted file mode 100644 index ad8e976b..00000000 --- a/tests/test_http_client_retry.py +++ /dev/null @@ -1,615 +0,0 @@ -from unittest.mock import AsyncMock, MagicMock, patch -import pytest -import httpx - -from workos.utils._base_http_client import RetryConfig -from workos.utils.http_client import AsyncHTTPClient, SyncHTTPClient -from workos.exceptions import ServerException, BadRequestException - - -class TestSyncRetryLogic: - """Test retry logic for synchronous HTTP client.""" - - @pytest.fixture - def sync_http_client(self): - """Create a SyncHTTPClient for testing.""" - return SyncHTTPClient( - api_key="sk_test", - base_url="https://api.workos.test/", - client_id="client_test", - version="test", - ) - - @pytest.fixture - def retry_config(self): - """Create a RetryConfig for testing.""" - return RetryConfig( - max_retries=3, - base_delay=0.1, # Use shorter delays for faster tests - max_delay=1.0, - jitter=0.0, # No jitter for predictable tests - ) - - @staticmethod - def create_mock_request_with_retries( - failure_count: int, - failure_response=None, - failure_exception=None, - success_response=None, - ): - """ - Create a mock request function that fails N times before succeeding. - - Args: - failure_count: Number of times to fail before success - failure_response: Response to return on failure (status_code, json, headers) - failure_exception: Exception to raise on failure (instead of response) - success_response: Response to return on success (default: 200 with {"success": True}) - - Returns: - A tuple of (mock_function, call_count_tracker) where call_count_tracker - is a list that tracks the number of calls - """ - call_count = [0] # Use list to allow modification in nested function - - def mock_request(*args, **kwargs): - call_count[0] += 1 - if call_count[0] <= failure_count: - if failure_exception: - raise failure_exception - if failure_response: - return httpx.Response(**failure_response) - - if success_response: - return httpx.Response(**success_response) - return httpx.Response(status_code=200, json={"success": True}) - - return mock_request, call_count - - def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 408 (Request Timeout) errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, - failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 - - def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 500 errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_response={"status_code": 500, "json": {"error": "Server error"}}, - ) - - monkeypatch.setattr( - 
sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 3 - assert response == {"success": True} - # Verify sleep was called with exponential backoff - assert mock_sleep.call_count == 2 - - def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 502 (Bad Gateway) errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 3 - assert response == {"success": True} - assert mock_sleep.call_count == 2 - - def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 504 (Gateway Timeout) errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, - failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 - - def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 503, - "json": {"error": "Service unavailable"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(ServerException): - sync_http_client.request("test/path", retry_config=retry_config) - - # Should only be called once (no retries on 503) - assert call_count[0] == 1 - - def test_retries_on_429_rate_limit( - self, sync_http_client, retry_config, monkeypatch - ): - """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 429, - "headers": {"Retry-After": "0.1"}, - "json": {"error": "Rate limit exceeded"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - sync_http_client.request("test/path", retry_config=retry_config) - - # Should only be called once (no retries on 429) - assert call_count[0] == 1 - - def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): - """Test that 4xx errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 400, - "json": {"error": "Bad request", "message": "Invalid data"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - 
sync_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - - def test_no_retry_on_401_error(self, sync_http_client, monkeypatch): - """Test that 401 errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 401, - "json": {"error": "Unauthorized", "message": "Invalid credentials"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(Exception): - sync_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - - def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch): - """Test that max retries limit is respected.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={"status_code": 500, "json": {"error": "Server error"}}, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep"): - with pytest.raises(ServerException): - sync_http_client.request("test/path", retry_config=retry_config) - - # Should be called max_retries + 1 times (initial + 3 retries) - assert call_count[0] == 4 - - def test_retries_on_network_error( - self, sync_http_client, retry_config, monkeypatch - ): - """Test that network errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, failure_exception=httpx.ConnectError("Connection failed") - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep"): - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 3 - assert response == {"success": True} - - def test_retries_on_timeout_error( - self, sync_http_client, retry_config, monkeypatch - ): - """Test that timeout errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, - failure_exception=httpx.TimeoutException("Request timed out"), - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with patch("time.sleep"): - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 2 - assert response == {"success": True} - - def test_no_retry_on_success(self, sync_http_client, monkeypatch): - """Test that successful requests don't retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=0 # Success immediately - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - response = sync_http_client.request("test/path") - - assert call_count[0] == 1 - assert response == {"success": True} - - -class TestAsyncRetryLogic: - """Test retry logic for asynchronous HTTP client.""" - - @pytest.fixture - def async_http_client(self): - """Create an AsyncHTTPClient for testing.""" - return AsyncHTTPClient( - api_key="sk_test", - base_url="https://api.workos.test/", - client_id="client_test", - version="test", - ) - - @pytest.fixture - def retry_config(self): - """Create a RetryConfig for testing.""" - return RetryConfig( - max_retries=3, - base_delay=0.1, # Use shorter delays for faster tests - max_delay=1.0, - jitter=0.0, # No jitter for 
predictable tests - ) - - @staticmethod - def create_mock_request_with_retries( - failure_count: int, - failure_response=None, - failure_exception=None, - success_response=None, - ): - """ - Create an async mock request function that fails N times before succeeding. - - Args: - failure_count: Number of times to fail before success - failure_response: Response to return on failure (status_code, json, headers) - failure_exception: Exception to raise on failure (instead of response) - success_response: Response to return on success (default: 200 with {"success": True}) - - Returns: - A tuple of (mock_function, call_count_tracker) where call_count_tracker - is a list that tracks the number of calls - """ - call_count = [0] # Use list to allow modification in nested function - - async def mock_request(*args, **kwargs): - call_count[0] += 1 - if call_count[0] <= failure_count: - if failure_exception: - raise failure_exception - if failure_response: - return httpx.Response(**failure_response) - - if success_response: - return httpx.Response(**success_response) - return httpx.Response(status_code=200, json={"success": True}) - - return mock_request, call_count - - @pytest.mark.asyncio - async def test_retries_on_408_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 408 (Request Timeout) errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, - failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 - - @pytest.mark.asyncio - async def test_retries_on_500_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 500 errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_response={"status_code": 500, "json": {"error": "Server error"}}, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 3 - assert response == {"success": True} - # Verify sleep was called with exponential backoff - assert mock_sleep.call_count == 2 - - @pytest.mark.asyncio - async def test_retries_on_502_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 502 (Bad Gateway) errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, - failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 3 - assert response == {"success": True} - assert mock_sleep.call_count == 2 - - @pytest.mark.asyncio - async def test_retries_on_504_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 504 (Gateway Timeout) errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - 
failure_count=1, - failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 - - @pytest.mark.asyncio - async def test_no_retry_on_503_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 503, - "json": {"error": "Service unavailable"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(ServerException): - await async_http_client.request("test/path", retry_config=retry_config) - - # Should only be called once (no retries on 503) - assert call_count[0] == 1 - - @pytest.mark.asyncio - async def test_retries_on_429_rate_limit( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 429, - "headers": {"Retry-After": "0.1"}, - "json": {"error": "Rate limit exceeded"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - await async_http_client.request("test/path", retry_config=retry_config) - - # Should only be called once (no retries on 429) - assert call_count[0] == 1 - - @pytest.mark.asyncio - async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): - """Test that 4xx errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 400, - "json": {"error": "Bad request", "message": "Invalid data"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - await async_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - - @pytest.mark.asyncio - async def test_no_retry_on_401_error(self, async_http_client, monkeypatch): - """Test that 401 errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 401, - "json": {"error": "Unauthorized", "message": "Invalid credentials"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(Exception): - await async_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - - @pytest.mark.asyncio - async def test_respects_max_retries( - self, async_http_client, retry_config, monkeypatch - ): - """Test that max retries limit is respected.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={"status_code": 500, 
"json": {"error": "Server error"}}, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep"): - with pytest.raises(ServerException): - await async_http_client.request("test/path", retry_config=retry_config) - - # Should be called max_retries + 1 times (initial + 3 retries) - assert call_count[0] == 4 - - @pytest.mark.asyncio - async def test_retries_on_network_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that network errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, failure_exception=httpx.ConnectError("Connection failed") - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep"): - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 3 - assert response == {"success": True} - - @pytest.mark.asyncio - async def test_retries_on_timeout_error( - self, async_http_client, retry_config, monkeypatch - ): - """Test that timeout errors trigger retry.""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, - failure_exception=httpx.TimeoutException("Request timed out"), - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with patch("asyncio.sleep"): - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 2 - assert response == {"success": True} - - @pytest.mark.asyncio - async def test_no_retry_on_success(self, async_http_client, monkeypatch): - """Test that successful requests don't retry (no retry_config needed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=0 # Success immediately - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - response = await async_http_client.request("test/path") - - assert call_count[0] == 1 - assert response == {"success": True} From a8854b5676a8bde7c9c66d6a471b7e3a46e6eb74 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 07:59:39 -0500 Subject: [PATCH 10/18] lol --- .gitignore | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitignore b/.gitignore index 5fca4494..025e838d 100644 --- a/.gitignore +++ b/.gitignore @@ -138,6 +138,3 @@ dmypy.json # Intellij .idea/ - -# Cursor -.cursor/ From 8fd6a5d3f726feff59045b926c2873109465239d Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 08:05:54 -0500 Subject: [PATCH 11/18] moar --- workos/utils/_base_http_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index af8fde6d..d3474bc0 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -237,13 +237,13 @@ def _calculate_backoff_delay( The delay in seconds to wait before the next retry """ # Exponential backoff: base_delay * 2^attempt - delay = retry_config.base_delay * (2**attempt) + delay: float = retry_config.base_delay * (2**attempt) # Cap at max_delay delay = min(delay, retry_config.max_delay) # Add jitter: random variation of 0-25% of delay - jitter_amount = delay * retry_config.jitter * random.random() + jitter_amount: float = delay * retry_config.jitter * random.random() return delay + jitter_amount def 
_should_retry_exception(self, exc: Exception) -> bool: From 53a706bbb7d8b5d470de1e98421f909a3d080e24 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 08:46:39 -0500 Subject: [PATCH 12/18] Refactor retry logic method naming for clarity --- workos/audit_logs.py | 4 ++-- workos/utils/_base_http_client.py | 26 +++++++++----------------- workos/utils/http_client.py | 16 ++++++---------- 3 files changed, 17 insertions(+), 29 deletions(-) diff --git a/workos/audit_logs.py b/workos/audit_logs.py index 9cf544d8..eee2cc1b 100644 --- a/workos/audit_logs.py +++ b/workos/audit_logs.py @@ -90,13 +90,13 @@ def create_event( headers["idempotency-key"] = idempotency_key - # Enable retries for audit log event creation with default settings + # Enable retries for audit log event creation with default retryConfig self._http_client.request( EVENTS_PATH, method=REQUEST_METHOD_POST, json=json, headers=headers, - retry_config=RetryConfig(), # Uses default values: 3 retries, exponential backoff + retry_config=RetryConfig(), ) def create_export( diff --git a/workos/utils/_base_http_client.py b/workos/utils/_base_http_client.py index d3474bc0..905bbbb5 100644 --- a/workos/utils/_base_http_client.py +++ b/workos/utils/_base_http_client.py @@ -218,15 +218,14 @@ def _is_retryable_error(self, response: httpx.Response) -> bool: """Determine if an error should be retried.""" return response.status_code in RETRY_STATUS_CODES - def _get_retry_delay( - self, attempt: int, response: httpx.Response, retry_config: RetryConfig - ) -> float: - """Calculate delay with exponential backoff and jitter.""" - return self._calculate_backoff_delay(attempt, retry_config) - - def _calculate_backoff_delay( - self, attempt: int, retry_config: RetryConfig - ) -> float: + def _is_retryable_exception(self, exc: Exception) -> bool: + """Determine if an exception should trigger a retry.""" + # Retry on network [connection, timeout] exceptions + if isinstance(exc, (httpx.ConnectError, httpx.TimeoutException)): + return True + return False + + def _get_backoff_delay(self, attempt: int, retry_config: RetryConfig) -> float: """Calculate delay with exponential backoff and jitter. 
Args: @@ -234,7 +233,7 @@ def _calculate_backoff_delay( retry_config: The retry configuration Returns: - The delay in seconds to wait before the next retry + The delay, in seconds, to wait before the next retry """ # Exponential backoff: base_delay * 2^attempt delay: float = retry_config.base_delay * (2**attempt) @@ -246,13 +245,6 @@ def _calculate_backoff_delay( jitter_amount: float = delay * retry_config.jitter * random.random() return delay + jitter_amount - def _should_retry_exception(self, exc: Exception) -> bool: - """Determine if an exception should trigger a retry.""" - # Retry on network errors (connection, timeout) - if isinstance(exc, (httpx.ConnectError, httpx.TimeoutException)): - return True - return False - def build_request_url( self, url: str, diff --git a/workos/utils/http_client.py b/workos/utils/http_client.py index b2daabb3..08aed86c 100644 --- a/workos/utils/http_client.py +++ b/workos/utils/http_client.py @@ -133,7 +133,7 @@ def request( if attempt < retry_config.max_retries and self._is_retryable_error( response ): - delay = self._get_retry_delay(attempt, response, retry_config) + delay = self._get_backoff_delay(attempt, retry_config) time.sleep(delay) continue @@ -142,10 +142,8 @@ def request( except Exception as exc: last_exception = exc - if attempt < retry_config.max_retries and self._should_retry_exception( - exc - ): - delay = self._calculate_backoff_delay(attempt, retry_config) + if attempt < retry_config.max_retries and self._is_retryable_exception(exc): + delay = self._get_backoff_delay(attempt, retry_config) time.sleep(delay) continue raise @@ -270,7 +268,7 @@ async def request( if attempt < retry_config.max_retries and self._is_retryable_error( response ): - delay = self._get_retry_delay(attempt, response, retry_config) + delay = self._get_backoff_delay(attempt, retry_config) await asyncio.sleep(delay) continue @@ -279,10 +277,8 @@ async def request( except Exception as exc: last_exception = exc - if attempt < retry_config.max_retries and self._should_retry_exception( - exc - ): - delay = self._calculate_backoff_delay(attempt, retry_config) + if attempt < retry_config.max_retries and self._is_retryable_exception(exc): + delay = self._get_backoff_delay(attempt, retry_config) await asyncio.sleep(delay) continue raise From 479ced1145767b375b34b8ff3531e155a1ad81a6 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 09:00:46 -0500 Subject: [PATCH 13/18] Fix Black formatting for HTTP client retry implementation --- tests/test_http_client_retry.py | 615 ++++++++++++++++++++++++++++++++ workos/utils/http_client.py | 8 +- 2 files changed, 621 insertions(+), 2 deletions(-) create mode 100644 tests/test_http_client_retry.py diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py new file mode 100644 index 00000000..ad8e976b --- /dev/null +++ b/tests/test_http_client_retry.py @@ -0,0 +1,615 @@ +from unittest.mock import AsyncMock, MagicMock, patch +import pytest +import httpx + +from workos.utils._base_http_client import RetryConfig +from workos.utils.http_client import AsyncHTTPClient, SyncHTTPClient +from workos.exceptions import ServerException, BadRequestException + + +class TestSyncRetryLogic: + """Test retry logic for synchronous HTTP client.""" + + @pytest.fixture + def sync_http_client(self): + """Create a SyncHTTPClient for testing.""" + return SyncHTTPClient( + api_key="sk_test", + base_url="https://api.workos.test/", + client_id="client_test", + version="test", + ) + + @pytest.fixture + def retry_config(self): + 
"""Create a RetryConfig for testing.""" + return RetryConfig( + max_retries=3, + base_delay=0.1, # Use shorter delays for faster tests + max_delay=1.0, + jitter=0.0, # No jitter for predictable tests + ) + + @staticmethod + def create_mock_request_with_retries( + failure_count: int, + failure_response=None, + failure_exception=None, + success_response=None, + ): + """ + Create a mock request function that fails N times before succeeding. + + Args: + failure_count: Number of times to fail before success + failure_response: Response to return on failure (status_code, json, headers) + failure_exception: Exception to raise on failure (instead of response) + success_response: Response to return on success (default: 200 with {"success": True}) + + Returns: + A tuple of (mock_function, call_count_tracker) where call_count_tracker + is a list that tracks the number of calls + """ + call_count = [0] # Use list to allow modification in nested function + + def mock_request(*args, **kwargs): + call_count[0] += 1 + if call_count[0] <= failure_count: + if failure_exception: + raise failure_exception + if failure_response: + return httpx.Response(**failure_response) + + if success_response: + return httpx.Response(**success_response) + return httpx.Response(status_code=200, json={"success": True}) + + return mock_request, call_count + + def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 408 (Request Timeout) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count[0] == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 500 errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 500, "json": {"error": "Server error"}}, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count[0] == 3 + assert response == {"success": True} + # Verify sleep was called with exponential backoff + assert mock_sleep.call_count == 2 + + def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 502 (Bad Gateway) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count[0] == 3 + assert response == {"success": True} + assert mock_sleep.call_count == 2 + + def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 504 (Gateway Timeout) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + 
failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep") as mock_sleep: + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count[0] == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch): + """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 503, + "json": {"error": "Service unavailable"}, + }, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with pytest.raises(ServerException): + sync_http_client.request("test/path", retry_config=retry_config) + + # Should only be called once (no retries on 503) + assert call_count[0] == 1 + + def test_retries_on_429_rate_limit( + self, sync_http_client, retry_config, monkeypatch + ): + """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 429, + "headers": {"Retry-After": "0.1"}, + "json": {"error": "Rate limit exceeded"}, + }, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with pytest.raises(BadRequestException): + sync_http_client.request("test/path", retry_config=retry_config) + + # Should only be called once (no retries on 429) + assert call_count[0] == 1 + + def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): + """Test that 4xx errors don't retry (no retry_config passed).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 400, + "json": {"error": "Bad request", "message": "Invalid data"}, + }, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with pytest.raises(BadRequestException): + sync_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count[0] == 1 + + def test_no_retry_on_401_error(self, sync_http_client, monkeypatch): + """Test that 401 errors don't retry (no retry_config passed).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 401, + "json": {"error": "Unauthorized", "message": "Invalid credentials"}, + }, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with pytest.raises(Exception): + sync_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count[0] == 1 + + def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch): + """Test that max retries limit is respected.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={"status_code": 500, "json": {"error": "Server error"}}, + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep"): + with pytest.raises(ServerException): + 
sync_http_client.request("test/path", retry_config=retry_config) + + # Should be called max_retries + 1 times (initial + 3 retries) + assert call_count[0] == 4 + + def test_retries_on_network_error( + self, sync_http_client, retry_config, monkeypatch + ): + """Test that network errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, failure_exception=httpx.ConnectError("Connection failed") + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep"): + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count[0] == 3 + assert response == {"success": True} + + def test_retries_on_timeout_error( + self, sync_http_client, retry_config, monkeypatch + ): + """Test that timeout errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_exception=httpx.TimeoutException("Request timed out"), + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + with patch("time.sleep"): + response = sync_http_client.request("test/path", retry_config=retry_config) + + assert call_count[0] == 2 + assert response == {"success": True} + + def test_no_retry_on_success(self, sync_http_client, monkeypatch): + """Test that successful requests don't retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=0 # Success immediately + ) + + monkeypatch.setattr( + sync_http_client._client, "request", MagicMock(side_effect=mock_request) + ) + + response = sync_http_client.request("test/path") + + assert call_count[0] == 1 + assert response == {"success": True} + + +class TestAsyncRetryLogic: + """Test retry logic for asynchronous HTTP client.""" + + @pytest.fixture + def async_http_client(self): + """Create an AsyncHTTPClient for testing.""" + return AsyncHTTPClient( + api_key="sk_test", + base_url="https://api.workos.test/", + client_id="client_test", + version="test", + ) + + @pytest.fixture + def retry_config(self): + """Create a RetryConfig for testing.""" + return RetryConfig( + max_retries=3, + base_delay=0.1, # Use shorter delays for faster tests + max_delay=1.0, + jitter=0.0, # No jitter for predictable tests + ) + + @staticmethod + def create_mock_request_with_retries( + failure_count: int, + failure_response=None, + failure_exception=None, + success_response=None, + ): + """ + Create an async mock request function that fails N times before succeeding. 
+ + Args: + failure_count: Number of times to fail before success + failure_response: Response to return on failure (status_code, json, headers) + failure_exception: Exception to raise on failure (instead of response) + success_response: Response to return on success (default: 200 with {"success": True}) + + Returns: + A tuple of (mock_function, call_count_tracker) where call_count_tracker + is a list that tracks the number of calls + """ + call_count = [0] # Use list to allow modification in nested function + + async def mock_request(*args, **kwargs): + call_count[0] += 1 + if call_count[0] <= failure_count: + if failure_exception: + raise failure_exception + if failure_response: + return httpx.Response(**failure_response) + + if success_response: + return httpx.Response(**success_response) + return httpx.Response(status_code=200, json={"success": True}) + + return mock_request, call_count + + @pytest.mark.asyncio + async def test_retries_on_408_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that 408 (Request Timeout) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) + + assert call_count[0] == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + @pytest.mark.asyncio + async def test_retries_on_500_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that 500 errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 500, "json": {"error": "Server error"}}, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) + + assert call_count[0] == 3 + assert response == {"success": True} + # Verify sleep was called with exponential backoff + assert mock_sleep.call_count == 2 + + @pytest.mark.asyncio + async def test_retries_on_502_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that 502 (Bad Gateway) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, + failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep") as mock_sleep: + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) + + assert call_count[0] == 3 + assert response == {"success": True} + assert mock_sleep.call_count == 2 + + @pytest.mark.asyncio + async def test_retries_on_504_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that 504 (Gateway Timeout) errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep") as mock_sleep: + response = 
await async_http_client.request( + "test/path", retry_config=retry_config + ) + + assert call_count[0] == 2 + assert response == {"success": True} + assert mock_sleep.call_count == 1 + + @pytest.mark.asyncio + async def test_no_retry_on_503_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 503, + "json": {"error": "Service unavailable"}, + }, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with pytest.raises(ServerException): + await async_http_client.request("test/path", retry_config=retry_config) + + # Should only be called once (no retries on 503) + assert call_count[0] == 1 + + @pytest.mark.asyncio + async def test_retries_on_429_rate_limit( + self, async_http_client, retry_config, monkeypatch + ): + """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 429, + "headers": {"Retry-After": "0.1"}, + "json": {"error": "Rate limit exceeded"}, + }, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with pytest.raises(BadRequestException): + await async_http_client.request("test/path", retry_config=retry_config) + + # Should only be called once (no retries on 429) + assert call_count[0] == 1 + + @pytest.mark.asyncio + async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): + """Test that 4xx errors don't retry (no retry_config passed).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 400, + "json": {"error": "Bad request", "message": "Invalid data"}, + }, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with pytest.raises(BadRequestException): + await async_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count[0] == 1 + + @pytest.mark.asyncio + async def test_no_retry_on_401_error(self, async_http_client, monkeypatch): + """Test that 401 errors don't retry (no retry_config passed).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={ + "status_code": 401, + "json": {"error": "Unauthorized", "message": "Invalid credentials"}, + }, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with pytest.raises(Exception): + await async_http_client.request("test/path") + + # Should only be called once (no retries) + assert call_count[0] == 1 + + @pytest.mark.asyncio + async def test_respects_max_retries( + self, async_http_client, retry_config, monkeypatch + ): + """Test that max retries limit is respected.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=100, # Always fail + failure_response={"status_code": 500, "json": {"error": "Server error"}}, + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep"): + with pytest.raises(ServerException): + await async_http_client.request("test/path", 
retry_config=retry_config) + + # Should be called max_retries + 1 times (initial + 3 retries) + assert call_count[0] == 4 + + @pytest.mark.asyncio + async def test_retries_on_network_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that network errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=2, failure_exception=httpx.ConnectError("Connection failed") + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep"): + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) + + assert call_count[0] == 3 + assert response == {"success": True} + + @pytest.mark.asyncio + async def test_retries_on_timeout_error( + self, async_http_client, retry_config, monkeypatch + ): + """Test that timeout errors trigger retry.""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=1, + failure_exception=httpx.TimeoutException("Request timed out"), + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + with patch("asyncio.sleep"): + response = await async_http_client.request( + "test/path", retry_config=retry_config + ) + + assert call_count[0] == 2 + assert response == {"success": True} + + @pytest.mark.asyncio + async def test_no_retry_on_success(self, async_http_client, monkeypatch): + """Test that successful requests don't retry (no retry_config needed).""" + mock_request, call_count = self.create_mock_request_with_retries( + failure_count=0 # Success immediately + ) + + monkeypatch.setattr( + async_http_client._client, "request", AsyncMock(side_effect=mock_request) + ) + + response = await async_http_client.request("test/path") + + assert call_count[0] == 1 + assert response == {"success": True} diff --git a/workos/utils/http_client.py b/workos/utils/http_client.py index 08aed86c..94022874 100644 --- a/workos/utils/http_client.py +++ b/workos/utils/http_client.py @@ -142,7 +142,9 @@ def request( except Exception as exc: last_exception = exc - if attempt < retry_config.max_retries and self._is_retryable_exception(exc): + if attempt < retry_config.max_retries and self._is_retryable_exception( + exc + ): delay = self._get_backoff_delay(attempt, retry_config) time.sleep(delay) continue @@ -277,7 +279,9 @@ async def request( except Exception as exc: last_exception = exc - if attempt < retry_config.max_retries and self._is_retryable_exception(exc): + if attempt < retry_config.max_retries and self._is_retryable_exception( + exc + ): delay = self._get_backoff_delay(attempt, retry_config) await asyncio.sleep(delay) continue From 2bb414f9941e3920c2684c5952ff75e095196c89 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 09:12:29 -0500 Subject: [PATCH 14/18] Align async retry tests with sync test suite --- .cursor/commands/commit.md | 21 ++ .cursor/commands/format.md | 2 + .cursor/commands/new-branch.md | 27 +++ .cursor/commands/review-pr.md | 27 +++ .cursor/rules/.gitignore | 4 + .cursor/rules/create-data-migration.mdc | 16 ++ tests/test_http_client_retry.py | 303 ++++++++---------------- 7 files changed, 200 insertions(+), 200 deletions(-) create mode 100644 .cursor/commands/commit.md create mode 100644 .cursor/commands/format.md create mode 100644 .cursor/commands/new-branch.md create mode 100644 .cursor/commands/review-pr.md create mode 100644 .cursor/rules/.gitignore create mode 100644 
.cursor/rules/create-data-migration.mdc diff --git a/.cursor/commands/commit.md b/.cursor/commands/commit.md new file mode 100644 index 00000000..38b584c0 --- /dev/null +++ b/.cursor/commands/commit.md @@ -0,0 +1,21 @@ +# Commit PR + +## Overview +Add all files to the the commit. come up with a succinct, accurate commit message based on the commit. Then push that commit up as a branch. + + +## Steps +1. **Add all changes to the commit ** + - Add all changes to the commit + - Ensure all changes are committed + +2. **Add a succinct commit message based on the changes made** + - Summarize changes clearly + - commit that message + - be succinct + +3. **Push up branch** + - push commit to latest branch in origin + +## RULES YOU MUST FOLLOW + - never do this if you are in main. If so, alert the user that they are in main. \ No newline at end of file diff --git a/.cursor/commands/format.md b/.cursor/commands/format.md new file mode 100644 index 00000000..7ae7e812 --- /dev/null +++ b/.cursor/commands/format.md @@ -0,0 +1,2 @@ +Your job is to create is to format the code base, given the changes that were made in the current pr, so that it confirms to all formatting, linting, prettier rules, etc that exist in this code base. + diff --git a/.cursor/commands/new-branch.md b/.cursor/commands/new-branch.md new file mode 100644 index 00000000..c9c83aba --- /dev/null +++ b/.cursor/commands/new-branch.md @@ -0,0 +1,27 @@ + +# Create a new branch + +## Overview +Given a description for a task, create a new branch + +## Steps +1. **Ingest the message put in and clean up the message** + - You must take in the message and clean it to be succinct and clear. + +2. **Summarize the message into a branch** + - The branch should be relatively short, less than 63 characters + - if i provide a ticket number number like "ENT-1234" in the beginning, reference that at the beginning of the new summarize message. (ex: ENT-1234-my-branch) + - each word should be followed by - until the last word + - Validate that the branch name doesn't already exist + - Validate that the ticket number format is valid (e.g., numeric) + - Handle cases where the message is too long after summarization by truncating appropriately + +3. **Create the branch** + - using the message in step 2, create the branch + - If branch creation fails (e.g., branch already exists), inform the user and do not proceed + +## RULES YOU MUST FOLLOW + - never do this if there is no message given by the user + - never do this if the user does not provide a ticket number + - Validate all inputs before attempting branch creation + - Handle errors gracefully and provide clear feedback to the user diff --git a/.cursor/commands/review-pr.md b/.cursor/commands/review-pr.md new file mode 100644 index 00000000..32882335 --- /dev/null +++ b/.cursor/commands/review-pr.md @@ -0,0 +1,27 @@ +# PR Code Review Checklist + +## Overview +You will perform a comprehensive checklist for conducting thorough code reviews to ensure quality, security, and maintainability. It should be scoped to the pr and how it changes the codebase and functionality of the app. + +Ensure all rules, in ./cursor/rules/, are followed. 
+ +## Review Categories + +### Functionality +- [ ] Code does what it's supposed to do +- [ ] Edge cases are handled +- [ ] Error handling is appropriate +- [ ] No obvious bugs or logic errors + +### Code Quality +- [ ] Code is readable and well-structured +- [ ] Functions are small and focused +- [ ] Variable names are descriptive +- [ ] No code duplication +- [ ] Follows project conventions + +### Security +- [ ] No obvious security vulnerabilities +- [ ] Input validation is present +- [ ] Sensitive data is handled properly +- [ ] No hardcoded secrets \ No newline at end of file diff --git a/.cursor/rules/.gitignore b/.cursor/rules/.gitignore new file mode 100644 index 00000000..5bf525c1 --- /dev/null +++ b/.cursor/rules/.gitignore @@ -0,0 +1,4 @@ +* + +!.gitignore +!create-data-migration.mdc diff --git a/.cursor/rules/create-data-migration.mdc b/.cursor/rules/create-data-migration.mdc new file mode 100644 index 00000000..b50a4909 --- /dev/null +++ b/.cursor/rules/create-data-migration.mdc @@ -0,0 +1,16 @@ +--- +description: This rule creates a data migration Typescript class. It does not need to understand the underlying database. +globs: +alwaysApply: false +--- + + +# Required inputs +- Name, which must be specified by the user + +# Steps +- Generate an id by running ` pnpm exec node -e "const { ulid } = require('ulid'); console.log(ulid())"` in the `common/temp` directory and capturing the output. Do not use other ULID utilities. Trim any whitespace or newlines and convert the ulid to lowercase. I will refer to this as ULID. +- Process the input name by converting it into kebab case. I will refer to this as NAME. +- Create a new file called "-".ts in the 'packages/api/src/data-migrations/migrations'. This file should follow the class definition pattern of other files in that directory. Make sure to include a constructor that calls 'super()' with the ULID as the only argument. +- Import the new class in 'packages/api/src/data-migrations/data-migration.jobs.module.ts' and add it to the `DATA_MIGRATION_MODULES` constant in alphabetical order. + diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index ad8e976b..fc7ea513 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -66,10 +66,43 @@ def mock_request(*args, **kwargs): return mock_request, call_count + def assert_request_with_retries( + self, + http_client, + call_count, + expected_call_count: int, + expected_sleep_count: int, + retry_config=None, + expected_response=None, + ): + """ + Helper to execute a request with mocked sleep and assert the results. 
+ + Args: + http_client: The HTTP client to test with + call_count: The call count tracker from create_mock_request_with_retries + expected_call_count: Expected number of request attempts + expected_sleep_count: Expected number of sleep calls + retry_config: Optional retry configuration + expected_response: Expected response dict (default: {"success": True}) + """ + if expected_response is None: + expected_response = {"success": True} + + with patch("time.sleep") as mock_sleep: + if retry_config: + response = http_client.request("test/path", retry_config=retry_config) + else: + response = http_client.request("test/path") + + assert call_count[0] == expected_call_count + assert response == expected_response + assert mock_sleep.call_count == expected_sleep_count + def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch): """Test that 408 (Request Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, + failure_count=3, failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, ) @@ -77,17 +110,14 @@ def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch) sync_http_client._client, "request", MagicMock(side_effect=mock_request) ) - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 + self.assert_request_with_retries( + sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): """Test that 500 errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, + failure_count=3, failure_response={"status_code": 500, "json": {"error": "Server error"}}, ) @@ -95,18 +125,14 @@ def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch) sync_http_client._client, "request", MagicMock(side_effect=mock_request) ) - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 3 - assert response == {"success": True} - # Verify sleep was called with exponential backoff - assert mock_sleep.call_count == 2 + self.assert_request_with_retries( + sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): """Test that 502 (Bad Gateway) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, + failure_count=3, failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, ) @@ -114,17 +140,14 @@ def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch) sync_http_client._client, "request", MagicMock(side_effect=mock_request) ) - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 3 - assert response == {"success": True} - assert mock_sleep.call_count == 2 + self.assert_request_with_retries( + sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch): """Test that 504 (Gateway Timeout) errors trigger 
retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, + failure_count=3, failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, ) @@ -132,15 +155,12 @@ def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch) sync_http_client._client, "request", MagicMock(side_effect=mock_request) ) - with patch("time.sleep") as mock_sleep: - response = sync_http_client.request("test/path", retry_config=retry_config) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 + self.assert_request_with_retries( + sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) - def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch): - """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" + def test_no_retry_on_non_retryable_error(self, sync_http_client, retry_config, monkeypatch): + """Test that a non retryable error errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail failure_response={ @@ -159,69 +179,6 @@ def test_no_retry_on_503_error(self, sync_http_client, retry_config, monkeypatch # Should only be called once (no retries on 503) assert call_count[0] == 1 - def test_retries_on_429_rate_limit( - self, sync_http_client, retry_config, monkeypatch - ): - """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 429, - "headers": {"Retry-After": "0.1"}, - "json": {"error": "Rate limit exceeded"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - sync_http_client.request("test/path", retry_config=retry_config) - - # Should only be called once (no retries on 429) - assert call_count[0] == 1 - - def test_no_retry_on_400_error(self, sync_http_client, monkeypatch): - """Test that 4xx errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 400, - "json": {"error": "Bad request", "message": "Invalid data"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - sync_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - - def test_no_retry_on_401_error(self, sync_http_client, monkeypatch): - """Test that 401 errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 401, - "json": {"error": "Unauthorized", "message": "Invalid credentials"}, - }, - ) - - monkeypatch.setattr( - sync_http_client._client, "request", MagicMock(side_effect=mock_request) - ) - - with pytest.raises(Exception): - sync_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - def test_respects_max_retries(self, sync_http_client, retry_config, monkeypatch): """Test that max retries limit is respected.""" mock_request, call_count = 
self.create_mock_request_with_retries( @@ -352,13 +309,46 @@ async def mock_request(*args, **kwargs): return mock_request, call_count + async def assert_async_request_with_retries( + self, + http_client, + call_count, + expected_call_count: int, + expected_sleep_count: int, + retry_config=None, + expected_response=None, + ): + """ + Helper to execute an async request with mocked sleep and assert the results. + + Args: + http_client: The HTTP client to test with + call_count: The call count tracker from create_mock_request_with_retries + expected_call_count: Expected number of request attempts + expected_sleep_count: Expected number of sleep calls + retry_config: Optional retry configuration + expected_response: Expected response dict (default: {"success": True}) + """ + if expected_response is None: + expected_response = {"success": True} + + with patch("asyncio.sleep") as mock_sleep: + if retry_config: + response = await http_client.request("test/path", retry_config=retry_config) + else: + response = await http_client.request("test/path") + + assert call_count[0] == expected_call_count + assert response == expected_response + assert mock_sleep.call_count == expected_sleep_count + @pytest.mark.asyncio async def test_retries_on_408_error( self, async_http_client, retry_config, monkeypatch ): """Test that 408 (Request Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, + failure_count=3, failure_response={"status_code": 408, "json": {"error": "Request timeout"}}, ) @@ -366,14 +356,9 @@ async def test_retries_on_408_error( async_http_client._client, "request", AsyncMock(side_effect=mock_request) ) - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 + await self.assert_async_request_with_retries( + async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) @pytest.mark.asyncio async def test_retries_on_500_error( @@ -381,7 +366,7 @@ async def test_retries_on_500_error( ): """Test that 500 errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, + failure_count=3, failure_response={"status_code": 500, "json": {"error": "Server error"}}, ) @@ -389,15 +374,9 @@ async def test_retries_on_500_error( async_http_client._client, "request", AsyncMock(side_effect=mock_request) ) - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 3 - assert response == {"success": True} - # Verify sleep was called with exponential backoff - assert mock_sleep.call_count == 2 + await self.assert_async_request_with_retries( + async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) @pytest.mark.asyncio async def test_retries_on_502_error( @@ -405,7 +384,7 @@ async def test_retries_on_502_error( ): """Test that 502 (Bad Gateway) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=2, + failure_count=3, failure_response={"status_code": 502, "json": {"error": "Bad gateway"}}, ) @@ -413,14 +392,9 @@ async def test_retries_on_502_error( async_http_client._client, "request", AsyncMock(side_effect=mock_request) ) - with patch("asyncio.sleep") as mock_sleep: - 
response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 3 - assert response == {"success": True} - assert mock_sleep.call_count == 2 + await self.assert_async_request_with_retries( + async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) @pytest.mark.asyncio async def test_retries_on_504_error( @@ -428,7 +402,7 @@ async def test_retries_on_504_error( ): """Test that 504 (Gateway Timeout) errors trigger retry.""" mock_request, call_count = self.create_mock_request_with_retries( - failure_count=1, + failure_count=3, failure_response={"status_code": 504, "json": {"error": "Gateway timeout"}}, ) @@ -436,20 +410,15 @@ async def test_retries_on_504_error( async_http_client._client, "request", AsyncMock(side_effect=mock_request) ) - with patch("asyncio.sleep") as mock_sleep: - response = await async_http_client.request( - "test/path", retry_config=retry_config - ) - - assert call_count[0] == 2 - assert response == {"success": True} - assert mock_sleep.call_count == 1 + await self.assert_async_request_with_retries( + async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + ) @pytest.mark.asyncio - async def test_no_retry_on_503_error( + async def test_no_retry_on_non_retryable_error( self, async_http_client, retry_config, monkeypatch ): - """Test that 503 (Service Unavailable) errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" + """Test that a non retryable error errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail failure_response={ @@ -468,72 +437,6 @@ async def test_no_retry_on_503_error( # Should only be called once (no retries on 503) assert call_count[0] == 1 - @pytest.mark.asyncio - async def test_retries_on_429_rate_limit( - self, async_http_client, retry_config, monkeypatch - ): - """Test that 429 errors do NOT trigger retry (consistent with workos-node).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 429, - "headers": {"Retry-After": "0.1"}, - "json": {"error": "Rate limit exceeded"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - await async_http_client.request("test/path", retry_config=retry_config) - - # Should only be called once (no retries on 429) - assert call_count[0] == 1 - - @pytest.mark.asyncio - async def test_no_retry_on_400_error(self, async_http_client, monkeypatch): - """Test that 4xx errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - failure_response={ - "status_code": 400, - "json": {"error": "Bad request", "message": "Invalid data"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(BadRequestException): - await async_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - - @pytest.mark.asyncio - async def test_no_retry_on_401_error(self, async_http_client, monkeypatch): - """Test that 401 errors don't retry (no retry_config passed).""" - mock_request, call_count = self.create_mock_request_with_retries( - failure_count=100, # Always fail - 
failure_response={ - "status_code": 401, - "json": {"error": "Unauthorized", "message": "Invalid credentials"}, - }, - ) - - monkeypatch.setattr( - async_http_client._client, "request", AsyncMock(side_effect=mock_request) - ) - - with pytest.raises(Exception): - await async_http_client.request("test/path") - - # Should only be called once (no retries) - assert call_count[0] == 1 - @pytest.mark.asyncio async def test_respects_max_retries( self, async_http_client, retry_config, monkeypatch @@ -600,7 +503,7 @@ async def test_retries_on_timeout_error( @pytest.mark.asyncio async def test_no_retry_on_success(self, async_http_client, monkeypatch): - """Test that successful requests don't retry (no retry_config needed).""" + """Test that successful requests don't retry.""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=0 # Success immediately ) From b1695ec2aac3af21d4e03bbd2ed7f0a4c61238ad Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 09:16:09 -0500 Subject: [PATCH 15/18] Apply black formatting to test_http_client_retry.py --- tests/test_http_client_retry.py | 56 +++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/tests/test_http_client_retry.py b/tests/test_http_client_retry.py index fc7ea513..7eee6acb 100644 --- a/tests/test_http_client_retry.py +++ b/tests/test_http_client_retry.py @@ -111,7 +111,11 @@ def test_retries_on_408_error(self, sync_http_client, retry_config, monkeypatch) ) self.assert_request_with_retries( - sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + sync_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch): @@ -126,7 +130,11 @@ def test_retries_on_500_error(self, sync_http_client, retry_config, monkeypatch) ) self.assert_request_with_retries( - sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + sync_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch): @@ -141,7 +149,11 @@ def test_retries_on_502_error(self, sync_http_client, retry_config, monkeypatch) ) self.assert_request_with_retries( - sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + sync_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch): @@ -156,10 +168,16 @@ def test_retries_on_504_error(self, sync_http_client, retry_config, monkeypatch) ) self.assert_request_with_retries( - sync_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + sync_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) - def test_no_retry_on_non_retryable_error(self, sync_http_client, retry_config, monkeypatch): + def test_no_retry_on_non_retryable_error( + self, sync_http_client, retry_config, monkeypatch + ): """Test that a non retryable error errors do NOT trigger retry (not in RETRY_STATUS_CODES).""" mock_request, call_count = self.create_mock_request_with_retries( failure_count=100, # Always fail @@ -334,7 +352,9 @@ async def assert_async_request_with_retries( with 
patch("asyncio.sleep") as mock_sleep: if retry_config: - response = await http_client.request("test/path", retry_config=retry_config) + response = await http_client.request( + "test/path", retry_config=retry_config + ) else: response = await http_client.request("test/path") @@ -357,7 +377,11 @@ async def test_retries_on_408_error( ) await self.assert_async_request_with_retries( - async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + async_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) @pytest.mark.asyncio @@ -375,7 +399,11 @@ async def test_retries_on_500_error( ) await self.assert_async_request_with_retries( - async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + async_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) @pytest.mark.asyncio @@ -393,7 +421,11 @@ async def test_retries_on_502_error( ) await self.assert_async_request_with_retries( - async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + async_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) @pytest.mark.asyncio @@ -411,7 +443,11 @@ async def test_retries_on_504_error( ) await self.assert_async_request_with_retries( - async_http_client, call_count, expected_call_count=4, expected_sleep_count=3, retry_config=retry_config + async_http_client, + call_count, + expected_call_count=4, + expected_sleep_count=3, + retry_config=retry_config, ) @pytest.mark.asyncio From 81bab328e528dd107e9c6e6a45695f26dacdc990 Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 09:29:20 -0500 Subject: [PATCH 16/18] remove --- .cursor/commands/commit.md | 21 ------------------- .cursor/commands/format.md | 2 -- .cursor/commands/new-branch.md | 27 ------------------------- .cursor/commands/review-pr.md | 27 ------------------------- .cursor/rules/.gitignore | 4 ---- .cursor/rules/create-data-migration.mdc | 16 --------------- .gitignore | 3 +++ 7 files changed, 3 insertions(+), 97 deletions(-) delete mode 100644 .cursor/commands/commit.md delete mode 100644 .cursor/commands/format.md delete mode 100644 .cursor/commands/new-branch.md delete mode 100644 .cursor/commands/review-pr.md delete mode 100644 .cursor/rules/.gitignore delete mode 100644 .cursor/rules/create-data-migration.mdc diff --git a/.cursor/commands/commit.md b/.cursor/commands/commit.md deleted file mode 100644 index 38b584c0..00000000 --- a/.cursor/commands/commit.md +++ /dev/null @@ -1,21 +0,0 @@ -# Commit PR - -## Overview -Add all files to the the commit. come up with a succinct, accurate commit message based on the commit. Then push that commit up as a branch. - - -## Steps -1. **Add all changes to the commit ** - - Add all changes to the commit - - Ensure all changes are committed - -2. **Add a succinct commit message based on the changes made** - - Summarize changes clearly - - commit that message - - be succinct - -3. **Push up branch** - - push commit to latest branch in origin - -## RULES YOU MUST FOLLOW - - never do this if you are in main. If so, alert the user that they are in main. 
\ No newline at end of file diff --git a/.cursor/commands/format.md b/.cursor/commands/format.md deleted file mode 100644 index 7ae7e812..00000000 --- a/.cursor/commands/format.md +++ /dev/null @@ -1,2 +0,0 @@ -Your job is to create is to format the code base, given the changes that were made in the current pr, so that it confirms to all formatting, linting, prettier rules, etc that exist in this code base. - diff --git a/.cursor/commands/new-branch.md b/.cursor/commands/new-branch.md deleted file mode 100644 index c9c83aba..00000000 --- a/.cursor/commands/new-branch.md +++ /dev/null @@ -1,27 +0,0 @@ - -# Create a new branch - -## Overview -Given a description for a task, create a new branch - -## Steps -1. **Ingest the message put in and clean up the message** - - You must take in the message and clean it to be succinct and clear. - -2. **Summarize the message into a branch** - - The branch should be relatively short, less than 63 characters - - if i provide a ticket number number like "ENT-1234" in the beginning, reference that at the beginning of the new summarize message. (ex: ENT-1234-my-branch) - - each word should be followed by - until the last word - - Validate that the branch name doesn't already exist - - Validate that the ticket number format is valid (e.g., numeric) - - Handle cases where the message is too long after summarization by truncating appropriately - -3. **Create the branch** - - using the message in step 2, create the branch - - If branch creation fails (e.g., branch already exists), inform the user and do not proceed - -## RULES YOU MUST FOLLOW - - never do this if there is no message given by the user - - never do this if the user does not provide a ticket number - - Validate all inputs before attempting branch creation - - Handle errors gracefully and provide clear feedback to the user diff --git a/.cursor/commands/review-pr.md b/.cursor/commands/review-pr.md deleted file mode 100644 index 32882335..00000000 --- a/.cursor/commands/review-pr.md +++ /dev/null @@ -1,27 +0,0 @@ -# PR Code Review Checklist - -## Overview -You will perform a comprehensive checklist for conducting thorough code reviews to ensure quality, security, and maintainability. It should be scoped to the pr and how it changes the codebase and functionality of the app. - -Ensure all rules, in ./cursor/rules/, are followed. - -## Review Categories - -### Functionality -- [ ] Code does what it's supposed to do -- [ ] Edge cases are handled -- [ ] Error handling is appropriate -- [ ] No obvious bugs or logic errors - -### Code Quality -- [ ] Code is readable and well-structured -- [ ] Functions are small and focused -- [ ] Variable names are descriptive -- [ ] No code duplication -- [ ] Follows project conventions - -### Security -- [ ] No obvious security vulnerabilities -- [ ] Input validation is present -- [ ] Sensitive data is handled properly -- [ ] No hardcoded secrets \ No newline at end of file diff --git a/.cursor/rules/.gitignore b/.cursor/rules/.gitignore deleted file mode 100644 index 5bf525c1..00000000 --- a/.cursor/rules/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -* - -!.gitignore -!create-data-migration.mdc diff --git a/.cursor/rules/create-data-migration.mdc b/.cursor/rules/create-data-migration.mdc deleted file mode 100644 index b50a4909..00000000 --- a/.cursor/rules/create-data-migration.mdc +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: This rule creates a data migration Typescript class. It does not need to understand the underlying database. 
-globs: -alwaysApply: false ---- - - -# Required inputs -- Name, which must be specified by the user - -# Steps -- Generate an id by running ` pnpm exec node -e "const { ulid } = require('ulid'); console.log(ulid())"` in the `common/temp` directory and capturing the output. Do not use other ULID utilities. Trim any whitespace or newlines and convert the ulid to lowercase. I will refer to this as ULID. -- Process the input name by converting it into kebab case. I will refer to this as NAME. -- Create a new file called "-".ts in the 'packages/api/src/data-migrations/migrations'. This file should follow the class definition pattern of other files in that directory. Make sure to include a constructor that calls 'super()' with the ULID as the only argument. -- Import the new class in 'packages/api/src/data-migrations/data-migration.jobs.module.ts' and add it to the `DATA_MIGRATION_MODULES` constant in alphabetical order. - diff --git a/.gitignore b/.gitignore index 025e838d..1e046af5 100644 --- a/.gitignore +++ b/.gitignore @@ -133,6 +133,9 @@ dmypy.json # VSCode .vscode/ +# Cursor +.cursor/ + # macOS .DS_Store From 7aa9142b1619b382fa2bd0890bcfade15ed0c04b Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Tue, 11 Nov 2025 09:29:47 -0500 Subject: [PATCH 17/18] remove --- .gitignore | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitignore b/.gitignore index 1e046af5..025e838d 100644 --- a/.gitignore +++ b/.gitignore @@ -133,9 +133,6 @@ dmypy.json # VSCode .vscode/ -# Cursor -.cursor/ - # macOS .DS_Store From c6473240873cdb246fc716b0c688ba3bd1efe1df Mon Sep 17 00:00:00 2001 From: swaroopakkineni Date: Thu, 13 Nov 2025 12:01:54 -0500 Subject: [PATCH 18/18] moar --- tests/test_audit_logs.py | 4 ++++ workos/audit_logs.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_audit_logs.py b/tests/test_audit_logs.py index 1f752604..cfaa41ca 100644 --- a/tests/test_audit_logs.py +++ b/tests/test_audit_logs.py @@ -126,6 +126,10 @@ def test_auto_generates_idempotency_key( assert "idempotency-key" in request_kwargs["headers"] idempotency_key = request_kwargs["headers"]["idempotency-key"] assert idempotency_key and idempotency_key.strip() + # Assert the auto-generated key has the correct prefix + assert idempotency_key.startswith("workos-python-") + # Assert the key has the expected UUID format after the prefix + assert len(idempotency_key) > len("workos-python-") assert response is None def test_throws_unauthorized_exception( diff --git a/workos/audit_logs.py b/workos/audit_logs.py index eee2cc1b..83fb883f 100644 --- a/workos/audit_logs.py +++ b/workos/audit_logs.py @@ -86,7 +86,7 @@ def create_event( headers = {} # Auto-generate UUID v4 if not provided if idempotency_key is None: - idempotency_key = str(uuid.uuid4()) + idempotency_key = f"workos-python-{uuid.uuid4()}" headers["idempotency-key"] = idempotency_key
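
For readers following the final patch: the auto-generated idempotency key is now a UUID v4 behind a "workos-python-" prefix. A minimal standalone sketch of that fallback is below; build_idempotency_key is a hypothetical helper name used only for illustration, while the prefix and UUID generation mirror the diff to workos/audit_logs.py above.

import uuid
from typing import Optional


def build_idempotency_key(provided: Optional[str] = None) -> str:
    # Mirrors the fallback added to create_event: pass any caller-supplied key
    # through, otherwise generate a prefixed UUID v4 for the header.
    if provided is not None:
        return provided
    return f"workos-python-{uuid.uuid4()}"


key = build_idempotency_key()
assert key.startswith("workos-python-")   # same prefix check as the updated test
assert len(key) > len("workos-python-")   # a UUID follows the prefix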
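The retry tests above drive the clients through a retry_config keyword on request() together with the RetryConfig fixture values. A hedged usage sketch follows; the import paths (workos.utils._base_http_client for RetryConfig, workos.utils.http_client for AsyncHTTPClient), the request path, and the credentials are assumptions or placeholders taken from the test fixtures, not a definitive integration recipe.

import asyncio

from workos.utils._base_http_client import RetryConfig   # assumed import path
from workos.utils.http_client import AsyncHTTPClient     # assumed import path


async def main() -> None:
    client = AsyncHTTPClient(
        api_key="sk_test",                    # placeholder, as in the test fixture
        base_url="https://api.workos.test/",  # placeholder base URL
        client_id="client_test",
        version="test",
    )
    # Up to 3 retries with a short, capped exponential backoff and no jitter,
    # matching the retry_config fixture used throughout the retry tests.
    config = RetryConfig(max_retries=3, base_delay=0.1, max_delay=1.0, jitter=0.0)
    # Requires a reachable API; retryable failures are re-attempted automatically.
    response = await client.request("events", retry_config=config)
    print(response)


asyncio.run(main())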
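The tests only assert how many times sleep is called, not the exact delays, so the schedule below is an assumption: base_delay doubled per attempt, capped at max_delay, with an optional random jitter fraction added on top. Treat it as a sketch of the scheme the RetryConfig fields suggest, not as the client's definitive implementation.

import random


def backoff_delay(attempt: int, base_delay: float, max_delay: float, jitter: float) -> float:
    # Exponential backoff: base_delay * 2**attempt, capped at max_delay,
    # plus up to `jitter` (a fraction of the delay) of random extra wait.
    delay = min(base_delay * (2 ** attempt), max_delay)
    return delay + delay * jitter * random.random()


# With the fixture values (base_delay=0.1, max_delay=1.0, jitter=0.0) the three
# retries in the max-retries tests would sleep roughly 0.1s, 0.2s and 0.4s.
print([backoff_delay(attempt, 0.1, 1.0, 0.0) for attempt in range(3)])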