diff --git a/docs/docs/index.md b/docs/docs/index.md
index 7469694e41..43689acbbd 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -57,6 +57,17 @@ Instead of wrangling prompts or training jobs, DSPy (Declarative Self-improving
     dspy.configure(lm=lm)
     ```
+
+=== "Microsoft Fabric"
+    If you're running on the Microsoft Fabric platform, authentication is handled automatically by the Fabric SDK.
+
+    ```python linenums="1"
+    import dspy
+    lm = dspy.LM("microsoftfabric/gpt-5")  # Reasoning model
+    dspy.configure(lm=lm)
+    ```
+
+    Learn more about [Azure OpenAI in Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/data-science/ai-services/ai-services-overview).
+
 === "Gemini"
     You can authenticate by setting the `GEMINI_API_KEY` env variable or passing `api_key` below.
diff --git a/dspy/clients/__init__.py b/dspy/clients/__init__.py
index 43a5c9403a..6531ef8ec3 100644
--- a/dspy/clients/__init__.py
+++ b/dspy/clients/__init__.py
@@ -7,6 +7,7 @@
 from dspy.clients.base_lm import BaseLM, inspect_history
 from dspy.clients.cache import Cache
 from dspy.clients.embedding import Embedder
+from dspy.clients.fabric_azure_openai import FabricAzureOpenAI
 from dspy.clients.lm import LM
 from dspy.clients.provider import Provider, TrainingJob
@@ -106,6 +107,7 @@ def disable_litellm_logging():
 __all__ = [
     "BaseLM",
     "LM",
+    "FabricAzureOpenAI",
     "Provider",
     "TrainingJob",
     "inspect_history",
diff --git a/dspy/clients/fabric_azure_openai.py b/dspy/clients/fabric_azure_openai.py
new file mode 100644
index 0000000000..81297da0e2
--- /dev/null
+++ b/dspy/clients/fabric_azure_openai.py
@@ -0,0 +1,305 @@
+"""Microsoft Fabric Azure OpenAI integration for DSPy.
+
+This module provides a custom LM class for using Azure OpenAI models within
+Microsoft Fabric notebooks. It handles authentication and endpoint configuration
+automatically using Fabric's built-in service discovery and token utilities.
+
+Note: This class only works within a Microsoft Fabric environment.
+"""
+
+from typing import Any, ClassVar
+
+import requests
+
+from dspy.clients.base_lm import BaseLM
+
+
+class FabricAzureOpenAI(BaseLM):
+    """Language model client for Azure OpenAI in Microsoft Fabric.
+
+    This class provides integration with Azure OpenAI models deployed in Microsoft Fabric.
+    It automatically handles authentication and endpoint configuration using Fabric's
+    service discovery and token utilities.
+
+    Note:
+        This class requires the following packages available in Microsoft Fabric:
+        - synapse.ml.fabric.service_discovery
+        - synapse.ml.fabric.token_utils
+
+    Supported models:
+        - gpt-5 (reasoning model)
+        - gpt-4.1
+        - gpt-4.1-mini
+
+    Args:
+        deployment_name: The name of the Azure OpenAI deployment to use.
+            Must be one of: gpt-5, gpt-4.1, gpt-4.1-mini
+        model_type: The type of the model. Defaults to "chat".
+        temperature: The sampling temperature. Defaults to 0.0.
+        max_tokens: Maximum number of tokens to generate. Defaults to 4000.
+        cache: Whether to cache responses. Defaults to True.
+        **kwargs: Additional arguments passed to the base class.
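+
+    Raises:
+        ValueError: If deployment_name is not one of the supported models.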
+
+    Example:
+        ```python
+        import dspy
+        from dspy.clients import FabricAzureOpenAI
+
+        # In a Microsoft Fabric notebook
+        lm = FabricAzureOpenAI(deployment_name="gpt-5")
+        dspy.configure(lm=lm)
+
+        # Use with DSPy modules
+        predictor = dspy.Predict("question -> answer")
+        result = predictor(question="What is DSPy?")
+        ```
+    """
+
+    # Supported models in Microsoft Fabric
+    SUPPORTED_MODELS: ClassVar[set[str]] = {"gpt-5", "gpt-4.1", "gpt-4.1-mini"}
+    REASONING_MODELS: ClassVar[set[str]] = {"gpt-5"}
+
+    def __init__(
+        self,
+        deployment_name: str = "gpt-5",
+        model_type: str = "chat",
+        temperature: float = 0.0,
+        max_tokens: int = 4000,
+        cache: bool = True,
+        **kwargs,
+    ):
+        """Initialize the FabricAzureOpenAI client.
+
+        Args:
+            deployment_name: The Azure OpenAI deployment name.
+            model_type: The type of model ("chat" or "text").
+            temperature: Sampling temperature (0.0 to 1.0).
+            max_tokens: Maximum tokens to generate.
+            cache: Whether to enable caching.
+            **kwargs: Additional keyword arguments.
+
+        Raises:
+            ValueError: If deployment_name is not a supported model.
+        """
+        # Validate model support
+        if deployment_name not in self.SUPPORTED_MODELS:
+            raise ValueError(
+                f"Model '{deployment_name}' is not supported in Microsoft Fabric. "
+                f"Supported models are: {', '.join(sorted(self.SUPPORTED_MODELS))}. "
+                f"For more information, see: "
+                f"https://learn.microsoft.com/en-us/fabric/data-science/ai-services/ai-services-overview"
+            )
+
+        self.deployment_name = deployment_name
+        super().__init__(
+            model=deployment_name,
+            model_type=model_type,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            cache=cache,
+            **kwargs,
+        )
+
+        # Check if this is a reasoning model
+        self.is_reasoning_model = deployment_name in self.REASONING_MODELS
+
+    def _get_fabric_config(self):
+        """Get Fabric environment configuration and auth header.
+
+        Returns:
+            tuple: (fabric_env_config, auth_header)
+
+        Raises:
+            ImportError: If Fabric SDK packages are not available.
+        """
+        try:
+            from synapse.ml.fabric.service_discovery import get_fabric_env_config
+            from synapse.ml.fabric.token_utils import TokenUtils
+        except ImportError as e:
+            raise ImportError(
+                "Microsoft Fabric SDK packages are required to use FabricAzureOpenAI. "
+                "These packages are only available in Microsoft Fabric notebooks. "
+                "Please ensure you are running in a Fabric environment."
+            ) from e
+
+        fabric_env_config = get_fabric_env_config().fabric_env_config
+        auth_header = TokenUtils().get_openai_auth_header()
+        return fabric_env_config, auth_header
+
+    def _prepare_messages(self, prompt: str | list | None, messages: list[dict[str, Any]] | None) -> list[dict]:
+        """Prepare messages for the API request.
+
+        Args:
+            prompt: A string prompt or list of messages.
+            messages: A list of message dictionaries.
+
+        Returns:
+            list: Formatted messages for the API.
+        """
+        if messages is not None:
+            return messages
+        elif prompt is not None:
+            if isinstance(prompt, str):
+                return [{"role": "user", "content": prompt}]
+            elif isinstance(prompt, list):
+                return prompt
+            else:
+                return [{"role": "user", "content": str(prompt)}]
+        else:
+            return []
+
+    def _build_payload(self, messages: list[dict], **kwargs) -> dict:
+        """Build the request payload based on model type.
+
+        Args:
+            messages: The formatted messages.
+            **kwargs: Additional parameters like max_tokens, temperature, etc.
+
+        Returns:
+            dict: The request payload.
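+
+        Example:
+            Illustrative payload shapes, mirroring the branching below with the
+            default settings. For a reasoning model such as gpt-5:
+
+                {"messages": [...], "max_completion_tokens": 4000}
+
+            For a standard model such as gpt-4.1:
+
+                {"messages": [...], "max_tokens": 4000, "temperature": 0.0, "n": 1}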
+ """ + max_tokens_value = kwargs.get("max_tokens", self.kwargs.get("max_tokens", 4000)) + + # Build payload based on model type + payload = {"messages": messages} + + if self.is_reasoning_model: + # Reasoning models use max_completion_tokens and don't support temperature + payload["max_completion_tokens"] = max_tokens_value + # Don't include temperature or n for reasoning models + else: + # Standard models use max_tokens and support temperature + payload["max_tokens"] = max_tokens_value + payload["temperature"] = kwargs.get("temperature", self.kwargs.get("temperature", 0.0)) + payload["n"] = kwargs.get("n", 1) + + return payload + + def _make_request(self, payload: dict) -> list[str]: + """Make the API request to Azure OpenAI. + + Args: + payload: The request payload. + + Returns: + list: List of response contents. + + Raises: + Exception: If the API call fails. + """ + fabric_env_config, auth_header = self._get_fabric_config() + + url = ( + f"{fabric_env_config.ml_workload_endpoint}cognitive/openai/openai/deployments/" + f"{self.deployment_name}/chat/completions?api-version=2025-04-01-preview" + ) + headers = {"Authorization": auth_header, "Content-Type": "application/json"} + + response = requests.post(url, headers=headers, json=payload, timeout=60) + + if response.status_code == 200: + response_data = response.json() + return [choice["message"]["content"] for choice in response_data.get("choices", [])] + else: + raise Exception(f"API call failed: {response.status_code} - {response.text}") + + def basic_request(self, prompt: str | list | None = None, **kwargs) -> list[str]: + """Make a basic request to the Azure OpenAI API. + + Args: + prompt: The prompt string or list of messages. + **kwargs: Additional parameters for the request. + + Returns: + list: List of generated response strings. + """ + messages = self._prepare_messages(prompt, None) + payload = self._build_payload(messages, **kwargs) + return self._make_request(payload) + + def forward( + self, + prompt: str | None = None, + messages: list[dict[str, Any]] | None = None, + **kwargs, + ): + """Forward pass for the language model. + + This method is required by BaseLM and must return a response in OpenAI format. + + Args: + prompt: Optional string prompt. + messages: Optional list of message dictionaries. + **kwargs: Additional parameters. + + Returns: + A response object compatible with OpenAI's response format. + + Raises: + ValueError: If neither prompt nor messages is provided. 
+ """ + if prompt is None and messages is None: + raise ValueError("Either 'prompt' or 'messages' must be provided") + + # Prepare messages + formatted_messages = self._prepare_messages(prompt, messages) + + # Build payload + payload = self._build_payload(formatted_messages, **kwargs) + + # Make request + response_contents = self._make_request(payload) + + # Convert to OpenAI-compatible format + # We need to return a response object that looks like OpenAI's ChatCompletion + from types import SimpleNamespace + + choices = [ + SimpleNamespace( + message=SimpleNamespace(content=content, role="assistant"), + finish_reason="stop", + index=i, + ) + for i, content in enumerate(response_contents) + ] + + # Create a minimal response object + response = SimpleNamespace( + choices=choices, + model=self.deployment_name, + usage=SimpleNamespace( + prompt_tokens=0, # Fabric API doesn't return token counts + completion_tokens=0, + total_tokens=0, + ), + ) + + return response + + def __call__( + self, + prompt: str | None = None, + messages: list[dict[str, Any]] | None = None, + **kwargs, + ) -> list[str]: + """Call the language model. + + This method provides a simpler interface that returns just the text outputs. + + Args: + prompt: Optional string prompt. + messages: Optional list of message dictionaries. + **kwargs: Additional parameters. + + Returns: + list: List of generated response strings. + + Raises: + ValueError: If neither prompt nor messages is provided. + """ + if messages is not None: + return self.basic_request(messages, **kwargs) + elif prompt is not None: + return self.basic_request(prompt, **kwargs) + else: + raise ValueError("Either 'prompt' or 'messages' must be provided") diff --git a/dspy/clients/lm.py b/dspy/clients/lm.py index f9fc648cad..7626924ab5 100644 --- a/dspy/clients/lm.py +++ b/dspy/clients/lm.py @@ -27,6 +27,37 @@ class LM(BaseLM): A language model supporting chat or text completion requests for use with DSPy modules. """ + def __new__( + cls, + model: str, + model_type: Literal["chat", "text", "responses"] = "chat", + temperature: float | None = None, + max_tokens: int | None = None, + cache: bool = True, + **kwargs, + ): + """Create a new LM instance, delegating to FabricAzureOpenAI for microsoftfabric/ models.""" + # Only check for microsoftfabric prefix if model is a string + if isinstance(model, str) and model.startswith("microsoftfabric/"): + # Import here to avoid circular dependency + from dspy.clients.fabric_azure_openai import FabricAzureOpenAI + + # Extract the deployment name (everything after "microsoftfabric/") + deployment_name = model.split("microsoftfabric/", 1)[1] + + # Create and return a FabricAzureOpenAI instance + return FabricAzureOpenAI( + deployment_name=deployment_name, + model_type=model_type, + temperature=temperature if temperature is not None else 0.0, + max_tokens=max_tokens if max_tokens is not None else 4000, + cache=cache, + **kwargs, + ) + + # For all other models, create a regular LM instance + return super().__new__(cls) + def __init__( self, model: str, @@ -129,12 +160,7 @@ def _get_cached_completion_fn(self, completion_fn, cache): return completion_fn, litellm_cache_args - def forward( - self, - prompt: str | None = None, - messages: list[dict[str, Any]] | None = None, - **kwargs - ): + def forward(self, prompt: str | None = None, messages: list[dict[str, Any]] | None = None, **kwargs): # Build the request. 
         kwargs = dict(kwargs)
         cache = kwargs.pop("cache", self.cache)
diff --git a/tests/clients/test_fabric_azure_openai.py b/tests/clients/test_fabric_azure_openai.py
new file mode 100644
index 0000000000..31a430d62d
--- /dev/null
+++ b/tests/clients/test_fabric_azure_openai.py
@@ -0,0 +1,329 @@
+"""Tests for FabricAzureOpenAI client."""
+
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+import dspy
+from dspy.clients.fabric_azure_openai import FabricAzureOpenAI
+
+
+class TestFabricAzureOpenAI:
+    """Test suite for FabricAzureOpenAI class."""
+
+    def test_initialization_with_supported_model(self):
+        """Test that FabricAzureOpenAI can be initialized with supported models."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+        assert lm.deployment_name == "gpt-4.1"
+        assert lm.model == "gpt-4.1"
+        assert lm.is_reasoning_model is False
+
+    def test_initialization_with_unsupported_model(self):
+        """Test that unsupported models raise ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            FabricAzureOpenAI(deployment_name="gpt-4o")
+
+        assert "gpt-4o" in str(exc_info.value)
+        assert "not supported" in str(exc_info.value)
+        assert "https://learn.microsoft.com" in str(exc_info.value)
+
+    def test_supported_models_list(self):
+        """Test that all supported models can be initialized."""
+        supported = ["gpt-5", "gpt-4.1", "gpt-4.1-mini"]
+        for model in supported:
+            lm = FabricAzureOpenAI(deployment_name=model)
+            assert lm.deployment_name == model
+
+    def test_reasoning_model_detection_gpt5(self):
+        """Test that gpt-5 is detected as a reasoning model."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-5")
+        assert lm.is_reasoning_model is True
+
+    def test_standard_model_detection_gpt41(self):
+        """Test that gpt-4.1 is not a reasoning model."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+        assert lm.is_reasoning_model is False
+
+    def test_standard_model_detection_gpt41_mini(self):
+        """Test that gpt-4.1-mini is not a reasoning model."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1-mini")
+        assert lm.is_reasoning_model is False
+
+    def test_prepare_messages_with_string_prompt(self):
+        """Test message preparation with a string prompt."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+        messages = lm._prepare_messages("Hello, world!", None)
+        assert messages == [{"role": "user", "content": "Hello, world!"}]
+
+    def test_prepare_messages_with_list_prompt(self):
+        """Test message preparation with a list of messages."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+        prompt = [{"role": "user", "content": "Hello"}]
+        messages = lm._prepare_messages(prompt, None)
+        assert messages == prompt
+
+    def test_prepare_messages_with_messages_param(self):
+        """Test message preparation with messages parameter."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+        messages_input = [{"role": "user", "content": "Test"}]
+        messages = lm._prepare_messages(None, messages_input)
+        assert messages == messages_input
+
+    def test_build_payload_standard_model(self):
+        """Test payload building for standard models."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1", max_tokens=1000, temperature=0.5)
+        messages = [{"role": "user", "content": "Hello"}]
+        payload = lm._build_payload(messages)
+
+        assert payload["messages"] == messages
+        assert payload["max_tokens"] == 1000
+        assert payload["temperature"] == 0.5
+        assert payload["n"] == 1
+        assert "max_completion_tokens" not in payload
+
+    def test_build_payload_reasoning_model(self):
+        """Test payload building for reasoning models."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-5", max_tokens=2000)
+        messages = [{"role": "user", "content": "Solve this"}]
+        payload = lm._build_payload(messages)
+
+        assert payload["messages"] == messages
+        assert payload["max_completion_tokens"] == 2000
+        assert "temperature" not in payload
+        assert "n" not in payload
+        assert "max_tokens" not in payload
+
+    def test_build_payload_with_override_kwargs(self):
+        """Test that kwargs can override default values."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1", max_tokens=1000, temperature=0.0)
+        messages = [{"role": "user", "content": "Hello"}]
+        payload = lm._build_payload(messages, max_tokens=2000, temperature=0.8)
+
+        assert payload["max_tokens"] == 2000
+        assert payload["temperature"] == 0.8
+
+    def test_get_fabric_config_import_error(self):
+        """Test that appropriate error is raised when Fabric SDK is not available."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+
+        with pytest.raises(ImportError) as exc_info:
+            lm._get_fabric_config()
+
+        assert "Microsoft Fabric SDK packages are required" in str(exc_info.value)
+
+    @patch("dspy.clients.fabric_azure_openai.requests.post")
+    def test_make_request_success(self, mock_post):
+        """Test successful API request."""
+        # Mock Fabric SDK imports
+        mock_fabric_config = MagicMock()
+        mock_fabric_config.ml_workload_endpoint = "https://test.endpoint/"
+
+        with patch(
+            "dspy.clients.fabric_azure_openai.FabricAzureOpenAI._get_fabric_config",
+            return_value=(mock_fabric_config, "Bearer test-token"),
+        ):
+            # Mock successful response
+            mock_response = MagicMock()
+            mock_response.status_code = 200
+            mock_response.json.return_value = {
+                "choices": [
+                    {"message": {"content": "Hello there!"}},
+                    {"message": {"content": "How are you?"}},
+                ]
+            }
+            mock_post.return_value = mock_response
+
+            lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+            payload = {"messages": [{"role": "user", "content": "Hi"}]}
+            result = lm._make_request(payload)
+
+            assert result == ["Hello there!", "How are you?"]
+            assert mock_post.called
+
+    @patch("dspy.clients.fabric_azure_openai.requests.post")
+    def test_make_request_failure(self, mock_post):
+        """Test failed API request."""
+        # Mock Fabric SDK imports
+        mock_fabric_config = MagicMock()
+        mock_fabric_config.ml_workload_endpoint = "https://test.endpoint/"
+
+        with patch(
+            "dspy.clients.fabric_azure_openai.FabricAzureOpenAI._get_fabric_config",
+            return_value=(mock_fabric_config, "Bearer test-token"),
+        ):
+            # Mock failed response
+            mock_response = MagicMock()
+            mock_response.status_code = 400
+            mock_response.text = "Bad request"
+            mock_post.return_value = mock_response
+
+            lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+            payload = {"messages": [{"role": "user", "content": "Hi"}]}
+
+            with pytest.raises(Exception) as exc_info:
+                lm._make_request(payload)
+
+            assert "API call failed: 400" in str(exc_info.value)
+
+    @patch("dspy.clients.fabric_azure_openai.requests.post")
+    def test_forward_success(self, mock_post):
+        """Test forward method with successful response."""
+        # Mock Fabric SDK imports
+        mock_fabric_config = MagicMock()
+        mock_fabric_config.ml_workload_endpoint = "https://test.endpoint/"
+
+        with patch(
+            "dspy.clients.fabric_azure_openai.FabricAzureOpenAI._get_fabric_config",
+            return_value=(mock_fabric_config, "Bearer test-token"),
+        ):
+            # Mock successful response
+            mock_response = MagicMock()
+            mock_response.status_code = 200
+            mock_response.json.return_value = {"choices": [{"message": {"content": "Test response"}}]}
+            mock_post.return_value = mock_response
+
+            lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+            result = lm.forward(prompt="Test prompt")
+
+            # Check that result has the expected structure
+            assert hasattr(result, "choices")
+            assert len(result.choices) == 1
+            assert result.choices[0].message.content == "Test response"
+            assert result.choices[0].message.role == "assistant"
+            assert result.model == "gpt-4.1"
+
+    @patch("dspy.clients.fabric_azure_openai.requests.post")
+    def test_call_with_prompt(self, mock_post):
+        """Test __call__ method with prompt."""
+        # Mock Fabric SDK imports
+        mock_fabric_config = MagicMock()
+        mock_fabric_config.ml_workload_endpoint = "https://test.endpoint/"
+
+        with patch(
+            "dspy.clients.fabric_azure_openai.FabricAzureOpenAI._get_fabric_config",
+            return_value=(mock_fabric_config, "Bearer test-token"),
+        ):
+            # Mock successful response
+            mock_response = MagicMock()
+            mock_response.status_code = 200
+            mock_response.json.return_value = {"choices": [{"message": {"content": "Response text"}}]}
+            mock_post.return_value = mock_response
+
+            lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+            result = lm(prompt="Hello")
+
+            assert result == ["Response text"]
+
+    @patch("dspy.clients.fabric_azure_openai.requests.post")
+    def test_call_with_messages(self, mock_post):
+        """Test __call__ method with messages."""
+        # Mock Fabric SDK imports
+        mock_fabric_config = MagicMock()
+        mock_fabric_config.ml_workload_endpoint = "https://test.endpoint/"
+
+        with patch(
+            "dspy.clients.fabric_azure_openai.FabricAzureOpenAI._get_fabric_config",
+            return_value=(mock_fabric_config, "Bearer test-token"),
+        ):
+            # Mock successful response
+            mock_response = MagicMock()
+            mock_response.status_code = 200
+            mock_response.json.return_value = {"choices": [{"message": {"content": "Response text"}}]}
+            mock_post.return_value = mock_response
+
+            lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+            messages = [{"role": "user", "content": "Hello"}]
+            result = lm(messages=messages)
+
+            assert result == ["Response text"]
+
+    def test_call_without_prompt_or_messages(self):
+        """Test that __call__ raises error when neither prompt nor messages provided."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+
+        with pytest.raises(ValueError) as exc_info:
+            lm()
+
+        assert "Either 'prompt' or 'messages' must be provided" in str(exc_info.value)
+
+    def test_forward_without_prompt_or_messages(self):
+        """Test that forward raises error when neither prompt nor messages provided."""
+        lm = FabricAzureOpenAI(deployment_name="gpt-4.1")
+
+        with pytest.raises(ValueError) as exc_info:
+            lm.forward()
+
+        assert "Either 'prompt' or 'messages' must be provided" in str(exc_info.value)
+
+
+class TestFabricAzureOpenAIIntegration:
+    """Test suite for FabricAzureOpenAI integration with dspy.LM."""
+
+    def test_lm_with_microsoftfabric_prefix(self):
+        """Test that dspy.LM with microsoftfabric/ prefix returns FabricAzureOpenAI."""
+        lm = dspy.LM("microsoftfabric/gpt-4.1")
+
+        assert isinstance(lm, FabricAzureOpenAI)
+        assert lm.deployment_name == "gpt-4.1"
+        assert lm.model == "gpt-4.1"
+
+    def test_lm_with_microsoftfabric_prefix_custom_params(self):
+        """Test that custom parameters are passed through."""
+        lm = dspy.LM(
+            "microsoftfabric/gpt-4.1",
+            temperature=0.8,
+            max_tokens=3000,
+            cache=False,
+        )
+
+        assert isinstance(lm, FabricAzureOpenAI)
+        assert lm.kwargs.get("temperature") == 0.8
+        assert lm.kwargs.get("max_tokens") == 3000
+        assert lm.cache is False
+
+    def test_lm_with_microsoftfabric_reasoning_model(self):
+        """Test that reasoning models are detected with microsoftfabric prefix."""
+        lm = dspy.LM("microsoftfabric/gpt-5")
+
+        assert isinstance(lm, FabricAzureOpenAI)
+        assert lm.is_reasoning_model is True
+        assert lm.deployment_name == "gpt-5"
+
+    def test_lm_without_microsoftfabric_prefix(self):
+        """Test that non-microsoftfabric models work as before."""
+        # This should create a regular LM instance, not FabricAzureOpenAI
+        lm = dspy.LM("openai/gpt-4o", api_key="test")
+
+        assert not isinstance(lm, FabricAzureOpenAI)
+        assert type(lm).__name__ == "LM"
+
+    def test_configure_with_microsoftfabric_lm(self):
+        """Test that dspy.configure works with microsoftfabric LM."""
+        lm = dspy.LM("microsoftfabric/gpt-4.1")
+        dspy.configure(lm=lm)
+
+        # Verify it was configured
+        assert dspy.settings.lm is not None
+        assert isinstance(dspy.settings.lm, FabricAzureOpenAI)
+
+    def test_microsoftfabric_prefix_with_supported_models(self):
+        """Test that supported models work with microsoftfabric prefix."""
+        test_cases = [
+            ("microsoftfabric/gpt-4.1", "gpt-4.1", False),
+            ("microsoftfabric/gpt-4.1-mini", "gpt-4.1-mini", False),
+            ("microsoftfabric/gpt-5", "gpt-5", True),
+        ]
+
+        for model_string, expected_deployment, expected_reasoning in test_cases:
+            lm = dspy.LM(model_string)
+            assert isinstance(lm, FabricAzureOpenAI)
+            assert lm.deployment_name == expected_deployment
+            assert lm.is_reasoning_model == expected_reasoning
+
+    def test_microsoftfabric_prefix_with_unsupported_model(self):
+        """Test that unsupported models raise ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            dspy.LM("microsoftfabric/gpt-4o")
+
+        assert "not supported" in str(exc_info.value)
+        assert "https://learn.microsoft.com" in str(exc_info.value)