 import asyncio
-import os
 import time
 from unittest import mock
 from unittest.mock import AsyncMock
@@ -131,9 +130,8 @@ def module_start_status_message(self, instance, inputs):
     assert status_messages[2].message == "Predict starting!"


-@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key not found in environment variables")
 @pytest.mark.anyio
-async def test_stream_listener_chat_adapter():
+async def test_stream_listener_chat_adapter(llm_model):
     class MyProgram(dspy.Module):
         def __init__(self):
             self.predict1 = dspy.Predict("question->answer")
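
Reviewer note: the `llm_model` fixture these test signatures now take is defined outside this diff. A minimal `conftest.py` sketch of one plausible definition, assuming an env-var override and keeping the skip behavior of the removed `skipif` guards (the `LLM_FOR_TEST` name is an assumption, not from this PR):

```python
# conftest.py -- hypothetical sketch; the PR defines the real fixture elsewhere.
import os

import pytest


@pytest.fixture
def llm_model():
    # Mirror the removed @pytest.mark.skipif guards: skip live-LM tests
    # when no API key is configured.
    if not os.getenv("OPENAI_API_KEY"):
        pytest.skip("OpenAI API key not found in environment variables")
    # Default to the model these tests previously hardcoded.
    return os.getenv("LLM_FOR_TEST", "openai/gpt-4o-mini")
```
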
@@ -154,7 +152,7 @@ def __call__(self, x: str, **kwargs):
         include_final_prediction_in_output_stream=False,
     )
     # Turn off the cache to ensure the stream is produced.
-    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False)):
+    with dspy.context(lm=dspy.LM(llm_model, cache=False)):
         output = program(x="why did a chicken cross the kitchen?")
         all_chunks = []
         async for value in output:
@@ -194,9 +192,8 @@ async def acall(self, x: str):
     assert status_messages[1].message == "Tool calling finished! Querying the LLM with tool calling results..."


-@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key not found in environment variables")
 @pytest.mark.anyio
-async def test_stream_listener_json_adapter():
+async def test_stream_listener_json_adapter(llm_model):
     class MyProgram(dspy.Module):
         def __init__(self):
             self.predict1 = dspy.Predict("question->answer")
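
The status-message assertions above exercise dspy's streaming status hooks. A minimal sketch of a provider that would produce the "Predict starting!" message; the hook name matches the hunk header above, the body is illustrative:

```python
# Sketch of a custom status provider like the ones these tests build.
# dspy.streaming.StatusMessageProvider exposes overridable hooks such as
# module_start_status_message; returning a string emits a status message.
import dspy


class MyStatusMessageProvider(dspy.streaming.StatusMessageProvider):
    def module_start_status_message(self, instance, inputs):
        # For a dspy.Predict instance this yields "Predict starting!".
        return f"{instance.__class__.__name__} starting!"
```
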
@@ -217,7 +214,7 @@ def __call__(self, x: str, **kwargs):
         include_final_prediction_in_output_stream=False,
     )
     # Turn off the cache to ensure the stream is produced.
-    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter()):
+    with dspy.context(lm=dspy.LM(llm_model, cache=False), adapter=dspy.JSONAdapter()):
         output = program(x="why did a chicken cross the kitchen?")
         all_chunks = []
         async for value in output:
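
For contrast with the marker-based mock in the next hunk: under `dspy.JSONAdapter()` the model emits the output fields as one JSON object instead of `[[ ## field ## ]]` markers, so a mocked stream would carry JSON fragments. A hypothetical analogue (fragment boundaries and the import path are assumptions):

```python
# Hypothetical JSONAdapter-style delta stream; compare with the marker-based
# gpt_4o_mini_stream mock below. Fragment boundaries are illustrative.
from litellm.types.utils import Delta, ModelResponseStream, StreamingChoices


async def json_stream(*args, **kwargs):
    for piece in ['{"answer":', ' "How are', ' you doing?"', "}"]:
        yield ModelResponseStream(
            model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content=piece))]
        )
```
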
@@ -232,22 +229,22 @@ def __call__(self, x: str, **kwargs):


 @pytest.mark.anyio
-async def test_streaming_handles_space_correctly():
+async def test_streaming_handles_space_correctly(llm_model):
     my_program = dspy.Predict("question->answer")
     program = dspy.streamify(
         my_program, stream_listeners=[dspy.streaming.StreamListener(signature_field_name="answer")]
     )

     async def gpt_4o_mini_stream(*args, **kwargs):
         yield ModelResponseStream(
-            model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="[[ ## answer ## ]]\n"))]
+            model=llm_model, choices=[StreamingChoices(delta=Delta(content="[[ ## answer ## ]]\n"))]
         )
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="How "))])
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="are "))])
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="you "))])
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="doing?"))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="How "))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="are "))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="you "))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="doing?"))])
         yield ModelResponseStream(
-            model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="\n\n[[ ## completed ## ]]"))]
+            model=llm_model, choices=[StreamingChoices(delta=Delta(content="\n\n[[ ## completed ## ]]"))]
         )

     with mock.patch("litellm.acompletion", side_effect=gpt_4o_mini_stream):
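
Why the assertion in the next hunk expects a single "How are you doing?" chunk: the listener joins every delta between the answer header and the completed footer and drops the markers. A toy, standalone version of that extraction (not dspy's implementation):

```python
# Toy version of the marker-based extraction the mock above exercises;
# dspy's StreamListener works incrementally, but the invariant is the same.
def extract_answer(deltas: list[str]) -> str:
    text = "".join(deltas)
    start = text.index("[[ ## answer ## ]]") + len("[[ ## answer ## ]]")
    end = text.index("[[ ## completed ## ]]")
    return text[start:end].strip()


assert extract_answer(
    ["[[ ## answer ## ]]\n", "How ", "are ", "you ", "doing?", "\n\n[[ ## completed ## ]]"]
) == "How are you doing?"
```
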
@@ -261,8 +258,7 @@ async def gpt_4o_mini_stream(*args, **kwargs):
     assert all_chunks[0].chunk == "How are you doing?"


-@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key not found in environment variables")
-def test_sync_streaming():
+def test_sync_streaming(llm_model):
     class MyProgram(dspy.Module):
         def __init__(self):
             self.predict1 = dspy.Predict("question->answer")
@@ -284,7 +280,7 @@ def __call__(self, x: str, **kwargs):
         async_streaming=False,
     )
     # Turn off the cache to ensure the stream is produced.
-    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False)):
+    with dspy.context(lm=dspy.LM(llm_model, cache=False)):
         output = program(x="why did a chicken cross the kitchen?")
         all_chunks = []
         for value in output:
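
A possible follow-up, not part of this PR: parametrizing the fixture would run every streaming test once per backend. The model list below is illustrative:

```python
# Hypothetical parametrized variant of the llm_model fixture.
import pytest


@pytest.fixture(params=["openai/gpt-4o-mini", "openai/gpt-4.1-mini"])
def llm_model(request):
    return request.param
```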