@@ -131,9 +131,8 @@ def module_start_status_message(self, instance, inputs):
     assert status_messages[2].message == "Predict starting!"
 
 
-@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key not found in environment variables")
 @pytest.mark.anyio
-async def test_stream_listener_chat_adapter():
+async def test_stream_listener_chat_adapter(llm_model):
     class MyProgram(dspy.Module):
         def __init__(self):
             self.predict1 = dspy.Predict("question->answer")
@@ -154,7 +153,7 @@ def __call__(self, x: str, **kwargs):
         include_final_prediction_in_output_stream=False,
     )
     # Turn off the cache to ensure the stream is produced.
-    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False)):
+    with dspy.context(lm=dspy.LM(llm_model, cache=False)):
         output = program(x="why did a chicken cross the kitchen?")
         all_chunks = []
         async for value in output:
@@ -194,9 +193,8 @@ async def acall(self, x: str):
     assert status_messages[1].message == "Tool calling finished! Querying the LLM with tool calling results..."
 
 
-@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key not found in environment variables")
 @pytest.mark.anyio
-async def test_stream_listener_json_adapter():
+async def test_stream_listener_json_adapter(llm_model):
     class MyProgram(dspy.Module):
         def __init__(self):
             self.predict1 = dspy.Predict("question->answer")
@@ -217,7 +215,7 @@ def __call__(self, x: str, **kwargs):
         include_final_prediction_in_output_stream=False,
     )
     # Turn off the cache to ensure the stream is produced.
-    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter()):
+    with dspy.context(lm=dspy.LM(llm_model, cache=False), adapter=dspy.JSONAdapter()):
         output = program(x="why did a chicken cross the kitchen?")
         all_chunks = []
         async for value in output:
@@ -232,22 +230,22 @@ def __call__(self, x: str, **kwargs):
 
 
 @pytest.mark.anyio
-async def test_streaming_handles_space_correctly():
+async def test_streaming_handles_space_correctly(llm_model):
     my_program = dspy.Predict("question->answer")
     program = dspy.streamify(
         my_program, stream_listeners=[dspy.streaming.StreamListener(signature_field_name="answer")]
     )
 
     async def gpt_4o_mini_stream(*args, **kwargs):
         yield ModelResponseStream(
-            model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="[[ ## answer ## ]]\n"))]
+            model=llm_model, choices=[StreamingChoices(delta=Delta(content="[[ ## answer ## ]]\n"))]
         )
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="How "))])
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="are "))])
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="you "))])
-        yield ModelResponseStream(model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="doing?"))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="How "))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="are "))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="you "))])
+        yield ModelResponseStream(model=llm_model, choices=[StreamingChoices(delta=Delta(content="doing?"))])
         yield ModelResponseStream(
-            model="gpt-4o-mini", choices=[StreamingChoices(delta=Delta(content="\n\n[[ ## completed ## ]]"))]
+            model=llm_model, choices=[StreamingChoices(delta=Delta(content="\n\n[[ ## completed ## ]]"))]
         )
 
     with mock.patch("litellm.acompletion", side_effect=gpt_4o_mini_stream):
@@ -261,8 +259,7 @@ async def gpt_4o_mini_stream(*args, **kwargs):
     assert all_chunks[0].chunk == "How are you doing?"
 
 
-@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OpenAI API key not found in environment variables")
-def test_sync_streaming():
+def test_sync_streaming(llm_model):
     class MyProgram(dspy.Module):
         def __init__(self):
             self.predict1 = dspy.Predict("question->answer")
@@ -284,7 +281,7 @@ def __call__(self, x: str, **kwargs):
         async_streaming=False,
     )
     # Turn off the cache to ensure the stream is produced.
-    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False)):
+    with dspy.context(lm=dspy.LM(llm_model, cache=False)):
         output = program(x="why did a chicken cross the kitchen?")
         all_chunks = []
         for value in output:
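For context: each hunk replaces the hardcoded `"openai/gpt-4o-mini"` (and the per-test `OPENAI_API_KEY` skip guards) with an injected `llm_model` pytest fixture, so the whole suite can target a different provider from one place. The fixture itself is not shown in this diff; a minimal sketch of what it might look like, assuming it lives in `tests/conftest.py` and reads a hypothetical `LLM_MODEL` environment variable, is:

```python
# tests/conftest.py -- hypothetical location and variable name, not part of this diff.
import os

import pytest


@pytest.fixture
def llm_model():
    """Model identifier injected into the streaming tests.

    Defaults to the previously hardcoded model; set LLM_MODEL to run the
    suite against another LiteLLM-compatible provider.
    """
    return os.getenv("LLM_MODEL", "openai/gpt-4o-mini")
```

With a fixture like this, pytest injects the model name into every test that declares an `llm_model` parameter, and any provider-availability skipping can be centralized in the fixture instead of repeated `skipif` markers.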