@@ -78,48 +78,6 @@ async def test_internal_error_stops_execution():
7878 not _has_langchain_openai or not _has_openai_key ,
7979 reason = "langchain-openai not available" ,
8080)
@pytest.mark.asyncio
async def test_content_safety_missing_prompt():
    """Parallel input rails where the content-safety prompt is absent.

    Only the ``self_check_input`` prompt is supplied, so the content-safety
    rail cannot render its prompt. The app must answer with a single
    internal-error message and emit a BotIntent "stop" event.
    """
    # Both input rails run in parallel; both models are declared.
    raw_config = {
        "instructions": [
            {"type": "general", "content": "You are a helpful assistant."}
        ],
        "models": [
            {"type": "main", "engine": "openai", "model": "gpt-3.5-turbo"},
            {"type": "content_safety", "engine": "openai", "model": "gpt-3.5-turbo"},
        ],
        "rails": {
            "input": {
                "flows": [
                    "content safety check input $model=content_safety",
                    "self check input",
                ],
                "parallel": True,
            }
        },
    }

    # NOTE: deliberately only the self_check_input prompt is provided.
    rails_config = RailsConfig.from_content(
        config=raw_config,
        yaml_content=(
            "prompts:\n"
            "  - task: self_check_input\n"
            "    content: 'Is the user input safe? Answer Yes or No.'"
        ),
    )

    chat = TestChat(rails_config, llm_completions=["Safe response"])
    chat >> "test message"

    result = await chat.app.generate_async(messages=chat.history, options=OPTIONS)

    # The missing prompt must surface as exactly one internal-error answer.
    assert result is not None
    assert "internal error" in result.response[0]["content"].lower()

    # The runtime must also have produced a BotIntent "stop" event.
    stop_events = []
    for event in result.log.internal_events:
        if event.get("type") == "BotIntent" and event.get("intent") == "stop":
            stop_events.append(event)
    assert len(stop_events) > 0
122-
12381@pytest .mark .asyncio
12482async def test_no_app_llm_request_on_internal_error ():
12583 """Test that App LLM request is not sent when internal error occurs."""
@@ -164,48 +122,6 @@ async def test_no_app_llm_request_on_internal_error():
164122 ), "Expected BotIntent stop event after internal error"
165123
166124
@pytest.mark.asyncio
async def test_content_safety_missing_model():
    """Test content safety with missing model configuration.

    The input flow references ``$model=content_safety`` but the ``models``
    section only declares ``main``; generation must fail with a single
    internal-error response plus a BotIntent "stop" event.
    """
    raw_config = {
        "instructions": [
            {"type": "general", "content": "You are a helpful assistant."}
        ],
        # Deliberately no "content_safety" entry in the model list.
        "models": [
            {"type": "main", "engine": "openai", "model": "gpt-3.5-turbo"}
        ],
        "rails": {
            "input": {
                "flows": ["content safety check input $model=content_safety"],
                "parallel": True,
            }
        },
    }

    rails_config = RailsConfig.from_content(
        config=raw_config,
        yaml_content=(
            "prompts:\n"
            "  - task: content_safety_check_input $model=content_safety\n"
            "    content: 'Check if this is safe: {{ user_input }}'"
        ),
    )

    chat = TestChat(rails_config, llm_completions=["Response"])
    chat >> "test message"

    result = await chat.app.generate_async(messages=chat.history, options=OPTIONS)

    # should get internal error due to missing model
    assert result is not None
    assert "internal error" in result.response[0]["content"].lower()

    # verify stop event was generated
    stop_events = []
    for event in result.log.internal_events:
        if event.get("type") == "BotIntent" and event.get("intent") == "stop":
            stop_events.append(event)
    assert len(stop_events) > 0
208-
209125@pytest .mark .asyncio
210126async def test_parallel_rails_partial_failure ():
211127 """Test that partial failure in parallel rails is handled properly."""
@@ -343,21 +259,28 @@ async def test_action_execution_returns_failed():
343259 ), "Expected BotIntent stop event after action failure"
344260
345261
@pytest.mark.skipif(
    not _has_langchain_openai or not _has_openai_key,
    reason="langchain-openai not available",
)
@pytest.mark.asyncio
async def test_single_error_message_not_multiple():
    """Exactly one error message is produced when several parallel rails fail.

    Before the fix, multiple failing rails each produced their own error
    message. The configuration here is valid (it passes config-time
    validation); runtime failures are forced by making prompt rendering raise
    for every task, so both parallel input rails fail at once.
    """
    cfg = {
        "models": [
            {"type": "main", "engine": "openai", "model": "gpt-3.5-turbo"},
            {"type": "content_safety", "engine": "openai", "model": "gpt-3.5-turbo"},
        ],
        "rails": {
            "input": {
                "flows": [
                    "self check input",
                    "content safety check input $model=content_safety",
                ],
                "parallel": True,
            }
        },
        "prompts": [
            {
                "task": "self_check_input",
                "content": "Is the user input safe? Answer Yes or No.",
            },
            {
                "task": "content_safety_check_input $model=content_safety",
                "content": "Check content safety: {{ user_input }}",
            },
        ],
    }

    rails_config = RailsConfig.from_content(config=cfg)

    # Break prompt rendering so every rail fails at runtime.
    with patch(
        "nemoguardrails.llm.taskmanager.LLMTaskManager.render_task_prompt"
    ) as render_mock:
        render_mock.side_effect = Exception("Runtime error in multiple rails")

        chat = TestChat(rails_config, llm_completions=["Test response"])
        chat >> "test message"

        result = await chat.app.generate_async(
            messages=chat.history, options=OPTIONS
        )

        # should get exactly one response, not multiple
        assert result is not None
        assert (
            len(result.response) == 1
        ), f"Expected 1 response, got {len(result.response)}"

        # that single response should be an internal error
        answer = result.response[0]["content"].lower()
        assert "internal error" in answer

        # count how many times "internal error" appears in the response
        error_count = answer.count("internal error")
        assert (
            error_count == 1
        ), f"Expected 1 'internal error' message, found {error_count}"

        # collect both the stop events and any error utterances in one pass
        stop_events = []
        error_utterances = []
        for ev in result.log.internal_events:
            if ev.get("type") == "BotIntent" and ev.get("intent") == "stop":
                stop_events.append(ev)
            elif ev.get("type") == "StartUtteranceBotAction" and (
                "internal error" in ev.get("script", "").lower()
            ):
                error_utterances.append(ev)

        # verify stop event was generated
        assert len(stop_events) >= 1, "Expected at least one BotIntent stop event"

        # verify we don't have multiple StartUtteranceBotAction error messages
        assert (
            len(error_utterances) == 1
        ), f"Expected 1 error utterance, found {len(error_utterances)}"
0 commit comments