Skip to content

Commit c34f294

Browse files
authored
fix(cli): Fix TypeError in v2.x chat due to incorrect State/dict conversion (#1509)
* fix(cli): Fix TypeError in v2.x chat due to incorrect State/dict conversion

Fixes incorrect type assumptions in PR #1380 that caused a TypeError when using the v2.x chat CLI. The bug incorrectly assumed process_events_async returns a dict and tried to convert State objects with State(**output_state).

Changes:
- Fix type annotations in LLMRails.process_events_async to return Union[dict, State]
- Remove incorrect asdict() conversion and State(**) reconstruction in chat.py
- Add integration tests to prevent regression
1 parent ea85f78 commit c34f294

File tree

3 files changed

+262
-11
lines changed

3 files changed

+262
-11
lines changed

nemoguardrails/cli/chat.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
import asyncio
1616
import json
1717
import os
18-
from dataclasses import asdict, dataclass, field
18+
from dataclasses import dataclass, field
1919
from typing import Dict, List, Optional, Tuple, Union, cast
2020

2121
import aiohttp
@@ -498,13 +498,12 @@ async def _check_local_async_actions():
498498

499499
output_events, output_state = await rails_app.process_events_async(
500500
input_events_copy,
501-
asdict(chat_state.state) if chat_state.state else None,
501+
chat_state.state,
502502
)
503503
chat_state.output_events = output_events
504504

505-
# process_events_async returns a Dict `state`, need to convert to dataclass for ChatState object
506505
if output_state:
507-
chat_state.output_state = cast(State, State(**output_state))
506+
chat_state.output_state = cast(State, output_state)
508507

509508
# Process output_events and potentially generate new input_events
510509
_process_output()
@@ -535,12 +534,11 @@ async def _process_input_events():
535534
chat_state.input_events = []
536535
output_events, output_state = await rails_app.process_events_async(
537536
input_events_copy,
538-
asdict(chat_state.state) if chat_state.state else None,
537+
chat_state.state,
539538
)
540539
chat_state.output_events = output_events
541540
if output_state:
542-
# process_events_async returns a Dict `state`, need to convert to dataclass for ChatState object
543-
output_state_typed: State = cast(State, State(**output_state))
541+
output_state_typed: State = cast(State, output_state)
544542
chat_state.output_state = output_state_typed
545543
debugger.set_output_state(output_state_typed)
546544

nemoguardrails/rails/llm/llmrails.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1462,9 +1462,9 @@ def generate_events(
14621462
async def process_events_async(
14631463
self,
14641464
events: List[dict],
1465-
state: Optional[dict] = None,
1465+
state: Union[Optional[dict], State] = None,
14661466
blocking: bool = False,
1467-
) -> Tuple[List[dict], dict]:
1467+
) -> Tuple[List[dict], Union[dict, State]]:
14681468
"""Process a sequence of events in a given state.
14691469
14701470
The events will be processed one by one, in the input order.
@@ -1501,9 +1501,9 @@ async def process_events_async(
15011501
def process_events(
15021502
self,
15031503
events: List[dict],
1504-
state: Optional[dict] = None,
1504+
state: Union[Optional[dict], State] = None,
15051505
blocking: bool = False,
1506-
) -> Tuple[List[dict], dict]:
1506+
) -> Tuple[List[dict], Union[dict, State]]:
15071507
"""Synchronous version of `LLMRails.process_events_async`."""
15081508

15091509
if check_sync_call_from_async_loop():
Lines changed: 253 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,253 @@
1+
# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2+
# SPDX-License-Identifier: Apache-2.0
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
16+
import os
17+
18+
import pytest
19+
20+
# Live tests hit a real LLM endpoint; enable them by setting either the
# LIVE_TEST or LIVE_TEST_MODE environment variable.
LIVE_TEST_MODE = os.environ.get("LIVE_TEST") or os.environ.get("LIVE_TEST_MODE")
22+
23+
@pytest.mark.skipif(not LIVE_TEST_MODE, reason="Not in live mode.")
class TestProcessEventsAsyncV2x:
    """Integration coverage for ``LLMRails.process_events_async`` on the v2.x runtime.

    Guards against a regression of issue #1505, where PR #1380 assumed the
    method returned a plain dict and rebuilt it via ``State(**output_state)``,
    raising a TypeError. The bug slipped in because:

    1. the annotation in llmrails.py wrongly declared a dict return,
    2. PR #1380 "fixed" chat.py based on that wrong annotation, and
    3. no test called ``LLMRails.process_events_async()`` under v2.x
       (existing tests exercised ``runtime.process_events()`` directly).
    """

    @pytest.mark.asyncio
    async def test_process_events_async_returns_state_object(self):
        """The v2.x runtime must hand back a ``State`` instance, not a dict.

        This single assertion would have caught the bug immediately.
        """
        from nemoguardrails import LLMRails, RailsConfig
        from nemoguardrails.colang.v2_x.runtime.flows import State

        rails_config = RailsConfig.from_content(
            """
            import core

            flow main
              user said "hi"
              bot say "Hello!"
            """,
            """
            colang_version: "2.x"
            models:
              - type: main
                engine: openai
                model: gpt-4o-mini
            """,
        )
        app = LLMRails(rails_config)

        initial_events = [
            {"type": "UtteranceUserActionFinished", "final_transcript": "hi"}
        ]
        result_events, result_state = await app.process_events_async(
            initial_events, state=None
        )

        assert isinstance(
            result_state, State
        ), f"Expected State object, got {type(result_state)}"
        assert isinstance(result_events, list)
        assert len(result_events) > 0

    @pytest.mark.asyncio
    async def test_process_events_async_accepts_state_object(self):
        """A ``State`` from a previous turn can be fed straight back in.

        PR #1380 also wrapped the input state with ``asdict()`` before calling
        ``process_events_async``. This verifies that passing the ``State``
        object directly works across two conversation turns.
        """
        from nemoguardrails import LLMRails, RailsConfig
        from nemoguardrails.colang.v2_x.runtime.flows import State
        from nemoguardrails.utils import new_event_dict

        rails_config = RailsConfig.from_content(
            """
            import core

            flow main
              user said "hi"
              bot say "Hello!"
              user said "bye"
              bot say "Goodbye!"
            """,
            """
            colang_version: "2.x"
            models:
              - type: main
                engine: openai
                model: gpt-3.5-turbo
            """,
        )
        app = LLMRails(rails_config)

        first_turn = [
            {"type": "UtteranceUserActionFinished", "final_transcript": "hi"}
        ]
        events_after_hi, state_after_hi = await app.process_events_async(
            first_turn, state=None
        )

        assert isinstance(state_after_hi, State)

        # Acknowledge each bot utterance from turn one, then send the second
        # user message.
        second_turn = [
            new_event_dict(
                "UtteranceBotActionFinished",
                action_uid=event["action_uid"],
                is_success=True,
                final_script=event["script"],
            )
            for event in events_after_hi
            if event["type"] == "StartUtteranceBotAction"
        ]
        second_turn.append(
            {"type": "UtteranceUserActionFinished", "final_transcript": "bye"}
        )

        events_after_bye, state_after_bye = await app.process_events_async(
            second_turn, state=state_after_hi
        )

        assert isinstance(
            state_after_bye, State
        ), "Second call should also return State object when passing State as input"
@pytest.mark.skipif(not LIVE_TEST_MODE, reason="Not in live mode.")
class TestChatV2xE2E:
    """End-to-end tests for the chat CLI with the v2.x runtime.

    These tests exercise the actual chat.py code paths that were broken by
    PR #1380.
    """

    @pytest.mark.asyncio
    async def test_chat_v2x_with_real_llm(self):
        """E2E test of v2.x chat with a real LLM.

        Requires LIVE_TEST_MODE=1 and an OpenAI API key. Feeds two scripted
        user inputs ("hi", then "exit") into the interactive loop and treats
        a clean interrupt as success.
        """
        from unittest.mock import patch

        from nemoguardrails import LLMRails, RailsConfig
        from nemoguardrails.cli.chat import _run_chat_v2_x

        config = RailsConfig.from_content(
            """
            import core

            flow main
              user said "hi"
              bot say "Hello from v2.x!"
            """,
            """
            colang_version: "2.x"
            models:
              - type: main
                engine: openai
                model: gpt-3.5-turbo
            """,
        )

        rails = LLMRails(config)

        simulated_input = ["hi", "exit"]
        input_iter = iter(simulated_input)

        def mock_input(*args, **kwargs):
            # Serve the scripted inputs; once exhausted, interrupt the chat
            # loop the same way a user pressing Ctrl-C would.
            try:
                return next(input_iter)
            except StopIteration:
                raise KeyboardInterrupt()

        with patch("builtins.input", side_effect=mock_input):
            try:
                await _run_chat_v2_x(rails)
            except (KeyboardInterrupt, StopIteration):
                # Expected: the scripted input runs out and ends the loop.
                pass

    @pytest.mark.asyncio
    async def test_chat_v2x_process_events_flow(self):
        """Test the exact code path that was broken in chat.py.

        Simulates what ``_run_chat_v2_x`` does internally: a minimal
        ``ChatState`` holder, one user turn pushed through
        ``process_events_async``, and the returned state stored back without
        any dict conversion.
        """
        from dataclasses import dataclass, field
        from typing import List, Optional

        from nemoguardrails import LLMRails, RailsConfig
        from nemoguardrails.colang.v2_x.runtime.flows import State

        @dataclass
        class ChatState:
            # Mirrors the fields chat.py's ChatState uses for this code path.
            state: Optional[State] = None
            input_events: List[dict] = field(default_factory=list)
            output_events: List[dict] = field(default_factory=list)
            output_state: Optional[State] = None

        config = RailsConfig.from_content(
            """
            import core

            flow main
              user said "hi"
              bot say "Hello!"
            """,
            """
            colang_version: "2.x"
            models:
              - type: main
                engine: openai
                model: gpt-3.5-turbo
            """,
        )

        rails = LLMRails(config)
        chat_state = ChatState()

        chat_state.input_events = [
            {"type": "UtteranceUserActionFinished", "final_transcript": "hi"}
        ]

        input_events_copy = chat_state.input_events.copy()
        chat_state.input_events = []

        output_events, output_state = await rails.process_events_async(
            input_events_copy,
            chat_state.state,
        )
        chat_state.output_events = output_events

        # The State object is stored as-is — no State(**output_state)
        # reconstruction, which is exactly what issue #1505 was about.
        if output_state:
            chat_state.output_state = output_state

        assert isinstance(chat_state.output_state, State)
        assert len(chat_state.output_events) > 0

0 commit comments

Comments (0)