 from vllm.outputs import CompletionOutput, RequestOutput


-# Helper function for Python < 3.10 compatibility
-async def async_next(async_iterator):
-    """Compatibility function equivalent to Python 3.10's anext()."""
-    return await async_iterator.__anext__()
-
-
 def create_mock_request_output(
     prompt_token_ids=None,
     output_token_ids=None,
@@ -129,7 +123,7 @@ async def test_multi_turn_token_counting():
     )

     # First turn - initial prompt and response
-    mock_output1 = await async_next(mock_generator)
+    mock_output1 = await anext(mock_generator)
     context.append_output(mock_output1)

     # At this point, we should have 5 prompt tokens and 3 output tokens
@@ -138,7 +132,7 @@ async def test_multi_turn_token_counting():
     assert context.num_tool_output_tokens == 0

     # Second turn - after tool output
-    mock_output2 = await async_next(mock_generator)
+    mock_output2 = await anext(mock_generator)
     context.append_output(mock_output2)
     # Current prompt tokens (15) - last_turn_input_tokens (5) -
     # last_turn_output_tokens (3) = 7
@@ -150,7 +144,7 @@ async def test_multi_turn_token_counting():
     assert context.num_cached_tokens == 5

     # Third turn - final response
-    mock_output3 = await async_next(mock_generator)
+    mock_output3 = await anext(mock_generator)
     context.append_output(mock_output3)
     # Additional tool output tokens from third turn:
     # Current prompt (20) - last_turn_input_tokens (15) -
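For reference, a minimal sketch of the builtin this diff switches to: since Python 3.10, anext() awaits the async iterator's __anext__(), which is exactly what the removed async_next() helper wrapped. The demo and gen names below are illustrative only and are not part of the test file.

    # Sketch only: anext() is a builtin in Python 3.10+ and is equivalent
    # to awaiting __anext__() on the async iterator.
    import asyncio

    async def demo():
        async def gen():
            yield 1
            yield 2

        it = gen()
        first = await anext(it)        # builtin, Python 3.10+
        second = await it.__anext__()  # what async_next() used to wrap
        assert (first, second) == (1, 2)

    asyncio.run(demo())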