@@ -69,7 +69,7 @@ def mock_sample(*args, **kwargs):
     n = 0  # reset
     chunks = list(llama.create_completion(text, max_tokens=20, stream=True))
     assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == output_text
-    # assert chunks[-1]["choices"][0]["finish_reason"] == "stop"
+    assert chunks[-1]["choices"][0]["finish_reason"] == "stop"

     ## Test basic completion until stop sequence
     n = 0  # reset
@@ -83,19 +83,19 @@ def mock_sample(*args, **kwargs):
     assert (
         "".join(chunk["choices"][0]["text"] for chunk in chunks) == " jumps over the "
     )
-    # assert chunks[-1]["choices"][0]["finish_reason"] == "stop"
+    assert chunks[-1]["choices"][0]["finish_reason"] == "stop"

     ## Test basic completion until length
     n = 0  # reset
     completion = llama.create_completion(text, max_tokens=2)
     assert completion["choices"][0]["text"] == " jumps"
-    # assert completion["choices"][0]["finish_reason"] == "length"
+    assert completion["choices"][0]["finish_reason"] == "length"

     ## Test streaming completion until length
     n = 0  # reset
     chunks = list(llama.create_completion(text, max_tokens=2, stream=True))
     assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == " jumps"
-    # assert chunks[-1]["choices"][0]["finish_reason"] == "length"
+    assert chunks[-1]["choices"][0]["finish_reason"] == "length"


 def test_llama_pickle():
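The re-enabled assertions only hold if the test's patched sampler eventually emits the model's EOS token once its scripted output is exhausted. A minimal sketch of that pattern, using hypothetical names (make_mock_sample, output_tokens, token_eos) rather than the test's actual locals:

# Hypothetical sketch, not taken from this repository: a sampler stub that
# plays back a scripted token sequence and then keeps returning the EOS token,
# so generation ends with finish_reason == "stop" unless max_tokens cuts it
# off first, in which case the API reports finish_reason == "length".
def make_mock_sample(output_tokens, token_eos):
    n = 0  # mirrors the `n = 0  # reset` counter in the test

    def mock_sample(*args, **kwargs):
        nonlocal n
        if n < len(output_tokens):
            token = output_tokens[n]
            n += 1
            return token
        return token_eos  # script exhausted: signal end-of-text

    return mock_sample

With max_tokens=2 the scripted sequence is truncated before the EOS token is ever returned, which is why those cases assert finish_reason == "length" instead of "stop".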