@@ -737,11 +737,11 @@ async def openai_compatible_generator(run_code):
             if chunk["type"] == "message" and "content" in chunk:
                 output_content = chunk["content"]
             if chunk["type"] == "code" and "start" in chunk:
-                output_content = " "
+                output_content = "```" + chunk["format"] + "\n"
             if chunk["type"] == "code" and "content" in chunk:
-                output_content = (
-                    f"""<unvoiced code="{chunk["content"]}"></unvoiced>"""
-                )
+                output_content = chunk["content"]
+            if chunk["type"] == "code" and "end" in chunk:
+                output_content = "\n```\n"

             if output_content:
                 await asyncio.sleep(0)
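
The hunk above changes how generated code reaches the client: instead of hiding it inside an `<unvoiced>` placeholder tag, code chunks are now streamed verbatim inside a fenced markdown block, opened on the code `start` event and closed on the `end` event. A minimal sketch of the resulting mapping, assuming chunks shaped like the ones tested above (the `chunk_to_delta` helper and the sample events are illustrative, not part of this file):

def chunk_to_delta(chunk):
    # Map one interpreter event chunk to the text delta an
    # OpenAI-compatible client should receive (None = nothing to send).
    if chunk["type"] == "message" and "content" in chunk:
        return chunk["content"]
    if chunk["type"] == "code" and "start" in chunk:
        return "```" + chunk["format"] + "\n"  # open a fenced code block
    if chunk["type"] == "code" and "content" in chunk:
        return chunk["content"]  # stream the code itself, verbatim
    if chunk["type"] == "code" and "end" in chunk:
        return "\n```\n"  # close the fence
    return None

# Example: three chunks render as one complete fenced block.
events = [
    {"type": "code", "start": True, "format": "python"},
    {"type": "code", "content": "print('hi')"},
    {"type": "code", "end": True},
]
print("".join(d for d in map(chunk_to_delta, events) if d))
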
@@ -776,6 +776,16 @@ async def openai_compatible_generator(run_code):
                 chunk["type"] == "confirmation"
                 and async_interpreter.auto_run == False
             ):
+                await asyncio.sleep(0)
+                output_content = "Do you want to run this code?"
+                output_chunk = {
+                    "id": i,
+                    "object": "chat.completion.chunk",
+                    "created": time.time(),
+                    "model": "open-interpreter",
+                    "choices": [{"delta": {"content": output_content}}],
+                }
+                yield f"data: {json.dumps(output_chunk)}\n\n"
                 break

             if async_interpreter.stop_event.is_set():
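
With this change, a `confirmation` chunk no longer ends the stream silently when `auto_run` is off: the generator yields one final delta asking the user to confirm before breaking. Every delta travels as a server-sent event wrapping an OpenAI-style `chat.completion.chunk` envelope, the same framing used throughout this generator. A self-contained sketch of that framing (`sse_delta` is an illustrative helper, not a function in this file):

import json
import time

def sse_delta(i, content, model="open-interpreter"):
    # Wrap one text delta in the OpenAI chat.completion.chunk envelope
    # and frame it as a single server-sent event ("data: ...\n\n").
    chunk = {
        "id": i,
        "object": "chat.completion.chunk",
        "created": time.time(),
        "model": model,
        "choices": [{"delta": {"content": content}}],
    }
    return f"data: {json.dumps(chunk)}\n\n"

# The confirmation prompt above, as it appears on the wire:
print(sse_delta(0, "Do you want to run this code?"), end="")
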
@@ -786,11 +796,11 @@ async def openai_compatible_generator(run_code):
             if chunk["type"] == "message" and "content" in chunk:
                 output_content = chunk["content"]
             if chunk["type"] == "code" and "start" in chunk:
-                output_content = " "
+                output_content = "```" + chunk["format"] + "\n"
             if chunk["type"] == "code" and "content" in chunk:
-                output_content = (
-                    f"""<unvoiced code="{chunk["content"]}"></unvoiced>"""
-                )
+                output_content = chunk["content"]
+            if chunk["type"] == "code" and "end" in chunk:
+                output_content = "\n```\n"

             if output_content:
                 await asyncio.sleep(0)
@@ -806,18 +816,6 @@ async def openai_compatible_generator(run_code):
         if made_chunk:
             break

-    if async_interpreter.messages[-1]["type"] == "code":
-        await asyncio.sleep(0)
-        output_content = "{CODE_FINISHED}"
-        output_chunk = {
-            "id": i,
-            "object": "chat.completion.chunk",
-            "created": time.time(),
-            "model": "open-interpreter",
-            "choices": [{"delta": {"content": output_content}}],
-        }
-        yield f"data: {json.dumps(output_chunk)}\n\n"
-
 @router.post("/openai/chat/completions")
 async def chat_completion(request: ChatCompletionRequest):
     global last_start_time
@@ -851,7 +849,14 @@ async def chat_completion(request: ChatCompletionRequest):
         async_interpreter.auto_run = False
         return

-    if type(last_message.content) == str:
+    run_code = False
+    if (
+        async_interpreter.messages
+        and async_interpreter.messages[-1]["type"] == "code"
+        and last_message.content.lower().strip(".!?").strip() == "yes"
+    ):
+        run_code = True
+    elif type(last_message.content) == str:
         async_interpreter.messages.append(
             {
                 "role": "user",
@@ -890,11 +895,6 @@ async def chat_completion(request: ChatCompletionRequest):
             }
         )

-        run_code = False
-        if last_message.content == "{RUN}":
-            run_code = True
-            # Remove that {RUN} message that would have just been added
-            async_interpreter.messages = async_interpreter.messages[:-1]
     else:
         if async_interpreter.context_mode:
             # In context mode, we only respond if we recieved a {START} message
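
Taken together, the commit turns code execution into a two-turn conversation over the OpenAI-compatible endpoint: the first turn streams fenced code and, with `auto_run` off, ends with the confirmation prompt; a follow-up "Yes." triggers the run. A minimal client-side sketch, assuming the server listens on http://localhost:8000 (the host, port, and `[DONE]` guard are assumptions, not shown in this diff):

import json
import requests

def stream_chat(messages, base="http://localhost:8000"):
    # POST to the endpoint above and yield each text delta as it arrives.
    resp = requests.post(
        base + "/openai/chat/completions",
        json={"model": "open-interpreter", "messages": messages, "stream": True},
        stream=True,
    )
    for line in resp.iter_lines(decode_unicode=True):
        if line.startswith("data: ") and "[DONE]" not in line:
            delta = json.loads(line[len("data: "):])["choices"][0]["delta"]
            yield delta.get("content", "")

history = [{"role": "user", "content": "Plot y = x ** 2"}]
reply = "".join(stream_chat(history))
# With auto_run off, the stream ends with "Do you want to run this code?"
history += [
    {"role": "assistant", "content": reply},
    {"role": "user", "content": "Yes."},
]
print("".join(stream_chat(history)))
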