 # $1 should be the response body
 handle_error() {
     if echo "$1" | jq -e '.error' > /dev/null; then
-        echo -e "Your request to Open AI API failed: \033[0;31m$(echo $1 | jq -r '.error.type')\033[0m"
-        echo $1 | jq -r '.error.message'
+        echo -e "Your request to Open AI API failed: \033[0;31m$(echo "$1" | jq -r '.error.type')\033[0m"
+        echo "$1" | jq -r '.error.message'
         exit 1
     fi
 }

 # request to OpenAI API completions endpoint function
 # $1 should be the request prompt
 request_to_completions() {
-    request_prompt="$1"
+    local prompt="$1"

-    response=$(curl https://api.openai.com/v1/completions \
+    curl https://api.openai.com/v1/completions \
         -sS \
         -H 'Content-Type: application/json' \
         -H "Authorization: Bearer $OPENAI_KEY" \
         -d '{
             "model": "'"$MODEL"'",
-            "prompt": "'"${request_prompt}"'",
+            "prompt": "'"$prompt"'",
             "max_tokens": '$MAX_TOKENS',
             "temperature": '$TEMPERATURE'
-        }')
+        }'
 }
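
With the response=$(...) capture removed from its body, request_to_completions now writes the raw API response to stdout and the caller captures it, the same pattern the updated call sites later in this diff use:

    response=$(request_to_completions "$request_prompt")
    handle_error "$response"
    response_data=$(echo "$response" | jq -r '.choices[].text')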

 # request to OpenAI API image generations endpoint function
 # $1 should be the prompt
 request_to_image() {
-    prompt="$1"
+    local prompt="$1"
     image_response=$(curl https://api.openai.com/v1/images/generations \
         -sS \
         -H 'Content-Type: application/json' \
@@ -95,8 +95,8 @@ request_to_image() {
 # request to OpenAI API chat completion endpoint function
 # $1 should be the message(s) formatted with role and content
 request_to_chat() {
-    message="$1"
-    response=$(curl https://api.openai.com/v1/chat/completions \
+    local message="$1"
+    curl https://api.openai.com/v1/chat/completions \
         -sS \
         -H 'Content-Type: application/json' \
         -H "Authorization: Bearer $OPENAI_KEY" \
@@ -108,35 +108,36 @@ request_to_chat() {
             ],
             "max_tokens": '$MAX_TOKENS',
             "temperature": '$TEMPERATURE'
-        }')
+        }'
 }

 # build chat context before each request for /completions (all models except
 # gpt turbo and gpt 4)
-# $1 should be the chat context
-# $2 should be the escaped prompt
+# $1 should be the escaped request prompt,
+# it extends $chat_context
 build_chat_context() {
-    chat_context="$1"
-    escaped_prompt="$2"
+    local escaped_request_prompt="$1"
     if [ -z "$chat_context" ]; then
-        chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_prompt"
+        chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_request_prompt"
     else
-        chat_context="$chat_context\nQ: $escaped_prompt"
+        chat_context="$chat_context\nQ: $escaped_request_prompt"
     fi
-    request_prompt="${chat_context//$'\n'/\\n}"
+}
+
+escape() {
+    echo "$1" | jq -Rrs 'tojson[1:-1]'
 }
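
The new escape helper replaces the scattered sed/tr escaping: jq reads stdin as one raw string (-R with -s), tojson renders it as a JSON string literal, and the [1:-1] slice drops the surrounding double quotes so the result can be spliced into the hand-built JSON request bodies. A rough sketch of what it produces, with a made-up input:

    escape $'He said "hi"\nbye'
    # prints: He said \"hi\"\nbye\n   (quotes, newlines and backslashes come out JSON-escaped)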

 # maintain chat context function for /completions (all models except
 # gpt turbo and gpt 4)
 # builds chat context from response,
 # keeps chat context length under max token limit
-# $1 should be the chat context
-# $2 should be the response data (only the text)
+# * $1 should be the escaped response data
+# * it extends $chat_context
 maintain_chat_context() {
-    chat_context="$1"
-    response_data="$2"
+    local escaped_response_data="$1"
     # add response to chat context as answer
-    chat_context="$chat_context${chat_context:+\n}\nA: ${response_data//$'\n'/\\n}"
+    chat_context="$chat_context${chat_context:+\n}\nA: $escaped_response_data"
     # check prompt length, 1 word =~ 1.3 tokens
     # reserving 100 tokens for next user prompt
     while (($(echo "$chat_context" | wc -c) * 1,3 > (MAX_TOKENS - 100))); do
@@ -149,36 +150,29 @@ maintain_chat_context() {

 # build user chat message function for /chat/completions (gpt models)
 # builds chat message before request,
-# $1 should be the chat message
-# $2 should be the escaped prompt
+# $1 should be the escaped request prompt,
+# it extends $chat_message
 build_user_chat_message() {
-    chat_message="$1"
-    escaped_prompt="$2"
+    local escaped_request_prompt="$1"
     if [ -z "$chat_message" ]; then
-        chat_message="{\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
+        chat_message="{\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
     else
-        chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
+        chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
     fi
-
-    request_prompt="$chat_message"
 }

 # adds the assistant response to the message in (chatml) format
 # for /chat/completions (gpt models)
 # keeps messages length under max token limit
-# $1 should be the chat message
-# $2 should be the response data (only the text)
+# * $1 should be the escaped response data
+# * it extends and potentially shrinks $chat_message
 add_assistant_response_to_chat_message() {
-    chat_message="$1"
-    local local_response_data="$2"
-
-    # replace new line characters from response with space
-    local_response_data=$(echo "$local_response_data" | tr '\n' ' ')
+    local escaped_response_data="$1"
     # add response to chat context as answer
-    chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$local_response_data\"}"
+    chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$escaped_response_data\"}"

     # transform to json array to parse with jq
-    chat_message_json="[ $chat_message ]"
+    local chat_message_json="[ $chat_message ]"
     # check prompt length, 1 word =~ 1.3 tokens
     # reserving 100 tokens for next user prompt
     while (($(echo "$chat_message" | wc -c) * 1,3 > (MAX_TOKENS - 100))); do
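
On the /chat/completions path, chat_message is now a running, comma-separated list of JSON objects: build_user_chat_message appends a user entry, add_assistant_response_to_chat_message appends an assistant entry, and "[ $chat_message ]" wraps it into a valid JSON array for jq. After one exchange it looks roughly like this (illustrative content):

    {"role": "user", "content": "What is 2+2?"}, {"role": "assistant", "content": "4"}
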
@@ -296,7 +290,7 @@ while $running; do
     elif [[ "$prompt" =~ ^image: ]]; then
         request_to_image "$prompt"
         handle_error "$image_response"
-        image_url=$(echo $image_response | jq -r '.data[0].url')
+        image_url=$(echo "$image_response" | jq -r '.data[0].url')
         echo -e "$OVERWRITE_PROCESSING_LINE"
         echo -e "${CHATGPT_CYAN_LABEL}Your image was created. \n\nLink: ${image_url}\n"

@@ -334,15 +328,12 @@ while $running; do
         echo -e "$OVERWRITE_PROCESSING_LINE"
         echo -e "${CHATGPT_CYAN_LABEL}Complete details for model: ${prompt#*model:}\n${model_data}"
     elif [[ "$prompt" =~ ^command: ]]; then
-        # escape quotation marks
-        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
-        # escape new lines
-        if [[ "$prompt" =~ ^command: ]]; then
-            escaped_prompt=${prompt#command:}
-            request_prompt=$COMMAND_GENERATION_PROMPT${escaped_prompt//$'\n'/' '}
-        fi
-        build_user_chat_message "$chat_message" "$request_prompt"
-        request_to_chat "$request_prompt"
+        # escape quotation marks, new lines, backslashes...
+        escaped_prompt=$(escape "$prompt")
+        escaped_prompt=${escaped_prompt#command:}
+        request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt
+        build_user_chat_message "$request_prompt"
+        response=$(request_to_chat "$chat_message")
         handle_error "$response"
         response_data=$(echo $response | jq -r '.choices[].message.content')

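In the command: branch the order matters: the whole prompt is escaped first, the literal command: prefix is then stripped from the escaped text, and the remainder is appended to $COMMAND_GENERATION_PROMPT before being added to the chat. A rough trace with a made-up prompt:

    prompt='command: list all "*.pdf" files'
    escaped_prompt=$(escape "$prompt")         # command: list all \"*.pdf\" files\n
    escaped_prompt=${escaped_prompt#command:}  #  list all \"*.pdf\" files\n
    request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt
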
@@ -363,20 +354,17 @@ while $running; do
                 eval $response_data
             fi
         fi
-        escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
-        add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"
+        add_assistant_response_to_chat_message "$(escape "$response_data")"

         timestamp=$(date +"%d/%m/%Y %H:%M")
         echo -e "$timestamp $prompt \n$response_data \n" >> ~/.chatgpt_history

     elif [[ "$MODEL" =~ ^gpt- ]]; then
-        # escape quotation marks
-        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
-        # escape new lines
-        request_prompt=${escaped_prompt//$'\n'/' '}
+        # escape quotation marks, new lines, backslashes...
+        request_prompt=$(escape "$prompt")

-        build_user_chat_message "$chat_message" "$request_prompt"
-        request_to_chat "$request_prompt"
+        build_user_chat_message "$request_prompt"
+        response=$(request_to_chat "$chat_message")
         handle_error "$response"
         response_data=$(echo "$response" | jq -r '.choices[].message.content')

@@ -387,24 +375,21 @@ while $running; do
             echo "${response_data}" | glow -
             # echo -e "${formatted_text}"
         else
-            echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w $COLUMNS
+            echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "$COLUMNS"
         fi
-        escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
-        add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"
+        add_assistant_response_to_chat_message "$(escape "$response_data")"

         timestamp=$(date +"%d/%m/%Y %H:%M")
         echo -e "$timestamp $prompt \n$response_data \n" >> ~/.chatgpt_history
     else
-        # escape quotation marks
-        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
-        # escape new lines
-        request_prompt=${escaped_prompt//$'\n'/' '}
+        # escape quotation marks, new lines, backslashes...
+        request_prompt=$(escape "$prompt")

         if [ "$CONTEXT" = true ]; then
-            build_chat_context "$chat_context" "$escaped_prompt"
+            build_chat_context "$request_prompt"
         fi

-        request_to_completions "$request_prompt"
+        response=$(request_to_completions "$request_prompt")
         handle_error "$response"
         response_data=$(echo "$response" | jq -r '.choices[].text')

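For the plain /completions path with CONTEXT=true, the escaped prompt now feeds build_chat_context and the escaped answer later feeds maintain_chat_context, so chat_context grows as a Q:/A: transcript seeded by $CHAT_INIT_PROMPT, roughly like this (placeholder values):

    chat_context="$CHAT_INIT_PROMPT\nQ: <first prompt>\n\nA: <first answer>\nQ: <next prompt>"
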
@@ -420,8 +405,7 @@ while $running; do
         fi

         if [ "$CONTEXT" = true ]; then
-            escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
-            maintain_chat_context "$chat_context" "$escaped_response_data"
+            maintain_chat_context "$(escape "$response_data")"
         fi

         timestamp=$(date +"%d/%m/%Y %H:%M")