
Commit f5f7f1b

Merge branch 'main' into multiline-prompt-chat-mode
2 parents 3236de2 + 5e572eb commit f5f7f1b

chatgpt.sh

Lines changed: 52 additions & 68 deletions
@@ -72,8 +72,8 @@ EOF
 # $1 should be the response body
 handle_error() {
     if echo "$1" | jq -e '.error' >/dev/null; then
-        echo -e "Your request to Open AI API failed: \033[0;31m$(echo $1 | jq -r '.error.type')\033[0m"
-        echo $1 | jq -r '.error.message'
+        echo -e "Your request to Open AI API failed: \033[0;31m$(echo "$1" | jq -r '.error.type')\033[0m"
+        echo "$1" | jq -r '.error.message'
         exit 1
     fi
 }
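
Why the quoting change matters: with an unquoted $1, the shell word-splits and glob-expands the response body before echo hands it to jq, so an error message containing runs of spaces or characters like * can be altered on the way to the terminal. A minimal illustration (the sample body below is invented, not taken from the commit):

    body='{"error": {"type": "invalid_request_error", "message": "bad prompt *"}}'
    echo "$body" | jq -r '.error.message'   # quoted: printed exactly as received
    # echo $body (unquoted) would let the shell expand the trailing * against files in the current directory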
@@ -92,24 +92,24 @@ list_models() {
 # request to OpenAI API completions endpoint function
 # $1 should be the request prompt
 request_to_completions() {
-    request_prompt="$1"
+    local prompt="$1"

-    response=$(curl https://api.openai.com/v1/completions \
+    curl https://api.openai.com/v1/completions \
         -sS \
         -H 'Content-Type: application/json' \
         -H "Authorization: Bearer $OPENAI_KEY" \
         -d '{
             "model": "'"$MODEL"'",
-            "prompt": "'"${request_prompt}"'",
+            "prompt": "'"$prompt"'",
             "max_tokens": '$MAX_TOKENS',
             "temperature": '$TEMPERATURE'
-    }')
+    }'
 }

 # request to OpenAI API image generations endpoint function
 # $1 should be the prompt
 request_to_image() {
-    prompt="$1"
+    local prompt="$1"
     image_response=$(curl https://api.openai.com/v1/images/generations \
         -sS \
         -H 'Content-Type: application/json' \
@@ -124,8 +124,8 @@ request_to_image() {
 # request to OpenAPI API chat completion endpoint function
 # $1 should be the message(s) formatted with role and content
 request_to_chat() {
-    message="$1"
-    response=$(curl https://api.openai.com/v1/chat/completions \
+    local message="$1"
+    curl https://api.openai.com/v1/chat/completions \
         -sS \
         -H 'Content-Type: application/json' \
         -H "Authorization: Bearer $OPENAI_KEY" \
@@ -137,35 +137,36 @@ request_to_chat() {
         ],
         "max_tokens": '$MAX_TOKENS',
         "temperature": '$TEMPERATURE'
-    }')
+    }'
 }

 # build chat context before each request for /completions (all models except
 # gpt turbo and gpt 4)
-# $1 should be the chat context
-# $2 should be the escaped prompt
+# $1 should be the escaped request prompt,
+# it extends $chat_context
 build_chat_context() {
-    chat_context="$1"
-    escaped_prompt="$2"
+    local escaped_request_prompt="$1"
     if [ -z "$chat_context" ]; then
-        chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_prompt"
+        chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_request_prompt"
     else
-        chat_context="$chat_context\nQ: $escaped_prompt"
+        chat_context="$chat_context\nQ: $escaped_request_prompt"
     fi
-    request_prompt="${chat_context//$'\n'/\\n}"
+}
+
+escape(){
+    echo "$1" | jq -Rrs 'tojson[1:-1]'
 }

 # maintain chat context function for /completions (all models except
 # gpt turbo and gpt 4)
 # builds chat context from response,
 # keeps chat context length under max token limit
-# $1 should be the chat context
-# $2 should be the response data (only the text)
+# * $1 should be the escaped response data
+# * it extends $chat_context
 maintain_chat_context() {
-    chat_context="$1"
-    response_data="$2"
+    local escaped_response_data="$1"
     # add response to chat context as answer
-    chat_context="$chat_context${chat_context:+\n}\nA: ${response_data//$'\n'/\\n}"
+    chat_context="$chat_context${chat_context:+\n}\nA: $escaped_response_data"
     # check prompt length, 1 word =~ 1.3 tokens
     # reserving 100 tokens for next user prompt
     while (($(echo "$chat_context" | wc -c) * 1,3 > (MAX_TOKENS - 100))); do
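
The new escape() helper centralises the escaping that was previously done with sed and parameter expansion. jq -Rrs slurps the raw input into a single JSON string, tojson renders it with quotes, backslashes and newlines escaped, and the [1:-1] slice drops the surrounding double quotes. A rough sanity check of the expected behaviour (the output line is an assumption, not part of the commit):

    escape 'say "hi"
    then stop'
    # -> say \"hi\"\nthen stop\n   (trailing \n comes from echo's newline)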
@@ -178,36 +179,29 @@ maintain_chat_context() {

 # build user chat message function for /chat/completions (gpt models)
 # builds chat message before request,
-# $1 should be the chat message
-# $2 should be the escaped prompt
+# $1 should be the escaped request prompt,
+# it extends $chat_message
 build_user_chat_message() {
-    chat_message="$1"
-    escaped_prompt="$2"
+    local escaped_request_prompt="$1"
     if [ -z "$chat_message" ]; then
-        chat_message="{\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
+        chat_message="{\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
     else
-        chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
+        chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
     fi
-
-    request_prompt="$chat_message"
 }

 # adds the assistant response to the message in (chatml) format
 # for /chat/completions (gpt models)
 # keeps messages length under max token limit
-# $1 should be the chat message
-# $2 should be the response data (only the text)
+# * $1 should be the escaped response data
+# * it extends and potentially shrinks $chat_message
 add_assistant_response_to_chat_message() {
-    chat_message="$1"
-    local local_response_data="$2"
-
-    # replace new line characters from response with space
-    local_response_data=$(echo "$local_response_data" | tr '\n' ' ')
+    local escaped_response_data="$1"
     # add response to chat context as answer
-    chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$local_response_data\"}"
+    chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$escaped_response_data\"}"

     # transform to json array to parse with jq
-    chat_message_json="[ $chat_message ]"
+    local chat_message_json="[ $chat_message ]"
     # check prompt length, 1 word =~ 1.3 tokens
     # reserving 100 tokens for next user prompt
     while (($(echo "$chat_message" | wc -c) * 1,3 > (MAX_TOKENS - 100))); do
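
After this change, $chat_message is a comma-separated list of already-escaped role/content objects, and chat_message_json only wraps it in brackets so jq can measure and trim old messages. A hypothetical value after one exchange (contents invented for illustration):

    chat_message='{"role": "user", "content": "list files"}, {"role": "assistant", "content": "ls -la"}'
    echo "[ $chat_message ]" | jq length   # prints 2: a valid JSON array, ready for trimming with jq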
@@ -346,7 +340,7 @@ while $running; do
     elif [[ "$prompt" =~ ^image: ]]; then
         request_to_image "$prompt"
         handle_error "$image_response"
-        image_url=$(echo $image_response | jq -r '.data[0].url')
+        image_url=$(echo "$image_response" | jq -r '.data[0].url')
         echo -e "$OVERWRITE_PROCESSING_LINE"
         echo -e "${CHATGPT_CYAN_LABEL}Your image was created. \n\nLink: ${image_url}\n"

@@ -378,15 +372,12 @@ while $running; do
         echo -e "$OVERWRITE_PROCESSING_LINE"
         echo -e "${CHATGPT_CYAN_LABEL}Complete details for model: ${prompt#*model:}\n ${model_data}"
     elif [[ "$prompt" =~ ^command: ]]; then
-        # escape quotation marks
-        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
-        # escape new lines
-        if [[ "$prompt" =~ ^command: ]]; then
-            escaped_prompt=${prompt#command:}
-            request_prompt=$COMMAND_GENERATION_PROMPT${escaped_prompt//$'\n'/' '}
-        fi
-        build_user_chat_message "$chat_message" "$request_prompt"
-        request_to_chat "$request_prompt"
+        # escape quotation marks, new lines, backslashes...
+        escaped_prompt=$(escape "$prompt")
+        escaped_prompt=${escaped_prompt#command:}
+        request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt
+        build_user_chat_message "$request_prompt"
+        response=$(request_to_chat "$chat_message")
         handle_error "$response"
         response_data=$(echo $response | jq -r '.choices[].message.content')
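Note the order of operations in the rewritten branch for command: prompts: the whole prompt is escaped first, then the command: prefix is stripped from the escaped string, and the remainder is appended to $COMMAND_GENERATION_PROMPT. A sketch with an invented prompt:

    prompt='command: list all pdf files'       # hypothetical user input
    escaped_prompt=$(escape "$prompt")          # -> command: list all pdf files\n
    escaped_prompt=${escaped_prompt#command:}   # ->  list all pdf files\n   (prefix stripped)
    request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt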
@@ -407,20 +398,17 @@ while $running; do
                 eval $response_data
             fi
         fi
-        escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
-        add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"
+        add_assistant_response_to_chat_message "$(escape "$response_data")"

         timestamp=$(date +"%d/%m/%Y %H:%M")
         echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history

     elif [[ "$MODEL" =~ ^gpt- ]]; then
-        # escape quotation marks
-        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
-        # escape new lines
-        request_prompt=${escaped_prompt//$'\n'/' '}
+        # escape quotation marks, new lines, backslashes...
+        request_prompt=$(escape "$prompt")

-        build_user_chat_message "$chat_message" "$request_prompt"
-        request_to_chat "$request_prompt"
+        build_user_chat_message "$request_prompt"
+        response=$(request_to_chat "$chat_message")
         handle_error "$response"
         response_data=$(echo "$response" | jq -r '.choices[].message.content')

@@ -431,24 +419,21 @@ while $running; do
             echo "${response_data}" | glow -
             #echo -e "${formatted_text}"
         else
-            echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w $COLUMNS
+            echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "$COLUMNS"
         fi
-        escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
-        add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"
+        add_assistant_response_to_chat_message "$(escape "$response_data")"

         timestamp=$(date +"%d/%m/%Y %H:%M")
         echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history
     else
-        # escape quotation marks
-        escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
-        # escape new lines
-        request_prompt=${escaped_prompt//$'\n'/' '}
+        # escape quotation marks, new lines, backslashes...
+        request_prompt=$(escape "$prompt")

         if [ "$CONTEXT" = true ]; then
-            build_chat_context "$chat_context" "$escaped_prompt"
+            build_chat_context "$request_prompt"
         fi

-        request_to_completions "$request_prompt"
+        response=$(request_to_completions "$request_prompt")
         handle_error "$response"
         response_data=$(echo "$response" | jq -r '.choices[].text')

@@ -464,8 +449,7 @@ while $running; do
         fi

         if [ "$CONTEXT" = true ]; then
-            escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
-            maintain_chat_context "$chat_context" "$escaped_response_data"
+            maintain_chat_context "$(escape "$response_data")"
         fi

         timestamp=$(date +"%d/%m/%Y %H:%M")
