Skip to content

Commit b7833e6

Browse files
authored
Merge pull request #101 from 0xacx/multiline-prompt-chat-mode
Multiline prompt chat mode
2 parents 24a0de1 + 80693a4 commit b7833e6

File tree

1 file changed

+69
-25
lines changed

1 file changed

+69
-25
lines changed

chatgpt.sh

Lines changed: 69 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ CHATGPT_CYAN_LABEL="\033[36mchatgpt \033[0m"
1212
PROCESSING_LABEL="\n\033[90mProcessing... \033[0m\033[0K\r"
1313
OVERWRITE_PROCESSING_LINE=" \033[0K\r"
1414

15-
1615
if [[ -z "$OPENAI_KEY" ]]; then
1716
echo "You need to set your OPENAI_KEY to use this script"
1817
echo "You can set it temporarily by running this on your terminal: export OPENAI_KEY=YOUR_KEY_HERE"
@@ -37,15 +36,34 @@ Commands:
3736
*If a command modifies your file system or downloads external files the script will show a warning before executing.
3837
3938
Options:
40-
-i, --init-prompt - Provide initial chat prompt to use in context
41-
--init-prompt-from-file - Provide initial prompt from file
42-
-p, --prompt - Provide prompt instead of starting chat
43-
--prompt-from-file - Provide prompt from file
44-
-t, --temperature - Temperature
45-
--max-tokens - Max number of tokens
46-
-m, --model - Model
47-
-s, --size - Image size. (The sizes that are accepted by the OpenAI API are 256x256, 512x512, 1024x1024)
48-
-c, --chat-context - For models that do not support chat context by default (all models except gpt-3.5-turbo and gpt-4), you can enable chat context, for the model to remember your previous questions and its previous answers. It also makes models aware of todays date and what data it was trained on.
39+
-i, --init-prompt Provide initial chat prompt to use in context
40+
41+
--init-prompt-from-file Provide initial prompt from file
42+
43+
-p, --prompt Provide prompt instead of starting chat
44+
45+
--prompt-from-file Provide prompt from file
46+
47+
--multi-line-prompt Allow multi-line prompts during chat mode
48+
49+
-t, --temperature Temperature
50+
51+
--max-tokens Max number of tokens
52+
53+
-l, --list List available OpenAI models
54+
55+
-m, --model Model to use
56+
57+
-s, --size Image size. (The sizes that are accepted by the
58+
OpenAI API are 256x256, 512x512, 1024x1024)
59+
60+
-c, --chat-context For models that do not support chat context by
61+
default (all models except gpt-3.5-turbo and
62+
gpt-4), you can enable chat context, for the
63+
model to remember your previous questions and
64+
its previous answers. It also makes models
65+
aware of today's date and what data it was trained
66+
on.
4967
5068
EOF
5169
}
@@ -60,6 +78,17 @@ handle_error() {
6078
fi
6179
}
6280

81+
# request to the OpenAI API models endpoint. Returns a list of models
82+
# takes no input parameters
83+
list_models() {
84+
models_response=$(curl https://api.openai.com/v1/models \
85+
-sS \
86+
-H "Authorization: Bearer $OPENAI_KEY")
87+
handle_error "$models_response"
88+
models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}')
89+
echo -e "$OVERWRITE_PROCESSING_LINE"
90+
echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}"
91+
}
6392
# request to OpenAI API completions endpoint function
6493
# $1 should be the request prompt
6594
request_to_completions() {
@@ -124,8 +153,8 @@ build_chat_context() {
124153
fi
125154
}
126155

127-
escape(){
128-
echo "$1" | jq -Rrs 'tojson[1:-1]'
156+
escape() {
157+
echo "$1" | jq -Rrs 'tojson[1:-1]'
129158
}
130159

131160
# maintain chat context function for /completions (all models except
@@ -218,6 +247,10 @@ while [[ "$#" -gt 0 ]]; do
218247
shift
219248
shift
220249
;;
250+
-l | --list)
251+
list_models
252+
exit 0
253+
;;
221254
-m | --model)
222255
MODEL="$2"
223256
shift
@@ -228,6 +261,10 @@ while [[ "$#" -gt 0 ]]; do
228261
shift
229262
shift
230263
;;
264+
--multi-line-prompt)
265+
MULTI_LINE_PROMPT=true
266+
shift
267+
;;
231268
-c | --chat-context)
232269
CONTEXT=true
233270
shift
@@ -249,6 +286,13 @@ MAX_TOKENS=${MAX_TOKENS:-1024}
249286
MODEL=${MODEL:-gpt-3.5-turbo}
250287
SIZE=${SIZE:-512x512}
251288
CONTEXT=${CONTEXT:-false}
289+
MULTI_LINE_PROMPT=${MULTI_LINE_PROMPT:-false}
290+
291+
# create our temp file for multi-line input
292+
if [ $MULTI_LINE_PROMPT = true ]; then
293+
USER_INPUT_TEMP_FILE=$(mktemp)
294+
trap 'rm -f ${USER_INPUT_TEMP_FILE}' EXIT
295+
fi
252296

253297
# create history file
254298
if [ ! -f ~/.chatgpt_history ]; then
@@ -273,9 +317,16 @@ fi
273317
while $running; do
274318

275319
if [ -z "$pipe_mode_prompt" ]; then
276-
echo -e "\nEnter a prompt:"
277-
read -e prompt
278-
if [ "$prompt" != "exit" ] && [ "$prompt" != "q" ]; then
320+
if [ $MULTI_LINE_PROMPT = true ]; then
321+
echo -e "\nEnter a prompt: (Press Enter then Ctrl-D to send)"
322+
cat > "${USER_INPUT_TEMP_FILE}"
323+
input_from_temp_file=$(cat "${USER_INPUT_TEMP_FILE}")
324+
prompt=$(escape "$input_from_temp_file")
325+
else
326+
echo -e "\nEnter a prompt:"
327+
read -e prompt
328+
fi
329+
if [[ ! $prompt =~ ^(exit|q)$ ]]; then
279330
echo -ne $PROCESSING_LABEL
280331
fi
281332
else
@@ -285,7 +336,7 @@ while $running; do
285336
CHATGPT_CYAN_LABEL=""
286337
fi
287338

288-
if [ "$prompt" == "exit" ] || [ "$prompt" == "q" ]; then
339+
if [[ $prompt =~ ^(exit|q)$ ]]; then
289340
running=false
290341
elif [[ "$prompt" =~ ^image: ]]; then
291342
request_to_image "$prompt"
@@ -312,13 +363,7 @@ while $running; do
312363
elif [[ "$prompt" == "history" ]]; then
313364
echo -e "\n$(cat ~/.chatgpt_history)"
314365
elif [[ "$prompt" == "models" ]]; then
315-
models_response=$(curl https://api.openai.com/v1/models \
316-
-sS \
317-
-H "Authorization: Bearer $OPENAI_KEY")
318-
handle_error "$models_response"
319-
models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}')
320-
echo -e "$OVERWRITE_PROCESSING_LINE"
321-
echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}"
366+
list_models
322367
elif [[ "$prompt" =~ ^model: ]]; then
323368
models_response=$(curl https://api.openai.com/v1/models \
324369
-sS \
@@ -373,7 +418,6 @@ while $running; do
373418
if command -v glow &>/dev/null; then
374419
echo -e "${CHATGPT_CYAN_LABEL}"
375420
echo "${response_data}" | glow -
376-
#echo -e "${formatted_text}"
377421
else
378422
echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "$COLUMNS"
379423
fi
@@ -399,7 +443,7 @@ while $running; do
399443
echo -e "${CHATGPT_CYAN_LABEL}"
400444
echo "${response_data}" | glow -
401445
else
402-
# else remove empty lines and print
446+
# else remove empty lines and print
403447
formatted_text=$(echo "${response_data}" | sed '1,2d; s/^A://g')
404448
echo -e "${CHATGPT_CYAN_LABEL}${formatted_text}" | fold -s -w $COLUMNS
405449
fi

0 commit comments

Comments
 (0)