From 26081ad319b893be33212c4078d64a48647a1a66 Mon Sep 17 00:00:00 2001 From: camAtGitHub Date: Thu, 20 Apr 2023 20:01:42 +1000 Subject: [PATCH 01/16] refactor(usage()): Align help output --- chatgpt.sh | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 828f797..7533680 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -37,15 +37,32 @@ Commands: *If a command modifies your file system or downloads external files the script will show a warning before executing. Options: - -i, --init-prompt - Provide initial chat prompt to use in context - --init-prompt-from-file - Provide initial prompt from file - -p, --prompt - Provide prompt instead of starting chat - --prompt-from-file - Provide prompt from file - -t, --temperature - Temperature - --max-tokens - Max number of tokens - -m, --model - Model - -s, --size - Image size. (The sizes that are accepted by the OpenAI API are 256x256, 512x512, 1024x1024) - -c, --chat-context - For models that do not support chat context by default (all models except gpt-3.5-turbo and gpt-4), you can enable chat context, for the model to remember your previous questions and its previous answers. It also makes models aware of today's date and what data it was trained on. + -i, --init-prompt Provide initial chat prompt to use in context + + --init-prompt-from-file Provide initial prompt from file + + -p, --prompt Provide prompt instead of starting chat + + --prompt-from-file Provide prompt from file + + -t, --temperature Temperature + + --max-tokens Max number of tokens + + -l, --list List available openAI models + + -m, --model Model to use + + -s, --size Image size. (The sizes that are accepted by the + OpenAI API are 256x256, 512x512, 1024x1024) + + -c, --chat-context For models that do not support chat context by + default (all models except gpt-3.5-turbo and + gpt-4), you can enable chat context, for the + model to remember your previous questions and + its previous answers. It also makes models + aware of today's date and what data it was trained + on. EOF } From f040fe81772e8a8e7013fd8b57a8a1a8c63a3cbe Mon Sep 17 00:00:00 2001 From: camAtGitHub Date: Thu, 20 Apr 2023 20:04:18 +1000 Subject: [PATCH 02/16] add(list_models): Models can be queried via CLI argument --- chatgpt.sh | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 7533680..84acd5b 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -77,6 +77,17 @@ handle_error() { fi } +# request to openAI API models endpoint. 
Returns a list of models +# takes no input parameters +list_models() { + models_response=$(curl https://api.openai.com/v1/models \ + -sS \ + -H "Authorization: Bearer $OPENAI_KEY") + handle_error "$models_response" + models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}') + echo -e "$OVERWRITE_PROCESSING_LINE" + echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}" +} # request to OpenAI API completions endpoint function # $1 should be the request prompt request_to_completions() { @@ -241,6 +252,10 @@ while [[ "$#" -gt 0 ]]; do shift shift ;; + -l | --list) + list_models + exit 0 + ;; -m | --model) MODEL="$2" shift @@ -335,13 +350,7 @@ while $running; do elif [[ "$prompt" == "history" ]]; then echo -e "\n$(cat ~/.chatgpt_history)" elif [[ "$prompt" == "models" ]]; then - models_response=$(curl https://api.openai.com/v1/models \ - -sS \ - -H "Authorization: Bearer $OPENAI_KEY") - handle_error "$models_response" - models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}') - echo -e "$OVERWRITE_PROCESSING_LINE" - echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}" + list_models elif [[ "$prompt" =~ ^model: ]]; then models_response=$(curl https://api.openai.com/v1/models \ -sS \ From 798e240b569d3ab6b5361b92adacecf4d68cc358 Mon Sep 17 00:00:00 2001 From: camAtGitHub Date: Thu, 20 Apr 2023 20:05:20 +1000 Subject: [PATCH 03/16] optimize(exit): streamline exit conditions --- chatgpt.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 84acd5b..08853fe 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -313,7 +313,7 @@ while $running; do if [ -z "$pipe_mode_prompt" ]; then echo -e "\nEnter a prompt:" read -e prompt - if [ "$prompt" != "exit" ] && [ "$prompt" != "q" ]; then + if [[ ! $prompt =~ ^(exit|q)$ ]]; then echo -ne $PROCESSING_LABEL fi else @@ -323,7 +323,7 @@ while $running; do CHATGPT_CYAN_LABEL="" fi - if [ "$prompt" == "exit" ] || [ "$prompt" == "q" ]; then + if [[ $prompt =~ ^(exit|q)$ ]]; then running=false elif [[ "$prompt" =~ ^image: ]]; then request_to_image "$prompt" From 24fc6bf52b8a44df37bcf6551afcac97b755960c Mon Sep 17 00:00:00 2001 From: camAtGitHub Date: Thu, 20 Apr 2023 20:48:15 +1000 Subject: [PATCH 04/16] feature(big-prompt): allow multi-line input during chat mode --- chatgpt.sh | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 08853fe..3c6e76c 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -45,6 +45,8 @@ Options: --prompt-from-file Provide prompt from file + -b, --big-prompt Allow multi-line prompts during chat mode + -t, --temperature Temperature --max-tokens Max number of tokens @@ -266,6 +268,10 @@ while [[ "$#" -gt 0 ]]; do shift shift ;; + -b | --big-prompt) + BIG_PROMPT=true + shift + ;; -c | --chat-context) CONTEXT=true shift @@ -287,6 +293,14 @@ MAX_TOKENS=${MAX_TOKENS:-1024} MODEL=${MODEL:-gpt-3.5-turbo} SIZE=${SIZE:-512x512} CONTEXT=${CONTEXT:-false} +BIG_PROMPT=${BIG_PROMPT:-false} + +# create our temp file for multi-line input +if [ $BIG_PROMPT = true ]; then + USER_INPUT=$(mktemp) + trap 'rm -f ${USER_INPUT}' EXIT +fi + # create history file if [ ! 
-f ~/.chatgpt_history ]; then @@ -311,8 +325,14 @@ fi while $running; do if [ -z "$pipe_mode_prompt" ]; then - echo -e "\nEnter a prompt:" - read -e prompt + if [ $BIG_PROMPT = true ]; then + echo -e "\nEnter a prompt: (Press Enter then Ctrl-D to send)" + cat > "${USER_INPUT}" + prompt=$(sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g' "${USER_INPUT}") + else + echo -e "\nEnter a prompt:" + read -e prompt + fi if [[ ! $prompt =~ ^(exit|q)$ ]]; then echo -ne $PROCESSING_LABEL fi From 1cf6d04b45d5b3f0c5b198e5044fcfa40a277d48 Mon Sep 17 00:00:00 2001 From: camAtGitHub Date: Thu, 20 Apr 2023 20:59:35 +1000 Subject: [PATCH 05/16] rename(OPENAI_KEY): OPENAI_API_KEY is standardised across projects --- chatgpt.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 3c6e76c..4817b41 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -13,7 +13,7 @@ PROCESSING_LABEL="\n\033[90mProcessing... \033[0m\033[0K\r" OVERWRITE_PROCESSING_LINE=" \033[0K\r" -if [[ -z "$OPENAI_KEY" ]]; then +if [[ -z "$OPENAI_API_KEY" ]]; then echo "You need to set your OPENAI_KEY to use this script" echo "You can set it temporarily by running this on your terminal: export OPENAI_KEY=YOUR_KEY_HERE" exit 1 @@ -84,7 +84,7 @@ handle_error() { list_models() { models_response=$(curl https://api.openai.com/v1/models \ -sS \ - -H "Authorization: Bearer $OPENAI_KEY") + -H "Authorization: Bearer $OPENAI_API_KEY") handle_error "$models_response" models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}') echo -e "$OVERWRITE_PROCESSING_LINE" @@ -98,7 +98,7 @@ request_to_completions() { response=$(curl https://api.openai.com/v1/completions \ -sS \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer $OPENAI_KEY" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "model": "'"$MODEL"'", "prompt": "'"${request_prompt}"'", @@ -114,7 +114,7 @@ request_to_image() { image_response=$(curl https://api.openai.com/v1/images/generations \ -sS \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer $OPENAI_KEY" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "prompt": "'"${prompt#*image:}"'", "n": 1, @@ -129,7 +129,7 @@ request_to_chat() { response=$(curl https://api.openai.com/v1/chat/completions \ -sS \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer $OPENAI_KEY" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ "model": "'"$MODEL"'", "messages": [ @@ -374,7 +374,7 @@ while $running; do elif [[ "$prompt" =~ ^model: ]]; then models_response=$(curl https://api.openai.com/v1/models \ -sS \ - -H "Authorization: Bearer $OPENAI_KEY") + -H "Authorization: Bearer $OPENAI_API_KEY") handle_error "$models_response" model_data=$(echo $models_response | jq -r -C '.data[] | select(.id=="'"${prompt#*model:}"'")') echo -e "$OVERWRITE_PROCESSING_LINE" From 60eb98d7b33d9890c83ddcce68cd7fbca75486e4 Mon Sep 17 00:00:00 2001 From: Nicolas Pouillard Date: Mon, 24 Apr 2023 00:22:35 +0200 Subject: [PATCH 06/16] Safer quoting --- chatgpt.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 828f797..673f91c 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -54,8 +54,8 @@ EOF # $1 should be the response body handle_error() { if echo "$1" | jq -e '.error' >/dev/null; then - echo -e "Your request to Open AI API failed: \033[0;31m$(echo $1 | jq -r '.error.type')\033[0m" - echo $1 | jq -r '.error.message' + echo -e "Your request to Open AI API failed: \033[0;31m$(echo "$1" | jq -r '.error.type')\033[0m" + echo "$1" | 
jq -r '.error.message' exit 1 fi } @@ -296,7 +296,7 @@ while $running; do elif [[ "$prompt" =~ ^image: ]]; then request_to_image "$prompt" handle_error "$image_response" - image_url=$(echo $image_response | jq -r '.data[0].url') + image_url=$(echo "$image_response" | jq -r '.data[0].url') echo -e "$OVERWRITE_PROCESSING_LINE" echo -e "${CHATGPT_CYAN_LABEL}Your image was created. \n\nLink: ${image_url}\n" From 4f1f92d022fc1027fd64f7add2d693022762c36b Mon Sep 17 00:00:00 2001 From: Nicolas Pouillard Date: Fri, 28 Apr 2023 23:33:21 +0200 Subject: [PATCH 07/16] Refactoring to reduce the use of global variables in functions --- chatgpt.sh | 90 +++++++++++++++++++++++------------------------------- 1 file changed, 38 insertions(+), 52 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 673f91c..4eb9b63 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -63,24 +63,24 @@ handle_error() { # request to OpenAI API completions endpoint function # $1 should be the request prompt request_to_completions() { - request_prompt="$1" + local prompt="$1" - response=$(curl https://api.openai.com/v1/completions \ + curl https://api.openai.com/v1/completions \ -sS \ -H 'Content-Type: application/json' \ -H "Authorization: Bearer $OPENAI_KEY" \ -d '{ "model": "'"$MODEL"'", - "prompt": "'"${request_prompt}"'", + "prompt": "'"$prompt"'", "max_tokens": '$MAX_TOKENS', "temperature": '$TEMPERATURE' - }') + }' } # request to OpenAI API image generations endpoint function # $1 should be the prompt request_to_image() { - prompt="$1" + local prompt="$1" image_response=$(curl https://api.openai.com/v1/images/generations \ -sS \ -H 'Content-Type: application/json' \ @@ -95,8 +95,8 @@ request_to_image() { # request to OpenAI API chat completion endpoint function # $1 should be the message(s) formatted with role and content request_to_chat() { - message="$1" - response=$(curl https://api.openai.com/v1/chat/completions \ + local message="$1" + curl https://api.openai.com/v1/chat/completions \ -sS \ -H 'Content-Type: application/json' \ -H "Authorization: Bearer $OPENAI_KEY" \ @@ -108,20 +108,19 @@ request_to_chat() { ], "max_tokens": '$MAX_TOKENS', "temperature": '$TEMPERATURE' - }') + }' } # build chat context before each request for /completions (all models except # gpt turbo and gpt 4) -# $1 should be the chat context -# $2 should be the escaped prompt +# $1 should be the escaped request prompt, +# it extends $chat_context build_chat_context() { - chat_context="$1" - escaped_prompt="$2" + local escaped_request_prompt="$1" if [ -z "$chat_context" ]; then - chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_prompt" + chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_request_prompt" else - chat_context="$chat_context\nQ: $escaped_prompt" + chat_context="$chat_context\nQ: $escaped_request_prompt" fi request_prompt="${chat_context//$'\n'/\\n}" } @@ -130,13 +129,12 @@ build_chat_context() { # gpt turbo and gpt 4) # builds chat context from response, # keeps chat context length under max token limit -# $1 should be the chat context -# $2 should be the response data (only the text) +# * $1 should be the escaped response data +# * it extends $chat_context maintain_chat_context() { - chat_context="$1" - response_data="$2" + local escaped_response_data="$1" # add response to chat context as answer - chat_context="$chat_context${chat_context:+\n}\nA: ${response_data//$'\n'/\\n}" + chat_context="$chat_context${chat_context:+\n}\nA: $escaped_response_data" # check prompt length, 1 word =~ 1.3 tokens # reserving 100 tokens for next user prompt while 
(($(echo "$chat_context" | wc -c) * 1, 3 > (MAX_TOKENS - 100))); do @@ -149,36 +147,29 @@ maintain_chat_context() { # build user chat message function for /chat/completions (gpt models) # builds chat message before request, -# $1 should be the chat message -# $2 should be the escaped prompt +# $1 should be the escaped request prompt, +# it extends $chat_message build_user_chat_message() { - chat_message="$1" - escaped_prompt="$2" + local escaped_request_prompt="$1" if [ -z "$chat_message" ]; then - chat_message="{\"role\": \"user\", \"content\": \"$escaped_prompt\"}" + chat_message="{\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}" else - chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_prompt\"}" + chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}" fi - - request_prompt="$chat_message" } # adds the assistant response to the message in (chatml) format # for /chat/completions (gpt models) # keeps messages length under max token limit -# $1 should be the chat message -# $2 should be the response data (only the text) +# * $1 should be the escaped response data +# * it extends and potentially shrinks $chat_message add_assistant_response_to_chat_message() { - chat_message="$1" - local local_response_data="$2" - - # replace new line characters from response with space - local_response_data=$(echo "$local_response_data" | tr '\n' ' ') + local escaped_response_data="$1" # add response to chat context as answer - chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$local_response_data\"}" + chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$escaped_response_data\"}" # transform to json array to parse with jq - chat_message_json="[ $chat_message ]" + local chat_message_json="[ $chat_message ]" # check prompt length, 1 word =~ 1.3 tokens # reserving 100 tokens for next user prompt while (($(echo "$chat_message" | wc -c) * 1, 3 > (MAX_TOKENS - 100))); do @@ -334,15 +325,12 @@ while $running; do echo -e "$OVERWRITE_PROCESSING_LINE" echo -e "${CHATGPT_CYAN_LABEL}Complete details for model: ${prompt#*model:}\n ${model_data}" elif [[ "$prompt" =~ ^command: ]]; then - # escape quotation marks + # escape quotation marks, new lines, backslashes... 
escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g') - # escape new lines - if [[ "$prompt" =~ ^command: ]]; then - escaped_prompt=${prompt#command:} - request_prompt=$COMMAND_GENERATION_PROMPT${escaped_prompt//$'\n'/' '} - fi - build_user_chat_message "$chat_message" "$request_prompt" - request_to_chat "$request_prompt" + escaped_prompt=${escaped_prompt#command:} + request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt + build_user_chat_message "$request_prompt" + response=$(request_to_chat "$chat_message") handle_error "$response" response_data=$(echo $response | jq -r '.choices[].message.content') @@ -363,8 +351,7 @@ while $running; do eval $response_data fi fi - escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g') - add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data" + add_assistant_response_to_chat_message "$(echo "$response_data" | tr '\n' ' ')" timestamp=$(date +"%d/%m/%Y %H:%M") echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history @@ -375,8 +362,8 @@ while $running; do # escape new lines request_prompt=${escaped_prompt//$'\n'/' '} - build_user_chat_message "$chat_message" "$request_prompt" - request_to_chat "$request_prompt" + build_user_chat_message "$request_prompt" + response=$(request_to_chat "$chat_message") handle_error "$response" response_data=$(echo "$response" | jq -r '.choices[].message.content') @@ -401,10 +388,10 @@ while $running; do request_prompt=${escaped_prompt//$'\n'/' '} if [ "$CONTEXT" = true ]; then - build_chat_context "$chat_context" "$escaped_prompt" + build_chat_context "$request_prompt" fi - request_to_completions "$request_prompt" + response=$(request_to_completions "$request_prompt") handle_error "$response" response_data=$(echo "$response" | jq -r '.choices[].text') @@ -420,8 +407,7 @@ while $running; do fi if [ "$CONTEXT" = true ]; then - escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g') - maintain_chat_context "$chat_context" "$escaped_response_data" + maintain_chat_context "$escaped_response_data" fi timestamp=$(date +"%d/%m/%Y %H:%M") From 3b0cd946cec45d7d6da87412242c36cd1fb349b9 Mon Sep 17 00:00:00 2001 From: Nicolas Pouillard Date: Fri, 28 Apr 2023 23:36:11 +0200 Subject: [PATCH 08/16] Safe escaping using jq --- chatgpt.sh | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 4eb9b63..7821d02 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -122,7 +122,10 @@ build_chat_context() { else chat_context="$chat_context\nQ: $escaped_request_prompt" fi - request_prompt="${chat_context//$'\n'/\\n}" +} + +escape(){ + echo "$1" | jq -Rrs 'tojson[1:-1]' } # maintain chat context function for /completions (all models except @@ -326,7 +329,7 @@ while $running; do echo -e "${CHATGPT_CYAN_LABEL}Complete details for model: ${prompt#*model:}\n ${model_data}" elif [[ "$prompt" =~ ^command: ]]; then # escape quotation marks, new lines, backslashes... 
- escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g') + escaped_prompt=$(escape "$prompt") escaped_prompt=${escaped_prompt#command:} request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt build_user_chat_message "$request_prompt" @@ -351,16 +354,14 @@ while $running; do eval $response_data fi fi - add_assistant_response_to_chat_message "$(echo "$response_data" | tr '\n' ' ')" + add_assistant_response_to_chat_message "$(escape "$response_data")" timestamp=$(date +"%d/%m/%Y %H:%M") echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history elif [[ "$MODEL" =~ ^gpt- ]]; then - # escape quotation marks - escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g') - # escape new lines - request_prompt=${escaped_prompt//$'\n'/' '} + # escape quotation marks, new lines, backslashes... + request_prompt=$(escape "$prompt") build_user_chat_message "$request_prompt" response=$(request_to_chat "$chat_message") @@ -376,16 +377,13 @@ while $running; do else echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w $COLUMNS fi - escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g') - add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data" + add_assistant_response_to_chat_message "$(escape "$response_data")" timestamp=$(date +"%d/%m/%Y %H:%M") echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history else - # escape quotation marks - escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g') - # escape new lines - request_prompt=${escaped_prompt//$'\n'/' '} + # escape quotation marks, new lines, backslashes... + request_prompt=$(escape "$prompt") if [ "$CONTEXT" = true ]; then build_chat_context "$request_prompt" @@ -407,7 +405,7 @@ while $running; do fi if [ "$CONTEXT" = true ]; then - maintain_chat_context "$escaped_response_data" + maintain_chat_context "$(escape "$response_data")" fi timestamp=$(date +"%d/%m/%Y %H:%M") From cbc31b57cf50af5772cc4ca4cbac695ded031cff Mon Sep 17 00:00:00 2001 From: Nicolas Pouillard Date: Fri, 28 Apr 2023 23:36:28 +0200 Subject: [PATCH 09/16] Safer quoting on $COLUMNS --- chatgpt.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chatgpt.sh b/chatgpt.sh index 7821d02..7c5f76c 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -375,7 +375,7 @@ while $running; do echo "${response_data}" | glow - #echo -e "${formatted_text}" else - echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w $COLUMNS + echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "$COLUMNS" fi add_assistant_response_to_chat_message "$(escape "$response_data")" From 3ba434ee87b59cf69598422b17c9bedbe4eb05d8 Mon Sep 17 00:00:00 2001 From: Achilleas Date: Tue, 2 May 2023 15:50:17 +0300 Subject: [PATCH 10/16] Keep old OPENAI key name to maintain compatibility with the install script, add identation in elif --- chatgpt.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 4817b41..845fec1 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -13,7 +13,7 @@ PROCESSING_LABEL="\n\033[90mProcessing... 
\033[0m\033[0K\r" OVERWRITE_PROCESSING_LINE=" \033[0K\r" -if [[ -z "$OPENAI_API_KEY" ]]; then +if [[ -z "$OPENAI_KEY" ]]; then echo "You need to set your OPENAI_KEY to use this script" echo "You can set it temporarily by running this on your terminal: export OPENAI_KEY=YOUR_KEY_HERE" exit 1 @@ -84,7 +84,7 @@ handle_error() { list_models() { models_response=$(curl https://api.openai.com/v1/models \ -sS \ - -H "Authorization: Bearer $OPENAI_API_KEY") + -H "Authorization: Bearer $OPENAI_KEY") handle_error "$models_response" models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}') echo -e "$OVERWRITE_PROCESSING_LINE" @@ -98,7 +98,7 @@ request_to_completions() { response=$(curl https://api.openai.com/v1/completions \ -sS \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Authorization: Bearer $OPENAI_KEY" \ -d '{ "model": "'"$MODEL"'", "prompt": "'"${request_prompt}"'", @@ -114,7 +114,7 @@ request_to_image() { image_response=$(curl https://api.openai.com/v1/images/generations \ -sS \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Authorization: Bearer $OPENAI_KEY" \ -d '{ "prompt": "'"${prompt#*image:}"'", "n": 1, @@ -129,7 +129,7 @@ request_to_chat() { response=$(curl https://api.openai.com/v1/chat/completions \ -sS \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Authorization: Bearer $OPENAI_KEY" \ -d '{ "model": "'"$MODEL"'", "messages": [ @@ -370,11 +370,11 @@ while $running; do elif [[ "$prompt" == "history" ]]; then echo -e "\n$(cat ~/.chatgpt_history)" elif [[ "$prompt" == "models" ]]; then - list_models + list_models elif [[ "$prompt" =~ ^model: ]]; then models_response=$(curl https://api.openai.com/v1/models \ -sS \ - -H "Authorization: Bearer $OPENAI_API_KEY") + -H "Authorization: Bearer $OPENAI_KEY") handle_error "$models_response" model_data=$(echo $models_response | jq -r -C '.data[] | select(.id=="'"${prompt#*model:}"'")') echo -e "$OVERWRITE_PROCESSING_LINE" From 0a1ca89e6ccb19452408670361c33d783ae5e2cc Mon Sep 17 00:00:00 2001 From: Achilleas Date: Tue, 2 May 2023 15:52:06 +0300 Subject: [PATCH 11/16] formatting --- chatgpt.sh | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 845fec1..1adb819 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -12,7 +12,6 @@ CHATGPT_CYAN_LABEL="\033[36mchatgpt \033[0m" PROCESSING_LABEL="\n\033[90mProcessing... \033[0m\033[0K\r" OVERWRITE_PROCESSING_LINE=" \033[0K\r" - if [[ -z "$OPENAI_KEY" ]]; then echo "You need to set your OPENAI_KEY to use this script" echo "You can set it temporarily by running this on your terminal: export OPENAI_KEY=YOUR_KEY_HERE" @@ -82,13 +81,13 @@ handle_error() { # request to openAI API models endpoint. 
Returns a list of models # takes no input parameters list_models() { - models_response=$(curl https://api.openai.com/v1/models \ - -sS \ - -H "Authorization: Bearer $OPENAI_KEY") - handle_error "$models_response" - models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}') - echo -e "$OVERWRITE_PROCESSING_LINE" - echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}" + models_response=$(curl https://api.openai.com/v1/models \ + -sS \ + -H "Authorization: Bearer $OPENAI_KEY") + handle_error "$models_response" + models_data=$(echo $models_response | jq -r -C '.data[] | {id, owned_by, created}') + echo -e "$OVERWRITE_PROCESSING_LINE" + echo -e "${CHATGPT_CYAN_LABEL}This is a list of models currently available at OpenAI API:\n ${models_data}" } # request to OpenAI API completions endpoint function # $1 should be the request prompt @@ -301,7 +300,6 @@ if [ $BIG_PROMPT = true ]; then trap 'rm -f ${USER_INPUT}' EXIT fi - # create history file if [ ! -f ~/.chatgpt_history ]; then touch ~/.chatgpt_history @@ -327,13 +325,13 @@ while $running; do if [ -z "$pipe_mode_prompt" ]; then if [ $BIG_PROMPT = true ]; then echo -e "\nEnter a prompt: (Press Enter then Ctrl-D to send)" - cat > "${USER_INPUT}" + cat >"${USER_INPUT}" prompt=$(sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g' "${USER_INPUT}") else echo -e "\nEnter a prompt:" read -e prompt fi - if [[ ! $prompt =~ ^(exit|q)$ ]]; then + if [[ ! $prompt =~ ^(exit|q)$ ]]; then echo -ne $PROCESSING_LABEL fi else @@ -343,7 +341,7 @@ while $running; do CHATGPT_CYAN_LABEL="" fi - if [[ $prompt =~ ^(exit|q)$ ]]; then + if [[ $prompt =~ ^(exit|q)$ ]]; then running=false elif [[ "$prompt" =~ ^image: ]]; then request_to_image "$prompt" @@ -431,7 +429,7 @@ while $running; do if command -v glow &>/dev/null; then echo -e "${CHATGPT_CYAN_LABEL}" echo "${response_data}" | glow - - #echo -e "${formatted_text}" + #echo -e "${formatted_text}" else echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w $COLUMNS fi @@ -460,7 +458,7 @@ while $running; do echo -e "${CHATGPT_CYAN_LABEL}" echo "${response_data}" | glow - else - # else remove empty lines and print + # else remove empty lines and print formatted_text=$(echo "${response_data}" | sed '1,2d; s/^A://g') echo -e "${CHATGPT_CYAN_LABEL}${formatted_text}" | fold -s -w $COLUMNS fi From 3236de2f23a03731deb67608afe222f63d178996 Mon Sep 17 00:00:00 2001 From: Achilleas Date: Tue, 2 May 2023 16:20:44 +0300 Subject: [PATCH 12/16] Rename to multiline prompt --- chatgpt.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 1adb819..0cf41dc 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -267,8 +267,8 @@ while [[ "$#" -gt 0 ]]; do shift shift ;; - -b | --big-prompt) - BIG_PROMPT=true + --multi-line-prompt) + MULTI_LINE_PROMPT=true shift ;; -c | --chat-context) @@ -292,10 +292,10 @@ MAX_TOKENS=${MAX_TOKENS:-1024} MODEL=${MODEL:-gpt-3.5-turbo} SIZE=${SIZE:-512x512} CONTEXT=${CONTEXT:-false} -BIG_PROMPT=${BIG_PROMPT:-false} +MULTI_LINE_PROMPT=${MULTI_LINE_PROMPT:-false} # create our temp file for multi-line input -if [ $BIG_PROMPT = true ]; then +if [ $MULTI_LINE_PROMPT = true ]; then USER_INPUT=$(mktemp) trap 'rm -f ${USER_INPUT}' EXIT fi @@ -323,7 +323,7 @@ fi while $running; do if [ -z "$pipe_mode_prompt" ]; then - if [ $BIG_PROMPT = true ]; then + if [ $MULTI_LINE_PROMPT = true ]; then echo -e "\nEnter a prompt: (Press Enter then Ctrl-D to send)" cat >"${USER_INPUT}" prompt=$(sed -E 
':a;N;$!ba;s/\r{0,1}\n/\\n/g' "${USER_INPUT}") From 24a0de13d3f36ec1601bb786f42059906a02e5b7 Mon Sep 17 00:00:00 2001 From: 0xacx <99351112+0xacx@users.noreply.github.com> Date: Tue, 2 May 2023 18:44:32 +0300 Subject: [PATCH 13/16] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f3f2dcd..e4a7971 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ This script relies on curl for the requests to the api and jq to parse the json ## Contributors :pray: Thanks to all the people who used, tested, submitted issues, PRs and proposed changes: -[pfr-dev](https://www.github.com/pfr-dev), [jordantrizz](https://www.github.com/jordantrizz), [se7en-x230](https://www.github.com/se7en-x230), [mountaineerbr](https://www.github.com/mountaineerbr), [oligeo](https://www.github.com/oligeo), [biaocy](https://www.github.com/biaocy), [dmd](https://www.github.com/dmd), [goosegit11](https://www.github.com/goosegit11), [dilatedpupils](https://www.github.com/dilatedpupils), [direster](https://www.github.com/direster), [rxaviers](https://www.github.com/rxaviers), [Zeioth](https://www.github.com/Zeioth), [edshamis](https://www.github.com/edshamis), [nre-ableton](https://www.github.com/nre-ableton), [TobiasLaving](https://www.github.com/TobiasLaving), [RexAckermann](https://www.github.com/RexAckermann), [emirkmo](https://www.github.com/emirkmo) +[pfr-dev](https://www.github.com/pfr-dev), [jordantrizz](https://www.github.com/jordantrizz), [se7en-x230](https://www.github.com/se7en-x230), [mountaineerbr](https://www.github.com/mountaineerbr), [oligeo](https://www.github.com/oligeo), [biaocy](https://www.github.com/biaocy), [dmd](https://www.github.com/dmd), [goosegit11](https://www.github.com/goosegit11), [dilatedpupils](https://www.github.com/dilatedpupils), [direster](https://www.github.com/direster), [rxaviers](https://www.github.com/rxaviers), [Zeioth](https://www.github.com/Zeioth), [edshamis](https://www.github.com/edshamis), [nre-ableton](https://www.github.com/nre-ableton), [TobiasLaving](https://www.github.com/TobiasLaving), [RexAckermann](https://www.github.com/RexAckermann), [emirkmo](https://www.github.com/emirkmo), [np](https://www.github.com/np) ## Contributing Contributions are very welcome! 
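PATCH 14 below reworks the multi-line prompt capture introduced in PATCH 04, wiring it into the jq-based escaping from PATCH 08. For reviewers who want to try that flow in isolation, here is a minimal sketch; it assumes only bash and jq, and the prompt label plus the final echo are illustrative rather than lifted from the script:

```bash
#!/usr/bin/env bash
# Sketch: capture a multi-line prompt into a temp file, then JSON-escape it
# the way the script's escape() helper does. Assumes jq is installed.
USER_INPUT_TEMP_FILE=$(mktemp)
trap 'rm -f "${USER_INPUT_TEMP_FILE}"' EXIT

echo "Enter a prompt: (Press Enter then Ctrl-D to send)"
cat >"${USER_INPUT_TEMP_FILE}"

# -R reads raw text, -s slurps it into one string, tojson escapes quotes,
# newlines and backslashes; [1:-1] strips the surrounding double quotes.
prompt=$(jq -Rrs 'tojson[1:-1]' "${USER_INPUT_TEMP_FILE}")
echo "escaped prompt: ${prompt}"
```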
From 80693a4d866e347f1684f0f5d11fd8bd16653f22 Mon Sep 17 00:00:00 2001 From: Achilleas Date: Tue, 2 May 2023 19:27:59 +0300 Subject: [PATCH 14/16] Remove sed due to warning, replace with escape function, rename variables, autoformat --- chatgpt.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/chatgpt.sh b/chatgpt.sh index 6e5e4fd..cda6fa8 100755 --- a/chatgpt.sh +++ b/chatgpt.sh @@ -153,8 +153,8 @@ build_chat_context() { fi } -escape(){ - echo "$1" | jq -Rrs 'tojson[1:-1]' +escape() { + echo "$1" | jq -Rrs 'tojson[1:-1]' } # maintain chat context function for /completions (all models except @@ -290,7 +290,7 @@ MULTI_LINE_PROMPT=${MULTI_LINE_PROMPT:-false} # create our temp file for multi-line input if [ $MULTI_LINE_PROMPT = true ]; then - USER_INPUT=$(mktemp) + USER_INPUT_TEMP_FILE=$(mktemp) trap 'rm -f ${USER_INPUT}' EXIT fi @@ -319,8 +319,9 @@ while $running; do if [ -z "$pipe_mode_prompt" ]; then if [ $MULTI_LINE_PROMPT = true ]; then echo -e "\nEnter a prompt: (Press Enter then Ctrl-D to send)" - cat >"${USER_INPUT}" - prompt=$(sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g' "${USER_INPUT}") + cat > "${USER_INPUT_TEMP_FILE}" + input_from_temp_file=$(cat "${USER_INPUT_TEMP_FILE}") + prompt=$(escape "$input_from_temp_file") else echo -e "\nEnter a prompt:" read -e prompt @@ -417,7 +418,6 @@ while $running; do if command -v glow &>/dev/null; then echo -e "${CHATGPT_CYAN_LABEL}" echo "${response_data}" | glow - - #echo -e "${formatted_text}" else echo -e "${CHATGPT_CYAN_LABEL}${response_data}" | fold -s -w "$COLUMNS" fi From 192434b5a685ddca0e5c747d7fa1a394226b17f8 Mon Sep 17 00:00:00 2001 From: 0xacx <99351112+0xacx@users.noreply.github.com> Date: Tue, 2 May 2023 19:40:43 +0300 Subject: [PATCH 15/16] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e4a7971..e5e9d6f 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ This script relies on curl for the requests to the api and jq to parse the json ## Contributors :pray: Thanks to all the people who used, tested, submitted issues, PRs and proposed changes: -[pfr-dev](https://www.github.com/pfr-dev), [jordantrizz](https://www.github.com/jordantrizz), [se7en-x230](https://www.github.com/se7en-x230), [mountaineerbr](https://www.github.com/mountaineerbr), [oligeo](https://www.github.com/oligeo), [biaocy](https://www.github.com/biaocy), [dmd](https://www.github.com/dmd), [goosegit11](https://www.github.com/goosegit11), [dilatedpupils](https://www.github.com/dilatedpupils), [direster](https://www.github.com/direster), [rxaviers](https://www.github.com/rxaviers), [Zeioth](https://www.github.com/Zeioth), [edshamis](https://www.github.com/edshamis), [nre-ableton](https://www.github.com/nre-ableton), [TobiasLaving](https://www.github.com/TobiasLaving), [RexAckermann](https://www.github.com/RexAckermann), [emirkmo](https://www.github.com/emirkmo), [np](https://www.github.com/np) +[pfr-dev](https://www.github.com/pfr-dev), [jordantrizz](https://www.github.com/jordantrizz), [se7en-x230](https://www.github.com/se7en-x230), [mountaineerbr](https://www.github.com/mountaineerbr), [oligeo](https://www.github.com/oligeo), [biaocy](https://www.github.com/biaocy), [dmd](https://www.github.com/dmd), [goosegit11](https://www.github.com/goosegit11), [dilatedpupils](https://www.github.com/dilatedpupils), [direster](https://www.github.com/direster), [rxaviers](https://www.github.com/rxaviers), [Zeioth](https://www.github.com/Zeioth), 
[edshamis](https://www.github.com/edshamis), [nre-ableton](https://www.github.com/nre-ableton), [TobiasLaving](https://www.github.com/TobiasLaving), [RexAckermann](https://www.github.com/RexAckermann), [emirkmo](https://www.github.com/emirkmo), [np](https://www.github.com/np), [camAtGitHub](https://github.com/camAtGitHub) ## Contributing Contributions are very welcome! From 2e25138caaad77b59edb9936798a92ae80a4e0cd Mon Sep 17 00:00:00 2001 From: 0xacx <99351112+0xacx@users.noreply.github.com> Date: Thu, 4 May 2023 01:34:35 +0300 Subject: [PATCH 16/16] Remove broken view count --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index e5e9d6f..9bc0f7d 100644 --- a/README.md +++ b/README.md @@ -171,6 +171,3 @@ Contributions are very welcome! If you have ideas or need help to get started join the [Discord server](https://discord.gg/fwfYAZWKqu) ![Discord](https://img.shields.io/discord/1090696025162928158?label=Discord&style=for-the-badge) - - -[![visitors](https://visitor-badge.glitch.me/badge?page_id=0xacx/chatGPT-shell-cli&left_color=black&right_color=red)](https://visitor-badge.glitch.me)
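Taken together, the series ends with jq doing all of the JSON escaping. To see that end to end outside the script, here is a minimal sketch combining escape() from PATCH 08 with build_user_chat_message() from PATCH 07; the sample prompt is made up, and the final jq call is only there to confirm the assembled payload parses as valid JSON:

```bash
#!/usr/bin/env bash
# escape() as introduced in PATCH 08: JSON-escape a string via jq,
# dropping the surrounding double quotes that tojson adds.
escape() {
	echo "$1" | jq -Rrs 'tojson[1:-1]'
}

# build_user_chat_message() as refactored in PATCH 07: append a user
# message to the global $chat_message list.
chat_message=""
build_user_chat_message() {
	local escaped_request_prompt="$1"
	if [ -z "$chat_message" ]; then
		chat_message="{\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
	else
		chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
	fi
}

build_user_chat_message "$(escape $'a "quoted" prompt\nspanning two lines')"
echo "[ $chat_message ]" | jq . # parses cleanly despite quotes and newlines
```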