Runtime config, code and file structure improvements, GitHub action fixes
TheR1D authored Mar 18, 2023
1 parent b53b374 commit 1465c4a
Showing 12 changed files with 445 additions and 393 deletions.
9 changes: 8 additions & 1 deletion .github/workflows/lint_test.yml
@@ -1,6 +1,12 @@
name: Run linters and unittests

on: [push]
on:
push:
branches:
- main
pull_request:
branches:
- main

jobs:
lint_test:
@@ -39,4 +45,5 @@ jobs:
black --target-version py310 -l 120 *.py
- name: Run unittests
run: |
export OPENAI_API_KEY=test_api_key
python -m unittest tests/unittests.py
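The exported `OPENAI_API_KEY=test_api_key` suggests the test suite now needs some key value to be present, presumably because the new config module resolves an API key at startup. Below is a minimal, purely illustrative sketch of a test that would rely on this export; it is an assumption, not the project's actual `tests/unittests.py`:
```python
# Illustrative sketch only: a test that depends on the OPENAI_API_KEY value
# exported by the workflow step above. The real tests/unittests.py may differ.
import os
import unittest


class ApiKeyEnvTestCase(unittest.TestCase):
    def test_api_key_is_exported_in_ci(self):
        # The workflow runs `export OPENAI_API_KEY=test_api_key` before the suite.
        self.assertEqual(os.environ.get("OPENAI_API_KEY"), "test_api_key")


if __name__ == "__main__":
    unittest.main()
```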
20 changes: 19 additions & 1 deletion README.md
@@ -11,7 +11,7 @@ pip install shell-gpt
```
You'll need an OpenAI API key; you can generate one [here](https://beta.openai.com/account/api-keys).

If the `$OPENAI_API_KEY` environment variable is set, it will be used. Otherwise, you will be prompted for your key, which will then be stored in `~/.config/shell-gpt/api_key.txt`.
If the `$OPENAI_API_KEY` environment variable is set, it will be used. Otherwise, you will be prompted for your key, which will then be stored in `~/.config/shell_gpt/.sgptrc`.

## Usage
`sgpt` has a variety of use cases, including simple queries, shell queries, and code queries.
@@ -199,6 +199,24 @@ Next time, same exact query will get results from local cache instantly. Note th

These are just a few examples of what we can do using the ChatGPT model; I'm sure you will find it useful for your specific use cases.

### Runtime configuration file
You can set up some parameters in the runtime configuration file `~/.config/shell_gpt/.sgptrc`:
```text
# API key; it can also be set via the OPENAI_API_KEY environment variable.
OPENAI_API_KEY=your_api_key
# OpenAI API host, useful if you would like to use a proxy.
OPENAI_API_HOST=https://api.openai.com
# Maximum number of cached messages per chat session.
CHAT_CACHE_LENGTH=100
# Chat cache folder.
CHAT_CACHE_PATH=/tmp/shell_gpt/chat_cache
# Maximum number of cached requests.
CACHE_LENGTH=100
# Request cache folder.
CACHE_PATH=/tmp/shell_gpt/cache
# Request timeout in seconds.
REQUEST_TIMEOUT=60
```
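The config module that consumes this file is not shown in this excerpt. The following is a minimal sketch of how such a `KEY=VALUE` file could be loaded, with defaults and an environment-variable override; the names and precedence here are assumptions, not the project's actual implementation:
```python
# Minimal sketch (assumption, not the actual sgpt.config module) of loading a
# KEY=VALUE runtime config file with defaults and environment overrides.
import os
from pathlib import Path

CONFIG_PATH = Path.home() / ".config" / "shell_gpt" / ".sgptrc"

DEFAULTS = {
    "OPENAI_API_HOST": "https://api.openai.com",
    "CHAT_CACHE_LENGTH": "100",
    "CHAT_CACHE_PATH": "/tmp/shell_gpt/chat_cache",
    "CACHE_LENGTH": "100",
    "CACHE_PATH": "/tmp/shell_gpt/cache",
    "REQUEST_TIMEOUT": "60",
}


def load_config() -> dict:
    """Merge defaults, the .sgptrc file, and environment variables (env wins)."""
    values = dict(DEFAULTS)
    if CONFIG_PATH.exists():
        for line in CONFIG_PATH.read_text().splitlines():
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, _, value = line.partition("=")
            values[key.strip()] = value.strip()
    for key in list(values) + ["OPENAI_API_KEY"]:
        if key in os.environ:
            values[key] = os.environ[key]
    return values
```
In this sketch the environment variable takes precedence over the file, matching the behaviour described above for `OPENAI_API_KEY`.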

### Full list of arguments
2 changes: 1 addition & 1 deletion setup.py
@@ -3,7 +3,7 @@
# pylint: disable=consider-using-with
setup(
name="shell_gpt",
version="0.7.1",
version="0.7.3",
packages=find_packages(),
install_requires=[
"typer~=0.7.0",
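The `entry_points` wiring is collapsed in this view. Given the `entry_point` function and the `cli` alias added in `sgpt/__init__.py` below, the console script presumably maps to it roughly as follows; this is a hedged reconstruction, not the file's verbatim contents:
```python
# Hypothetical reconstruction of the collapsed part of setup.py; the actual
# install_requires list and entry point target may differ.
from setuptools import setup, find_packages

setup(
    name="shell_gpt",
    version="0.7.3",
    packages=find_packages(),
    install_requires=["typer~=0.7.0"],
    entry_points={"console_scripts": ["sgpt = sgpt:cli"]},
)
```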
6 changes: 5 additions & 1 deletion sgpt/__init__.py
@@ -1,4 +1,8 @@
from .chat_gpt import ChatGPT
from . import config as config
from .cache import Cache as Cache
from .cache import ChatCache as ChatCache
from .client import OpenAIClient as OpenAIClient
from . import utils as utils
from .app import main as main
from .app import entry_point as cli
from . import make_prompt as make_prompt
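With these package-level aliases in place, downstream modules can import everything from `sgpt` directly, which is exactly what the refactored `sgpt/app.py` below does; a small illustrative example:
```python
# Illustrative only: package-level imports enabled by the re-exports above.
from sgpt import OpenAIClient, Cache, ChatCache, config, make_prompt
```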
177 changes: 27 additions & 150 deletions sgpt/app.py
@@ -12,161 +12,42 @@


import os
import platform
from time import sleep
from pathlib import Path
from getpass import getpass
from tempfile import NamedTemporaryFile
from typing import Callable

import typer

# Click is part of typer.
from click import MissingParameter, BadParameter
from rich.progress import Progress, SpinnerColumn, TextColumn
from sgpt import ChatGPT
from sgpt import make_prompt

DATA_FOLDER = os.path.expanduser("~/.config")
KEY_FILE = Path(DATA_FOLDER) / "shell-gpt" / "api_key.txt"
CURRENT_SHELL = "PowerShell" if platform.system() == "Windows" else "Bash"
CODE_PROMPT = "Provide code and only code as output without any additional text, prompt or note."
SHELL_PROMPT = f"Provide only {CURRENT_SHELL} command as output, without any additional text or prompt."


def get_api_key() -> str:
"""
Retrieves API key from the file located in the user's home
directory, or prompts the user to input it if it does not exist.
:return: String API key for OpenAI API requests.
"""

if "OPENAI_API_KEY" in os.environ:
return os.environ["OPENAI_API_KEY"]

if not KEY_FILE.exists():
api_key = getpass(prompt="Please enter your API secret key")
KEY_FILE.parent.mkdir(parents=True, exist_ok=True)
KEY_FILE.write_text(api_key)
else:
api_key = KEY_FILE.read_text().strip()
return api_key


def loading_spinner(func: Callable) -> Callable:
"""
Decorator that adds a loading spinner animation to a function that uses the OpenAI API.
:param func: Function to wrap.
:return: Wrapped function with loading.
"""
def wrapper(*args, **kwargs):
if not kwargs.pop("spinner"):
return func(*args, **kwargs)
text = TextColumn("[green]Consulting with robots...")
with Progress(SpinnerColumn(), text, transient=True) as progress:
progress.add_task("request")
return func(*args, **kwargs)
return wrapper


def get_edited_prompt() -> str:
"""
Opens the user's default editor to let them
input a prompt, and returns the edited text.
:return: String prompt.
"""
with NamedTemporaryFile(suffix=".txt", delete=False) as file:
# Create file and store path.
file_path = file.name
editor = os.environ.get("EDITOR", "vim")
# This will write text to file using $EDITOR.
os.system(f"{editor} {file_path}")
# Read file when editor is closed.
with open(file_path, "r") as file:
output = file.read()
os.remove(file_path)
if not output:
raise BadParameter("Couldn't get valid PROMPT from $EDITOR")
return output
from click import MissingParameter
from sgpt import config, make_prompt, OpenAIClient
from sgpt.utils import (
loading_spinner,
echo_chat_ids,
echo_chat_messages,
typer_writer,
get_edited_prompt,
)


@loading_spinner
def get_completion(
prompt: str,
api_key: str,
temperature: float,
top_p: float,
caching: bool,
chat: str,
) -> str:
"""
Generates completions for a given prompt using the OpenAI API.
:param prompt: Prompt to generate completion for.
:param api_key: OpenAI API key.
:param temperature: Controls randomness of GPT-3.5 completions.
:param top_p: Controls most probable tokens for completions.
:param caching: Enable/Disable caching.
:param chat: Enable/Disable conversation (chat mode).
:return: GPT-3.5 generated completion.
"""
chat_gpt = ChatGPT(api_key)
model = "gpt-3.5-turbo"
if not chat:
return chat_gpt.get_completion(prompt, model, temperature, top_p, caching)
return chat_gpt.get_chat_completion(
model=model, temperature=temperature, top_probability=top_p, message=prompt, chat_id=chat
)
api_host = config.get("OPENAI_API_HOST")
api_key = config.get("OPENAI_API_KEY")
client = OpenAIClient(api_host, api_key)
return client.get_completion(
message=prompt,
model="gpt-3.5-turbo",
temperature=temperature,
top_probability=top_p,
caching=caching,
chat_id=chat,
)


def typer_writer(text: str, code: bool, shell: bool, animate: bool) -> None:
"""
Writes output to the console, with optional typewriter animation and color.
:param text: Text to output.
:param code: If content of text is code.
:param shell: if content of text is shell command.
:param animate: Enable/Disable typewriter animation.
:return: None
"""
shell_or_code = shell or code
color = "magenta" if shell_or_code else None
if animate and not shell_or_code:
for char in text:
typer.secho(char, nl=False, fg=color, bold=shell_or_code)
sleep(0.015)
# Add new line at the end, to prevent % from appearing.
typer.echo("")
return
typer.secho(text, fg=color, bold=shell_or_code)


def echo_chat_messages(chat_id: str) -> None:
"""
Writes all messages from a specified chat ID to the console.
:param chat_id: String chat id.
:return: None
"""
for index, message in enumerate(ChatGPT.chat_cache.get_messages(chat_id)):
color = "cyan" if index % 2 == 0 else "green"
typer.secho(message, fg=color)


def echo_chat_ids() -> None:
"""
Writes all existing chat IDs to the console.
:return: None
"""
for chat_id in ChatGPT.chat_cache.get_chats():
typer.echo(chat_id)


# Using a lambda to pass a function as the default value, which makes it appear as "dynamic" in help.
def main(
prompt: str = typer.Argument(None, show_default=False, help="The prompt to generate completions for."),
temperature: float = typer.Option(1.0, min=0.0, max=1.0, help="Randomness of generated output."),
@@ -187,6 +68,7 @@ def main(
return
if show_chat:
echo_chat_messages(show_chat)
return

if not prompt and not editor:
raise MissingParameter(param_hint="PROMPT", param_type="string")
@@ -195,29 +77,24 @@
prompt = get_edited_prompt()

if shell:
# If probability and temperature were not changed (default), make response more accurate.
# If default values are used, make the response more accurate.
if top_probability == 1 == temperature:
temperature = 0.4
prompt = make_prompt.shell(prompt)
elif code:
prompt = make_prompt.code(prompt)

api_key = get_api_key()
response_text = get_completion(
prompt, api_key, temperature, top_probability, cache, chat, spinner=spinner
completion = get_completion(
prompt, temperature, top_probability, cache, chat, spinner=spinner
)

typer_writer(response_text, code, shell, animation)
typer_writer(completion, code, shell, animation)
if shell and execute and typer.confirm("Execute shell command?"):
os.system(response_text)
os.system(completion)


def entry_point() -> None:
"""
Python package entry point defined in setup.py
:return: None
"""
# Python package entry point defined in setup.py
typer.run(main)


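The new `sgpt/client.py` and `sgpt/utils.py` referenced above are among the changed files not shown in this excerpt. Based on the `get_completion` call in `sgpt/app.py`, the client is assumed to expose roughly the interface sketched below (caching and chat history omitted); this is an assumption, not the actual implementation:
```python
# Hypothetical sketch of the OpenAIClient interface implied by get_completion()
# in sgpt/app.py above; the real sgpt/client.py (not shown here) may differ.
import requests


class OpenAIClient:
    def __init__(self, api_host: str, api_key: str) -> None:
        self.api_host = api_host
        self.api_key = api_key

    def get_completion(
        self,
        message: str,
        model: str = "gpt-3.5-turbo",
        temperature: float = 1.0,
        top_probability: float = 1.0,
        caching: bool = True,
        chat_id: str | None = None,
    ) -> str:
        # Plain, uncached chat completion request; caching and chat history
        # handling are omitted from this sketch.
        response = requests.post(
            f"{self.api_host}/v1/chat/completions",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={
                "model": model,
                "messages": [{"role": "user", "content": message}],
                "temperature": temperature,
                "top_p": top_probability,
            },
            timeout=60,  # REQUEST_TIMEOUT from the runtime config would go here.
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
```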
