Skip to content

Commit

Permalink
Add system message and stuff
Browse files Browse the repository at this point in the history
  • Loading branch information
lalalune committed Aug 3, 2023
1 parent 0394e68 commit d31167c
Show file tree
Hide file tree
Showing 8 changed files with 234 additions and 199 deletions.
2 changes: 1 addition & 1 deletion easycompletion/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from .model import (
function_completion,
text_completion,
compose_function,
chat_completion
)

Expand All @@ -13,6 +12,7 @@
trim_prompt,
chunk_prompt,
count_tokens,
compose_function,
get_tokens,
)

Expand Down
82 changes: 27 additions & 55 deletions easycompletion/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,8 @@ def validate_functions(response, functions, function_call, debug=DEBUG):
Usage:
isValid = validate_functions(response, functions, function_call)
"""
print('response')
print(response)
response_function_call = response["choices"][0]["message"].get(
"function_call", None
)
Expand Down Expand Up @@ -153,61 +155,20 @@ def validate_functions(response, functions, function_call, debug=DEBUG):
return True


def compose_function(name, description, properties, required_properties, debug=DEBUG):
    """
    Build a function-definition dictionary for OpenAI function calling.

    Parameters:
        name (str): Name of the function.
        description (str): Human-readable description of what the function does.
        properties (dict): Mapping of property name -> JSON-schema property object.
        required_properties (list): Names of the properties that must be supplied.

    Returns:
        dict: A function definition suitable for the OpenAI functions API.

    Usage:
        summarization_function = compose_function(
            name="summarize_text",
            description="Summarize the text. Include the topic, subtopics.",
            properties={
                "summary": {
                    "type": "string",
                    "description": "Detailed summary of the text.",
                },
            },
            required_properties=["summary"],
        )
    """
    # Assemble the JSON-schema parameter block first, then wrap it.
    parameter_schema = {
        "type": "object",
        "properties": properties,
        "required": required_properties,
    }
    composed = {
        "name": name,
        "description": description,
        "parameters": parameter_schema,
    }
    log(f"Function:\n{str(composed)}", type="info", log=debug)
    return composed


def chat_completion(
messages,
system_message=None,
model_failure_retries=5,
model=None,
chunk_length=DEFAULT_CHUNK_LENGTH,
api_key=None,
debug=DEBUG,
temperature=0.0
temperature=0.0,
):
"""
Function for sending chat messages and returning a chat response.
Parameters:
messages (str): Messages to send to the model. In the form {<role>: string, <content>: string} - roles are "user" and "assistant"
system_message (str, optional): Message appended at the top sent by the system. Usually used to tell what the agent how to act.
model_failure_retries (int, optional): Number of retries if the request fails. Default is 5.
model (str, optional): The model to use. Default is the DEFAULT_TEXT_MODEL defined in constants.py.
chunk_length (int, optional): Maximum length of text chunk to process. Default is defined in constants.py.
Expand Down Expand Up @@ -249,22 +210,19 @@ def chat_completion(
"error": "Message too long",
}

# Prepare messages for the API call
messages = [{"role": "system", "content": system_message}] + messages

log(f"Prompt:\n{str(messages)}", type="prompt", log=debug)

# Try to make a request for a specified number of times
response = None
for i in range(model_failure_retries):
try:
response = openai.ChatCompletion.create(model=model, messages=messages, temperature=temperature)
response = openai.ChatCompletion.create(
model=model, messages=messages, temperature=temperature
)
break
except Exception as e:
log(f"OpenAI Error: {e}", type="error", log=debug)
continue
# wait 1 second
time.sleep(1)

# If response is not valid, print an error message and return None
if (
Expand Down Expand Up @@ -299,7 +257,7 @@ def text_completion(
chunk_length=DEFAULT_CHUNK_LENGTH,
api_key=None,
debug=DEBUG,
temperature=0.0
temperature=0.0,
):
"""
Function for sending text and returning a text completion response.
Expand Down Expand Up @@ -356,7 +314,9 @@ def text_completion(
response = None
for i in range(model_failure_retries):
try:
response = openai.ChatCompletion.create(model=model, messages=messages, temperature=temperature)
response = openai.ChatCompletion.create(
model=model, messages=messages, temperature=temperature
)
break
except Exception as e:
log(f"OpenAI Error: {e}", type="error", log=debug)
Expand Down Expand Up @@ -391,9 +351,9 @@ def text_completion(


def function_completion(
text,
system_message=None,
text=None,
messages=None,
system_message=None,
functions=None,
model_failure_retries=5,
function_call=None,
Expand All @@ -402,7 +362,7 @@ def function_completion(
model=None,
api_key=None,
debug=DEBUG,
temperature=0.0
temperature=0.0,
):
"""
Send text and a list of functions to the model and return optional text and a function call.
Expand Down Expand Up @@ -524,7 +484,8 @@ def function_completion(
all_messages += messages

# Prepare the messages to be sent to the API
all_messages.append({"role": "user", "content": text})
if text is not None and text != "":
all_messages.append({"role": "user", "content": text})

log(
f"Prompt:\n{text}\n\nFunctions:\n{json.dumps(functions, indent=4)}",
Expand All @@ -543,15 +504,26 @@ def function_completion(
messages=all_messages,
functions=functions,
function_call=function_call,
temperature=temperature
temperature=temperature,
)
print('***** openai response')
print(response)
if not response.get("choices") or response["choices"][0] is None:
log("No choices in response", type="error", log=debug)
continue
break
except Exception as e:
print('**** ERROR')
print(e)
log(f"OpenAI Error: {e}", type="error", log=debug)
time.sleep(1)
# Check if we have a valid response from the model
print('***** response')
print(response)
print('***** functions')
print(functions)
print('***** function_call')
print(function_call)
if validate_functions(response, functions, function_call):
break
time.sleep(1)
Expand Down
40 changes: 40 additions & 0 deletions easycompletion/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,3 +184,43 @@ def compose_prompt(prompt_template, parameters, debug=DEBUG):
log(f"Composed prompt:\n{prompt}", log=debug)

return prompt


def compose_function(name, description, properties, required_properties, debug=DEBUG):
    """
    Compose a function object for OpenAI function calling.

    Parameters:
        name (str): The name of the function.
        description (str): Description of the function.
        properties (dict): Dictionary of JSON-schema property objects.
        required_properties (list): List of property names that are required.

    Returns:
        dict: A dictionary representing a callable-function schema.

    Usage:
        summarization_function = compose_function(
            name="summarize_text",
            description="Summarize the text. Include the topic, subtopics.",
            properties={
                "summary": {
                    "type": "string",
                    "description": "Detailed summary of the text.",
                },
            },
            required_properties=["summary"],
        )
    """
    definition = {
        "name": name,
        "description": description,
        "parameters": {
            "type": "object",
            "properties": properties,
            "required": required_properties,
        },
    }
    # Log the composed schema so callers can inspect what will be sent.
    log(f"Function:\n{str(definition)}", type="info", log=debug)
    return definition

2 changes: 2 additions & 0 deletions easycompletion/tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from .model import *
from .prompt import *
93 changes: 93 additions & 0 deletions easycompletion/tests/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
from easycompletion.model import (
chat_completion,
parse_arguments,
function_completion,
text_completion,
)


def test_parse_arguments():
    """parse_arguments should decode a JSON object string into a dict."""
    raw = '{"key1": "value1", "key2": 2}'
    expected = {"key1": "value1", "key2": 2}
    assert parse_arguments(raw) == expected, "Test parse_arguments failed"


def test_function_completion():
    """Exercise function_completion both bare and with messages/system_message."""
    song_prompt = "Write a song about AI"
    song_function = {
        "name": "write_song",
        "description": "Write a song about AI",
        "parameters": {
            "type": "object",
            "properties": {
                "lyrics": {
                    "type": "string",
                    "description": "The lyrics for the song",
                }
            },
            "required": ["lyrics"],
        },
    }

    # Simple call: text plus a forced function_call.
    response = function_completion(
        text=song_prompt, functions=song_function, function_call="write_song"
    )
    assert response is not None, "Test function_completion failed"
    # NOTE(review): exact token counts are model-dependent — brittle if the model changes.
    assert response["usage"]["prompt_tokens"] == 64, "Prompt tokens was not expected count"

    # Same call with prior chat history and a system message prepended.
    response = function_completion(
        text=song_prompt,
        messages=[{"role": "assistant", "content": "hey whats up"}],
        system_message="you are a towel",
        functions=song_function,
        function_call="write_song",
    )
    assert response is not None, "Test function_completion failed"
    assert response["usage"]["prompt_tokens"] == 76, "Prompt tokens was not expected count"


def test_chat_completion():
    """chat_completion should return a response containing text and token usage.

    Fix: the assertion messages previously said "Test text_completion failed"
    (copy-pasted from test_text_completion); they now name this test.
    """
    response = chat_completion(
        messages=[
            {"role": "system", "content": "You are a towel. Respond as a towel."},
            {"role": "user", "content": "Hello, how are you?"},
        ],
    )

    assert response is not None, "Test chat_completion failed"
    assert response["text"] is not None, "Test chat_completion failed"
    # NOTE(review): exact token counts are model-dependent — brittle if the model changes.
    prompt_tokens = response["usage"]["prompt_tokens"]
    assert prompt_tokens == 27, "Prompt tokens was not expected count"


def test_text_completion():
    """text_completion should answer a plain prompt and report token usage."""
    result = text_completion("Hello, how are you?")
    assert result is not None, "Test text_completion failed"
    assert result["text"] is not None, "Test text_completion failed"
    assert result["usage"]["prompt_tokens"] == 13, "Prompt tokens was not expected count"


def test_long_completion():
    """function_completion should produce a summary for a multi-paragraph script."""
    script = """
Sure, Satisfiability Modulo Theories (SMT) is a fundamental concept in computer science, and it can be explained from several different angles. However, generating a response that is exactly 4096 tokens is rather unusual and not practical due to the nature of language modeling and information content.
In the context of language model like GPT-3 or GPT-4, tokens can be a single character, a word, or even a part of a word, depending on the language and the context. In English text, a token is typically a word or a punctuation mark. Given this information, a text of 4096 tokens would be very long and possibly redundant for a concept like SMT.
"""
    summarizer = {
        "name": "summarize_text",
        "description": "Summarize the text. Include the topic, subtopics.",
        "parameters": {
            "type": "object",
            "properties": {
                "summary": {
                    "type": "string",
                    "description": "Detailed summary of the text.",
                },
            },
            "required": ["summary"],
        },
    }
    result = function_completion(text=script, functions=summarizer)
    assert result is not None, "Test long_completion failed"
Loading

0 comments on commit d31167c

Please sign in to comment.