Skip to content

Commit

Permalink
Remove excessive debug text
Browse files · Browse the repository at this point in the history
  • Loading branch information
Taytay committed Apr 3, 2023
1 parent afceca0 commit 4173b07
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 15 deletions.
19 changes: 8 additions & 11 deletions scripts/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,19 +30,16 @@ def __init__(self):
# TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.thinking_token_limit = 4000
# Initialize the OpenAI API client

# TODO: Make this not so hard-coded
# This is the token limit that the main prompt needs to know. GPT-4 has a much bigger limit than GPT-3
if (self.smart_llm_model.startswith("gpt-3")):
self.thinking_token_limit = 4000
else:
self.thinking_token_limit = 6000

self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
# Print values:
print("Config values:")
print(f"continuous_mode: {self.continuous_mode}")
print(f"speak_mode: {self.speak_mode}")
print(f"fast_llm_model: {self.fast_llm_model}")
print(f"smart_llm_model: {self.smart_llm_model}")
print(f"thinking_token_limit: {self.thinking_token_limit}")
print(f"openai_api_key: {self.openai_api_key}")
print(f"elevenlabs_api_key: {self.elevenlabs_api_key}")

# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
Expand Down
7 changes: 3 additions & 4 deletions scripts/json_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
raise e

# TODO: Make debug a global config var
def fix_json(json_str: str, schema:str = None, debug=True) -> str:
def fix_json(json_str: str, schema:str = None, debug=False) -> str:
# Try to fix the JSON using gpt:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [json_str, schema]
Expand All @@ -44,10 +44,9 @@ def fix_json(json_str: str, schema:str = None, debug=True) -> str:
try:
return dirtyjson.loads(result_string)
except:
# Log the exception:
print("Failed to fix JSON")
# Get the call stack:
import traceback
call_stack = traceback.format_exc()
print(call_stack)
# TODO: Handle this sort of thing better
print(f"Failed to fix JSON: '{json_str}' "+call_stack)
return {}

0 comments on commit 4173b07

Please sign in to comment.