From cf00b5da05b9eca63d4d0276da45162dd502005c Mon Sep 17 00:00:00 2001
From: MrGreyfun <72959013+MrGreyfun@users.noreply.github.com>
Date: Thu, 21 Dec 2023 20:00:57 +0800
Subject: [PATCH] Bug fix: error when failing to make API call to vision model.

---
 src/bot_backend.py | 28 ++++++++++++++++++++--------
 src/tools.py       |  4 ++--
 src/web_ui.py      | 11 ++++++-----
 3 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/src/bot_backend.py b/src/bot_backend.py
index 2ac1910..dac500a 100644
--- a/src/bot_backend.py
+++ b/src/bot_backend.py
@@ -132,16 +132,19 @@ def __init__(self):
         super().__init__()
         self.unique_id = hash(id(self))
         self.jupyter_work_dir = f'cache/work_dir_{self.unique_id}'
+        self.tool_log = f'cache/tool_{self.unique_id}.log'
         self.jupyter_kernel = JupyterKernel(work_dir=self.jupyter_work_dir)
         self.gpt_model_choice = "GPT-3.5"
         self.revocable_files = []
-        self._init_conversation()
+        self.system_msg = system_msg
+        self.functions = copy.deepcopy(functions)
         self._init_api_config()
         self._init_tools()
+        self._init_conversation()
         self._init_kwargs_for_chat_completion()
 
     def _init_conversation(self):
-        first_system_msg = {'role': 'system', 'content': system_msg}
+        first_system_msg = {'role': 'system', 'content': self.system_msg}
         self.context_window_tokens = 0  # num of tokens actually sent to GPT
         self.sliced = False  # whether the conversion is sliced
         if hasattr(self, 'conversation'):
@@ -159,28 +162,27 @@ def _init_api_config(self):
         config_openai_api(api_type, api_base, api_version, api_key)
 
     def _init_tools(self):
-        global system_msg, functions
         self.additional_tools = {}
 
         tools = get_available_tools(self.config)
 
         if tools:
-            system_msg += '\n\nAdditional tools:'
+            self.system_msg += '\n\nAdditional tools:'
             for tool in tools:
                 system_prompt = tool['system_prompt']
                 tool_name = tool['tool_name']
                 tool_description = tool['tool_description']
 
-                system_msg += f'\n{tool_name}: {system_prompt}'
+                self.system_msg += f'\n{tool_name}: {system_prompt}'
 
-                functions.append(tool_description)
+                self.functions.append(tool_description)
                 self.additional_tools[tool_name] = tool['tool']
 
     def _init_kwargs_for_chat_completion(self):
         self.kwargs_for_chat_completion = {
             'stream': True,
             'messages': self.conversation,
-            'functions': functions,
+            'functions': self.functions,
             'function_call': 'auto'
         }
@@ -205,6 +207,14 @@ def _clear_all_files_in_work_dir(self, backup=True):
             else:
                 os.remove(path)
 
+    def _save_tool_log(self, tool_response):
+        with open(self.tool_log, 'a') as log_file:
+            log_file.write(f'Previous conversion: {self.conversation}\n')
+            log_file.write(f'Tool name: {self.function_name}\n')
+            log_file.write(f'Parameters: {self.function_args_str}\n')
+            log_file.write(f'Response: {tool_response}\n')
+            log_file.write('----------\n\n')
+
     def add_gpt_response_content_message(self):
         self.conversation.append(
             {'role': self.assistant_role_name, 'content': self.content}
         )
@@ -236,7 +246,8 @@ def add_file_message(self, path, bot_msg):
         )
 
     def add_function_call_response_message(self, function_response: Union[str, None], save_tokens=True):
-        add_code_cell_to_notebook(self.code_str)
+        if self.code_str is not None:
+            add_code_cell_to_notebook(self.code_str)
 
         self.conversation.append(
             {
@@ -256,6 +267,7 @@ def add_function_call_response_message(self, function_response: Union[str, None]
                     "content": function_response,
                 }
             )
+        self._save_tool_log(tool_response=function_response)
 
     def append_system_msg(self, prompt):
         self.conversation.append(
diff --git a/src/tools.py b/src/tools.py
index c4d713f..c8e2b8b 100644
--- a/src/tools.py
+++ b/src/tools.py
@@ -41,11 +41,11 @@ def inquire_image(workdir, path, prompt):
     image_base64 = image_to_base64(f'{workdir}/{path}')
     hypertext_to_display = None
     if image_base64 is None:
-        return "Error: Image transform error"
+        return "Error: Image transform error", None
     else:
         response = create_vision_chat_completion(image_base64, prompt)
         if response is None:
-            return "Model response error"
+            return "Model response error", None
         else:
             return response, hypertext_to_display
 
diff --git a/src/web_ui.py b/src/web_ui.py
index 3c14931..aef04f5 100644
--- a/src/web_ui.py
+++ b/src/web_ui.py
@@ -118,17 +118,18 @@ def bot(state_dict: Dict, history: List) -> List:
     bot_backend = get_bot_backend(state_dict)
 
     while bot_backend.finish_reason in ('new_input', 'function_call'):
-        if history[-1][0] is None:
-            history.append(
-                [None, ""]
-            )
+        if history[-1][1]:
+            history.append([None, ""])
         else:
             history[-1][1] = ""
 
         response = chat_completion(bot_backend=bot_backend)
         for chunk in response:
             if chunk['choices'] and chunk['choices'][0]['finish_reason'] == 'function_call':
-                yield history, gr.Button.update(value='⏹️ Interrupt execution')
+                if bot_backend.function_name in bot_backend.jupyter_kernel.available_functions:
+                    yield history, gr.Button.update(value='⏹️ Interrupt execution')
+                else:
+                    yield history, gr.Button.update(interactive=False)
 
             if bot_backend.stop_generating:
                 response.close()