Skip to content

Commit

Permalink
feat: add open-interpreter adapter (agiresearch#224)
Browse files Browse the repository at this point in the history
* feat: init

* feat: interpreter adapter

* docs: interpreter doc

* style: clean some code
  • Loading branch information
2020-qqtcg authored Aug 29, 2024
1 parent b206bca commit 741207b
Show file tree
Hide file tree
Showing 10 changed files with 278 additions and 7 deletions.
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ or if using pip
```bash
cd AIOS
python -m venv venv
source venv/bin/activate
source venv/bin/activate
cd ..
cd ..
```
Expand Down Expand Up @@ -204,6 +204,7 @@ Interact with all agents by using the `@` to tag an agent.

### Supported Agent Framework
- [autogen](https://github.com/microsoft/autogen)
- [open-interpreter](https://github.com/OpenInterpreter/open-interpreter)

### Supported LLM Endpoints
- [OpenAI API](https://platform.openai.com/api-keys)
Expand Down
4 changes: 3 additions & 1 deletion aios/sdk/autogen/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
from . import adapter
from . import adapter

adapter
6 changes: 3 additions & 3 deletions aios/sdk/autogen/adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
adapter_client_extract_text_or_completion_object
)

from aios.utils.logger import AgentLogger
from aios.utils.logger import SDKLogger

try:
from autogen import (
Expand All @@ -30,7 +30,7 @@
"Please install it with `pip install pyautogen`."
)

logger = AgentLogger("Adapter")
logger = SDKLogger("Autogen Adapter")


def prepare_autogen(agent_process_factory: Optional[AgentProcessFactory] = None):
Expand Down Expand Up @@ -59,4 +59,4 @@ def prepare_autogen(agent_process_factory: Optional[AgentProcessFactory] = None)
ConversableAgent.update_tool_signature = adapter_update_tool_signature
ConversableAgent.__init__ = adapter_autogen_agent_init

logger.log("Autogen prepare success", "info")
logger.log("Autogen prepare success\n", "info")
3 changes: 3 additions & 0 deletions aios/sdk/interpreter/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
from . import adapter

# Declare the public API explicitly instead of the previous bare
# `adapter` expression statement, which evaluated the name and
# discarded the result (a no-op).
__all__ = ["adapter"]
115 changes: 115 additions & 0 deletions aios/sdk/interpreter/adapter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
# replace run_tool_calling_llm and run_text_llm in interpreter llm
# so that interpreter can run LLM in aios
import json
import sys

from aios.utils.logger import SDKLogger
from pyopenagi.agents.agent_process import AgentProcessFactory
from pyopenagi.utils.chat_template import Query
from pyopenagi.agents.call_core import CallCore
from dataclasses import dataclass

try:
    from interpreter import interpreter

except ImportError as e:
    # Fix: the original message omitted `pip install`, telling users to run
    # a bare `open-interpreter` command that does not install anything.
    raise ImportError(
        "Could not import interpreter python package. "
        "Please install it with `pip install open-interpreter`."
    ) from e

# Logger for this adapter module (console mode).
logger = SDKLogger("Interpreter Adapter")

# Module-level bridge to the AIOS kernel; set by prepare_interpreter().
aios_call = None


def prepare_interpreter(agent_process_factory: AgentProcessFactory):
    """Patch open-interpreter so its LLM calls are routed through AIOS.

    Replaces ``interpreter.llm.completions`` with ``adapter_aios_completions``
    and initializes the module-level ``aios_call`` bridge.

    Args:
        agent_process_factory (AgentProcessFactory):
            Used to create agent processes.
    """
    global aios_call

    try:
        # Set the completion function in the interpreter
        interpreter.llm.completions = adapter_aios_completions

        # Initialize the aios_call variable as a CallCore object
        aios_call = CallCore("interpreter", agent_process_factory, "console")
    except Exception as e:
        logger.log("Interpreter prepare failed: " + str(e) + "\n", "error")
        # Fix: previously the success message below was logged even when
        # preparation failed, because it sat outside the try/except.
        return

    logger.log("Interpreter prepare success\n", "info")


@dataclass
class InterpreterFunctionAdapter:
    """Mirrors the ``function`` field of an OpenAI-style tool call."""

    # Name of the tool/function being invoked.
    name: str
    # JSON-encoded argument string for the call.
    arguments: str


@dataclass(init=False)
class InterpreterToolCallsAdapter:
    """Wraps a single tool call in the shape open-interpreter consumes."""

    function: InterpreterFunctionAdapter

    def __init__(self, name: str, arguments: str):
        # init=False documents that we intentionally hand-write __init__:
        # callers pass the flat (name, arguments) pair rather than a
        # pre-built InterpreterFunctionAdapter.
        self.function = InterpreterFunctionAdapter(name, arguments)


def adapter_aios_completions(**params):
    """AIOS replacement for ``fixed_litellm_completions`` in open-interpreter.

    Sends the chat messages (and optional tool schemas) to the AIOS kernel via
    the module-level ``aios_call`` and reshapes the response into the streamed
    completion-chunk format open-interpreter expects.

    Returns:
        list: A single-element list holding one completion chunk dict.

    Raises:
        Exception: The first error encountered, if both attempts fail.
    """

    if params.get("stream", False) is True:
        # TODO: AIOS does not support stream mode yet; force it off.
        # NOTE: level passed positionally for consistency with every other
        # logger.log(msg, "<level>") call in this module.
        logger.log(
            "AIOS does not support stream mode currently. "
            "The stream mode has been automatically set to False.\n",
            "warn",
        )
        params["stream"] = False

    # Run the completion; retry once, remembering the first failure.
    attempts = 2
    first_error = None

    for attempt in range(attempts):
        try:
            # prepare_interpreter() must have been called first.
            assert isinstance(aios_call, CallCore)
            response, _, _, _, _ = aios_call.get_response(
                query=Query(
                    messages=params["messages"],
                    tools=params.get("tools"),
                )
            )

            # Mimic the completion-chunk shape interpreter consumes.
            completion = {"choices": [{"delta": {}}]}
            completion["choices"][0]["delta"]["content"] = response.response_message
            if response.tool_calls is not None:
                completion["choices"][0]["delta"]["tool_calls"] = (
                    format_tool_calls_to_interpreter(response.tool_calls)
                )

            return [completion]  # If the completion is successful, exit the function
        except KeyboardInterrupt:
            print("Exiting...")
            sys.exit(0)
        except Exception as e:
            if attempt == 0:
                # Store the first error; a second attempt follows.
                first_error = e

    if first_error is not None:
        raise first_error


def format_tool_calls_to_interpreter(tool_calls):
    """Convert an AIOS tool-call list into interpreter's adapter objects.

    NOTE(review): only the first entry of ``tool_calls`` is forwarded —
    presumably interpreter consumes one call per chunk; confirm if multiple
    tool calls can occur.
    """
    first_call = tool_calls[0]
    serialized_args = json.dumps(first_call["parameters"])
    return [InterpreterToolCallsAdapter(first_call["name"], serialized_args)]
17 changes: 17 additions & 0 deletions aios/utils/logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,3 +100,20 @@ def load_log_file(self):
os.makedirs(log_dir)
log_file = os.path.join(log_dir, f"{date_time}.txt")
return log_file

class SDKLogger(BaseLogger):
    """Logger used by the AIOS SDK framework adapters."""

    def __init__(self, logger_name, log_mode="console") -> None:
        super().__init__(logger_name, log_mode)
        # RGB color per log level.
        self.level_color = {
            "info": (248, 246, 227),  # white
            "warn": (255, 201, 74),   # yellow
            "error": (255, 0, 0),     # red
        }

    def load_log_file(self):
        """Create the per-logger log directory if needed and return a
        timestamped log-file path.

        NOTE(review): the timestamp keeps ':' characters, which are invalid
        in Windows file names — confirm target platforms.
        """
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        log_dir = os.path.join(os.getcwd(), "logs", "agents", self.logger_name)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        return os.path.join(log_dir, f"{timestamp}.txt")
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,4 @@ Below are the frameworks currently supported
:caption: Supported Agent Framework

autogen

open_interpreter
53 changes: 53 additions & 0 deletions docs/source/get_started/agent_framework/open_interpreter.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
Open-Interpreter For AIOS
=========================

Introduction
------------
Open Interpreter lets language models run code. We made it
so that agent applications developed with Open Interpreter can run on AIOS by adding
just one line of code.

Quick start
-----------
For installation and usage of open-interpreter, please refer to the `official open-interpreter documentation <https://docs.openinterpreter.com/getting-started/introduction>`_.

If you want to run an application developed with open-interpreter on AIOS, please add ``prepare_interpreter()``
before you use open-interpreter. ``AgentProcessFactory`` is a required parameter.

.. code-block:: python
from pyopenagi.agents.agent_process import AgentProcessFactory
from aios.sdk.interpreter.adapter import prepare_interpreter
from interpreter import interpreter
# example process_factory
process_factory = AgentProcessFactory()
# prepare the interpreter for AIOS
prepare_interpreter(process_factory)
Then nothing needs to change, use interpreter as usual.

.. code-block:: python
interpreter.chat("In a group of 23 people, the probability of at least two having the same birthday is greater than 50%")
Don't forget to start the scheduler so that AIOS can manage LLM calls.
Details and more examples can be found at https://github.com/agiresearch/AIOS/tree/main/scripts/aios-interpreter


prepare_interpreter()
---------------------

.. .. automethod:: aios.sdk.interpreter.adapter.prepare_interpreter
.. :noindex:
``prepare_interpreter()``

Prepare the interpreter for running LLM in aios.

Parameters:
**agent_process_factory** - Used to create agent processes.

`Source Code <https://github.com/agiresearch/AIOS/blob/main/aios/sdk/interpreter/adapter.py>`_
2 changes: 1 addition & 1 deletion pyopenagi/agents/call_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ class CallCore:
def __init__(self,
agent_name,
agent_process_factory,
log_mode: str
log_mode: str = "console"
):
self.agent_name = agent_name
self.agent_process_factory = agent_process_factory
Expand Down
80 changes: 80 additions & 0 deletions scripts/aios-interpreter/example_aios_interpreter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))


import warnings
from dotenv import load_dotenv
from aios.sdk.interpreter.adapter import prepare_interpreter
from aios.hooks.llm import useKernel, useFIFOScheduler
from aios.utils.utils import (
parse_global_args,
delete_directories
)
from pyopenagi.agents.agent_process import AgentProcessFactory
from interpreter import interpreter


def clean_cache(root_directory):
    """Delete known cache/checkpoint directories beneath *root_directory*."""
    cache_dirs = {
        "__pycache__",
        ".ipynb_checkpoints",
        ".pytest_cache",
        "context_restoration",
    }
    delete_directories(root_directory, cache_dirs)


def main():
    """Run a single open-interpreter chat session on top of the AIOS kernel.

    Flow: parse CLI args -> boot the LLM kernel -> create a FIFO scheduler ->
    patch open-interpreter via prepare_interpreter -> chat -> shut down.
    """
    # parse arguments and set configuration for this run accordingly
    warnings.filterwarnings("ignore")
    parser = parse_global_args()
    args = parser.parse_args()

    llm_name = args.llm_name
    max_gpu_memory = args.max_gpu_memory
    eval_device = args.eval_device
    max_new_tokens = args.max_new_tokens
    scheduler_log_mode = args.scheduler_log_mode
    # agent_log_mode = args.agent_log_mode
    llm_kernel_log_mode = args.llm_kernel_log_mode
    use_backend = args.use_backend
    load_dotenv()  # load API keys / settings from a local .env file

    # Boot the LLM kernel that will actually serve the completions.
    llm = useKernel(
        llm_name=llm_name,
        max_gpu_memory=max_gpu_memory,
        eval_device=eval_device,
        max_new_tokens=max_new_tokens,
        log_mode=llm_kernel_log_mode,
        use_backend=use_backend
    )

    # run agents concurrently for maximum efficiency using a scheduler

    # scheduler = FIFOScheduler(llm=llm, log_mode=scheduler_log_mode)

    startScheduler, stopScheduler = useFIFOScheduler(
        llm=llm,
        log_mode=scheduler_log_mode,
        get_queue_message=None
    )

    process_factory = AgentProcessFactory()

    # Route open-interpreter's LLM calls through AIOS before any chat.
    prepare_interpreter(process_factory)

    # The scheduler must be running before interpreter.chat issues LLM calls.
    startScheduler()

    # interpreter.chat("Calculate 10 * 20 / 2")
    # interpreter.chat("Plot the sin function")
    # interpreter.chat("Use the Euclidean algorithm to calculate the greatest common divisor (GCD) of 78782 and 64.")
    interpreter.chat("In a group of 23 people, the probability of at least two having the same birthday is greater than 50%")

    stopScheduler()

    clean_cache(root_directory="./")


if __name__ == "__main__":
main()

0 comments on commit 741207b

Please sign in to comment.