Skip to content

Commit

Permalink
Use Configuration of the rearch branch (Significant-Gravitas#4803)
Browse files Browse the repository at this point in the history
  • Loading branch information
waynehamadi authored Jun 27, 2023
1 parent b157054 commit 9f353f4
Show file tree
Hide file tree
Showing 13 changed files with 393 additions and 312 deletions.
494 changes: 237 additions & 257 deletions autogpt/config/config.py

Large diffs are not rendered by default.

28 changes: 13 additions & 15 deletions autogpt/configurator.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,13 +51,13 @@ def create_config(
allow_downloads (bool): Whether to allow Auto-GPT to download files natively
skips_news (bool): Whether to suppress the output of latest news on startup
"""
config.set_debug_mode(False)
config.set_continuous_mode(False)
config.set_speak_mode(False)
config.debug_mode = False
config.continuous_mode = False
config.speak_mode = False

if debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
config.set_debug_mode(True)
config.debug_mode = True

if continuous:
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
Expand All @@ -68,42 +68,40 @@ def create_config(
" cause your AI to run forever or carry out actions you would not usually"
" authorise. Use at your own risk.",
)
config.set_continuous_mode(True)
config.continuous_mode = True

if continuous_limit:
logger.typewriter_log(
"Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
)
config.set_continuous_limit(continuous_limit)
config.continuous_limit = continuous_limit

# Check if continuous limit is used without continuous mode
if continuous_limit and not continuous:
raise click.UsageError("--continuous-limit can only be used with --continuous")

if speak:
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
config.set_speak_mode(True)
config.speak_mode = True

# Set the default LLM models
if gpt3only:
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
# --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM_MODEL config
config.set_fast_llm_model(GPT_3_MODEL)
config.set_smart_llm_model(GPT_3_MODEL)
config.fast_llm_model = GPT_3_MODEL
config.smart_llm_model = GPT_3_MODEL

elif (
gpt4only
and check_model(GPT_4_MODEL, model_type="smart_llm_model") == GPT_4_MODEL
):
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
# --gpt4only should always use gpt-4, despite user's SMART_LLM_MODEL config
config.set_fast_llm_model(GPT_4_MODEL)
config.set_smart_llm_model(GPT_4_MODEL)
config.fast_llm_model = GPT_4_MODEL
config.smart_llm_model = GPT_4_MODEL
else:
config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
config.set_smart_llm_model(
check_model(config.smart_llm_model, "smart_llm_model")
)
config.fast_llm_model = check_model(config.fast_llm_model, "fast_llm_model")
config.smart_llm_model = check_model(config.smart_llm_model, "smart_llm_model")

if memory_type:
supported_memory = get_supported_memory_backends()
Expand Down
Empty file.
98 changes: 98 additions & 0 deletions autogpt/core/configuration/schema.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
import abc
import copy
import typing
from typing import Any

from pydantic import BaseModel


class SystemConfiguration(BaseModel):
    # Base model for nested, user-tunable configuration sections.
    def get_user_config(self) -> dict[str, Any]:
        """Return only this model's user-configurable fields.

        Delegates to ``_get_user_config_fields``, which recurses into nested
        ``SystemConfiguration`` values (including lists/dicts of them).
        """
        return _get_user_config_fields(self)

    class Config:
        # Reject unknown fields so typos in configuration fail loudly.
        extra = "forbid"
        # Serialize enum members by their values rather than Enum objects.
        use_enum_values = True


class SystemSettings(BaseModel, abc.ABC):
    """A base class for all system settings."""

    # Human-readable identifier for the system.
    name: str
    # Optional free-text description of the system.
    description: typing.Optional[str]

    class Config:
        # Reject unknown fields so typos in settings fail loudly.
        extra = "forbid"
        # Serialize enum members by their values rather than Enum objects.
        use_enum_values = True


class Configurable(abc.ABC):
    """A base class for all configurable objects."""

    # Prefix for this system's configuration values (empty by default).
    prefix: str = ""
    # Concrete subclasses must supply their default settings instance.
    defaults_settings: typing.ClassVar[SystemSettings]

    @classmethod
    def get_user_config(cls) -> dict[str, Any]:
        """Return the user-configurable fields of the default settings."""
        return _get_user_config_fields(cls.defaults_settings)

    @classmethod
    def build_agent_configuration(
        cls, configuration: typing.Optional[dict] = None
    ) -> SystemSettings:
        """Process the configuration for this object.

        Args:
            configuration: Overrides to deep-merge on top of the defaults.
                ``None`` (the default) means no overrides. Note: the previous
                ``configuration: dict = {}`` default was a mutable default
                argument shared across all calls; ``None`` avoids that pitfall
                while remaining backward-compatible for callers.

        Returns:
            A settings instance of the same concrete class as
            ``defaults_settings``, validated via ``parse_obj``.
        """
        defaults_settings = cls.defaults_settings.dict()
        final_configuration = deep_update(defaults_settings, configuration or {})

        return cls.defaults_settings.__class__.parse_obj(final_configuration)


def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
    """
    Collect the user config fields of a Pydantic model instance.

    A field is included when it is marked ``user_configurable`` in its field
    info, or when its value is (a container of) ``SystemConfiguration``
    objects, in which case their own user configs are gathered recursively.

    Args:
        instance: The Pydantic model instance.

    Returns:
        The user config fields of the instance.
    """
    collected: dict[str, Any] = {}

    for field_name, field_value in instance.__dict__.items():
        model_field = instance.__fields__[field_name]
        if "user_configurable" in model_field.field_info.extra:
            collected[field_name] = field_value
        elif isinstance(field_value, SystemConfiguration):
            collected[field_name] = field_value.get_user_config()
        elif isinstance(field_value, list) and all(
            isinstance(item, SystemConfiguration) for item in field_value
        ):
            # NOTE: an empty list also lands here (all() is vacuously True).
            collected[field_name] = [item.get_user_config() for item in field_value]
        elif isinstance(field_value, dict) and all(
            isinstance(item, SystemConfiguration) for item in field_value.values()
        ):
            # NOTE: an empty dict also lands here (all() is vacuously True).
            collected[field_name] = {
                key: item.get_user_config() for key, item in field_value.items()
            }

    return collected


def deep_update(original_dict: dict, update_dict: dict) -> dict:
    """
    Recursively merge ``update_dict`` into a deep copy of ``original_dict``.

    Args:
        original_dict (dict): The dictionary to be updated (left unmodified).
        update_dict (dict): The dictionary to update with.

    Returns:
        dict: A new dictionary holding the merged result.
    """
    merged = copy.deepcopy(original_dict)
    for key, new_value in update_dict.items():
        existing = merged.get(key)
        # Recurse only when both sides hold dicts; otherwise overwrite.
        if isinstance(existing, dict) and isinstance(new_value, dict):
            merged[key] = deep_update(existing, new_value)
        else:
            merged[key] = new_value
    return merged
7 changes: 4 additions & 3 deletions autogpt/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from colorama import Fore, Style

from autogpt.agent import Agent
from autogpt.config import Config, check_openai_api_key
from autogpt.config.config import Config, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory.vector import get_memory
Expand Down Expand Up @@ -52,7 +52,8 @@ def run_auto_gpt(
logger.set_level(logging.DEBUG if debug else logging.INFO)
logger.speak_mode = speak

config = Config()
config = Config.build_config_from_env()

# TODO: fill in llm values here
check_openai_api_key(config)

Expand Down Expand Up @@ -120,7 +121,7 @@ def run_auto_gpt(
# HACK: doing this here to collect some globals that depend on the workspace.
Workspace.build_file_logger_path(config, workspace_directory)

config.set_plugins(scan_plugins(config, config.debug_mode))
config.plugins = scan_plugins(config, config.debug_mode)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()

Expand Down
2 changes: 1 addition & 1 deletion autogpt/plugins/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,8 +219,8 @@ def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTempl
loaded_plugins = []
# Generic plugins
plugins_path_path = Path(config.plugins_dir)
plugins_config = config.plugins_config

plugins_config = config.plugins_config
# Directory-based plugins
for plugin_path in [f.path for f in os.scandir(config.plugins_dir) if f.is_dir()]:
# Avoid going into __pycache__ or other hidden directories
Expand Down
6 changes: 3 additions & 3 deletions benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@ def run_task(task) -> None:


def bootstrap_agent(task):
config = Config()
config.set_continuous_mode(False)
config.set_temperature(0)
config = Config.build_config_from_env()
config.continuous_mode = False
config.temperature = 0
config.plain_output = True
command_registry = get_command_registry(config)
config.memory_backend = "no_memory"
Expand Down
2 changes: 1 addition & 1 deletion data_ingestion.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from autogpt.config import Config
from autogpt.memory.vector import VectorMemory, get_memory

config = Config()
config = Config.build_config_from_env()


def configure_logging():
Expand Down
11 changes: 7 additions & 4 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,13 +49,17 @@ def temp_plugins_config_file():
def config(
temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
) -> Config:
config = Config()
config = Config.build_config_from_env()
if not os.environ.get("OPENAI_API_KEY"):
os.environ["OPENAI_API_KEY"] = "sk-dummy"

config.plugins_dir = "tests/unit/data/test_plugins"
config.plugins_config_file = temp_plugins_config_file
config.load_plugins_config()

# avoid circular dependency
from autogpt.plugins.plugins_config import PluginsConfig

config.plugins_config = PluginsConfig.load_config(global_config=config)

# Do a little setup and teardown since the config object is a singleton
mocker.patch.multiple(
Expand Down Expand Up @@ -95,8 +99,7 @@ def agent(config: Config, workspace: Workspace) -> Agent:

command_registry = CommandRegistry()
ai_config.command_registry = command_registry

config.set_memory_backend("json_file")
config.memory_backend = "json_file"
memory_json_file = get_memory(config)
memory_json_file.clear()

Expand Down
4 changes: 2 additions & 2 deletions tests/integration/agent_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,12 @@
def memory_json_file(config: Config):
was_memory_backend = config.memory_backend

config.set_memory_backend("json_file")
config.memory_backend = "json_file"
memory = get_memory(config)
memory.clear()
yield memory

config.set_memory_backend(was_memory_backend)
config.memory_backend = was_memory_backend


@pytest.fixture
Expand Down
4 changes: 2 additions & 2 deletions tests/integration/memory/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def mock_get_embedding(mocker: MockerFixture, embedding_dimension: int):
def memory_none(agent_test_config: Config, mock_get_embedding):
was_memory_backend = agent_test_config.memory_backend

agent_test_config.set_memory_backend("no_memory")
agent_test_config.memory_backend = "no_memory"
yield get_memory(agent_test_config)

agent_test_config.set_memory_backend(was_memory_backend)
agent_test_config.memory_backend = was_memory_backend
Loading

0 comments on commit 9f353f4

Please sign in to comment.