Skip to content

Commit

Permalink
Optimize ActionHistory prompting
Browse files Browse the repository at this point in the history
  • Loading branch information
Pwuts committed Aug 28, 2023
1 parent f6e9565 commit 82a81e5
Show file tree
Hide file tree
Showing 6 changed files with 55 additions and 35 deletions.
11 changes: 3 additions & 8 deletions autogpt/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,14 +188,9 @@ def execute(
except AgentException as e:
result = ActionErrorResult(e.message, e)

logger.debug(f"Command result: {result}")

result_tlength = count_string_tokens(str(result), self.llm.name)
# history_tlength = count_string_tokens(
# str(self.message_history.summary_message()), self.llm.name
# )
history_tlength = count_string_tokens(
self.event_history.generate_list(), self.llm.name
self.event_history.fmt_paragraph(), self.llm.name
)
if result_tlength + history_tlength > self.send_token_limit:
result = ActionErrorResult(
Expand All @@ -207,15 +202,15 @@ def execute(
if not plugin.can_handle_post_command():
continue
if result.status == "success":
result.results = plugin.post_command(command_name, result.results)
result.outputs = plugin.post_command(command_name, result.outputs)
elif result.status == "error":
result.reason = plugin.post_command(command_name, result.reason)

# If the command produced a result, append it to the message history
if result.status == "success":
self.message_history.add(
"system",
f"Command {command_name} returned: {result.results}",
f"Command {command_name} returned: {result.outputs}",
"action_result",
)
elif result.status == "error":
Expand Down
9 changes: 4 additions & 5 deletions autogpt/agents/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,13 +180,12 @@ def construct_base_prompt(
"""

if self.event_history:
prepend_messages.append(
prepend_messages.insert(
0,
Message(
"system",
"# Progress\n"
"So far, the following things have happened:\n"
f"{self.event_history.generate_list()}",
)
"## Progress\n\n" f"{self.event_history.fmt_paragraph()}",
),
)

prompt = ChatSequence.for_model(
Expand Down
4 changes: 2 additions & 2 deletions autogpt/agents/planning_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,15 +297,15 @@ def execute(
if not plugin.can_handle_post_command():
continue
if result.status == "success":
result.results = plugin.post_command(command_name, result.results)
result.outputs = plugin.post_command(command_name, result.outputs)
elif result.status == "error":
result.reason = plugin.post_command(command_name, result.reason)

# If the command produced a result, append it to the message history
if result.status == "success":
self.message_history.add(
"system",
f"Command {command_name} returned: {result.results}",
f"Command {command_name} returned: {result.outputs}",
"action_result",
)
elif result.status == "error":
Expand Down
14 changes: 6 additions & 8 deletions autogpt/commands/file_operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import os
import os.path
from pathlib import Path
from typing import Generator, Literal
from typing import Iterator, Literal

from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import DuplicateOperationError
Expand All @@ -34,7 +34,9 @@ def text_checksum(text: str) -> str:

def operations_from_log(
log_path: str | Path,
) -> Generator[tuple[Operation, str, str | None], None, None]:
) -> Iterator[
tuple[Literal["write", "append"], str, str] | tuple[Literal["delete"], str, None]
]:
"""Parse the file operations log and return a tuple containing the log entries"""
try:
log = open(log_path, "r", encoding="utf-8")
Expand All @@ -48,11 +50,7 @@ def operations_from_log(
operation, tail = line.split(": ", maxsplit=1)
operation = operation.strip()
if operation in ("write", "append"):
try:
path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
except ValueError:
logger.warn(f"File log entry lacks checksum: '{line}'")
path, checksum = tail.strip(), None
path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
yield (operation, path, checksum)
elif operation == "delete":
yield (operation, tail.strip(), None)
Expand Down Expand Up @@ -228,7 +226,7 @@ def write_to_file(filename: Path, text: str, agent: Agent) -> str:
with open(filename, "w", encoding="utf-8") as f:
f.write(text)
log_operation("write", filename, agent, checksum)
return "File written to successfully."
return f"File {filename.name} has been written successfully."


@sanitize_path_arg("filename")
Expand Down
46 changes: 34 additions & 12 deletions autogpt/models/agent_actions.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from dataclasses import dataclass
from typing import Any, Iterator, Literal, Optional

from autogpt.prompts.utils import format_numbered_list
from autogpt.prompts.utils import format_numbered_list, indent


@dataclass
Expand All @@ -18,16 +18,13 @@ def format_call(self) -> str:

@dataclass
class ActionSuccessResult:
results: Any
outputs: Any
status: Literal["success"] = "success"

def __str__(self) -> str:
results = (
f'"""{self.results}"""'
if type(self.results) == str and any(s in self.results for s in ("\n", '"'))
else f'"{self.results}"'
)
return f"Action succeeded, and returned: {results}"
outputs = str(self.outputs).replace("```", r"\```")
multiline = "\n" in outputs
return f"```\n{self.outputs}\n```" if multiline else str(self.outputs)


@dataclass
Expand Down Expand Up @@ -60,9 +57,9 @@ class CycleRecord:
action: Action
result: ActionResult | None

def __str__(self):
executed_action = f"You executed `{self.action.format_call()}`."
action_result = f" Result: {self.result}" if self.result else ""
def __str__(self) -> str:
    # One-line summary of the cycle: the executed call, followed by
    # ": <result>" if a result has been registered, else just a period.
    executed_action = f"Executed `{self.action.format_call()}`"
    action_result = f": {self.result}" if self.result else "."
    return executed_action + action_result

cursor: int
Expand Down Expand Up @@ -106,5 +103,30 @@ def register_result(self, result: ActionResult) -> None:
self.current_record.result = result
self.cursor = len(self.cycles)

def generate_list(self) -> str:
def fmt_list(self) -> str:
    """Render the cycle history as a numbered list, one cycle per line."""
    numbered_history = format_numbered_list(self.cycles)
    return numbered_history

def fmt_paragraph(self) -> str:
    """Render the cycle history as markdown sections, one per cycle.

    Each section has a "### Step N" heading with the executed call,
    bullets for the action's reasoning and the result status, and
    - depending on the status - the output, error reason/details, or
    human feedback. Sections are separated by blank lines.
    """
    formatted_steps: list[str] = []

    for step_no, cycle in enumerate(self.cycles, 1):
        status = cycle.result.status if cycle.result else "did_not_finish"
        section = (
            f"### Step {step_no}: Executed `{cycle.action.format_call()}`\n"
            f'- **Reasoning:** "{cycle.action.reasoning}"\n'
            f"- **Status:** `{status}`\n"
        )

        outcome = cycle.result
        if outcome:
            if outcome.status == "success":
                output = str(outcome)
                # Indent multi-line output under the bullet for readability
                if "\n" in output:
                    output = "\n" + indent(output)
                section += f"- **Output:** {output}"
            elif outcome.status == "error":
                section += f"- **Reason:** {outcome.reason}\n"
                if outcome.error:
                    section += f"- **Error:** {outcome.error}\n"
            elif outcome.status == "interrupted_by_human":
                section += f"- **Feedback:** {outcome.feedback}\n"

        formatted_steps.append(section)

    return "\n\n".join(formatted_steps)
6 changes: 6 additions & 0 deletions autogpt/prompts/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,9 @@

def format_numbered_list(items: list[Any], start_at: int = 1) -> str:
    """Render *items* as a newline-separated numbered list.

    Params:
        items: the items to enumerate; each is converted with str()
        start_at: the number assigned to the first item

    Returns:
        str: e.g. "1. first\n2. second"
    """
    lines = []
    for number, item in enumerate(items, start_at):
        lines.append(f"{number}. {item}")
    return "\n".join(lines)


def indent(content: str, indentation: int | str = 4) -> str:
if type(indentation) == int:
indentation = " " * indentation
return indentation + content.replace("\n", f"\n{indentation}") # type: ignore

0 comments on commit 82a81e5

Please sign in to comment.