Logan/update llama index (run-llama#29)
* bump llama agi to use llamaindex v0.6.13

* linting

* bump llama-agi to v0.2.0
logan-markewich authored May 30, 2023
1 parent a288fca commit 3364c9e
Showing 18 changed files with 123 additions and 112 deletions.
convo_agents/convo_agents.py (6 additions & 11 deletions)

@@ -5,7 +5,7 @@
 """

 from llama_index import (
-    GPTSimpleVectorIndex, GPTListIndex, Document, ServiceContext
+    GPTVectorStoreIndex, GPTListIndex, Document, ServiceContext
 )
 from llama_index.indices.base import BaseGPTIndex
 from llama_index.data_structs import Node
@@ -68,7 +68,7 @@ def from_defaults(
     ) -> "ConvoAgent":
         name = name or "Agent"
         st_memory = st_memory or deque()
-        lt_memory = lt_memory or GPTSimpleVectorIndex([])
+        lt_memory = lt_memory or GPTVectorStoreIndex([])
         service_context = service_context or ServiceContext.from_defaults()
         return cls(
             name=name,
@@ -94,12 +94,9 @@ def generate_message(self, prev_message: Optional[str] = None) -> str:
             prev_message = self.st_memory[-1]

         st_memory_text = "\n".join([l for l in self.st_memory])
-        summary_response = self.lt_memory.query(
+        summary_response = self.lt_memory.as_query_engine(**self.lt_memory_query_kwargs).query(
             f"Tell me a bit more about any context that's relevant "
-            f"to the current messages: \n{st_memory_text}",
-            # similarity_top_k=10,
-            response_mode="compact",
-            **self.lt_memory_query_kwargs
+            f"to the current messages: \n{st_memory_text}"
         )

         # add both the long-term memory summary and the short-term conversation
@@ -114,9 +111,7 @@ def generate_message(self, prev_message: Optional[str] = None) -> str:
         )
         qa_prompt = QuestionAnswerPrompt(full_qa_prompt_tmpl)

-        response = list_builder.query(
-            "Generate the next message in the conversation.",
-            text_qa_template=qa_prompt,
-            response_mode="compact"
+        response = list_builder.as_query_engine(text_qa_template=qa_prompt).query(
+            "Generate the next message in the conversation."
         )
         return str(response)
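The changes above track the llama-index 0.6.x API, where indexes no longer accept query kwargs on .query() directly; you build a query engine first via .as_query_engine(). A minimal migration sketch, assuming llama-index==0.6.13 as pinned below (the document text and question are hypothetical):

    # Minimal sketch of the 0.5.x -> 0.6.x query migration, assuming
    # llama-index==0.6.13. The document text and question are hypothetical.
    from llama_index import Document, GPTVectorStoreIndex

    index = GPTVectorStoreIndex.from_documents(
        [Document("Alice and Bob discussed the quarterly report.")]
    )

    # 0.5.x style (removed): index.query("...", response_mode="compact")
    # 0.6.x style: configure a query engine once, then query it.
    query_engine = index.as_query_engine(response_mode="compact")
    response = query_engine.query("What did Alice and Bob discuss?")
    print(str(response))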
convo_agents/requirements.txt (1 addition & 1 deletion)

@@ -1 +1 @@
-llama-index==0.5.22
+llama-index==0.6.13
llama_agi/llama_agi/execution_agent/SimpleExecutionAgent.py (8 additions & 7 deletions)

@@ -15,22 +15,23 @@ class SimpleExecutionAgent(BaseExecutionAgent):
     This agent uses an LLM to execute a basic action without tools.
     The LlamaAgentPrompts.execution_prompt defines how this execution agent
-    behaves.
+    behaves.
     Usually, this is used for simple tasks, like generating the initial list of tasks.
-    The execution template kwargs are automatically extracted and expected to be
+    The execution template kwargs are automatically extracted and expected to be
     specified in execute_task().
     Args:
         llm (Union[BaseLLM, BaseChatModel]): The langchain LLM class to use.
-        model_name: (str): The name of the OpenAI model to use, if the LLM is
+        model_name: (str): The name of the OpenAI model to use, if the LLM is
             not provided.
         max_tokens: (int): The maximum number of tokens the LLM can generate.
-        prompts: (LlamaAgentPrompts): The prompt templates used during execution.
-            The only prompt used byt the SimpleExecutionAgent is
+        prompts: (LlamaAgentPrompts): The prompt templates used during execution.
+            The only prompt used byt the SimpleExecutionAgent is
             LlamaAgentPrompts.execution_prompt.
     """

     def __init__(
         self,
         llm: Optional[Union[BaseLLM, BaseChatModel]] = None,
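For orientation, a hedged usage sketch of the agent documented above: the constructor arguments follow the Args list, and the execute_task() kwargs mirror the calls the runners make later in this diff. The objective and task strings are hypothetical.

    # Hypothetical usage sketch based on the docstring above. The kwargs
    # passed to execute_task() must match the placeholders in
    # LlamaAgentPrompts.execution_prompt.
    from llama_agi.execution_agent import SimpleExecutionAgent

    agent = SimpleExecutionAgent(model_name="text-davinci-003", max_tokens=512)
    result = agent.execute_task(
        objective="Become a weather expert",    # hypothetical objective
        cur_task="List three research tasks",   # hypothetical task
        completed_tasks_summary="",             # nothing completed yet
    )
    print(result["output"])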
llama_agi/llama_agi/execution_agent/ToolExecutionAgent.py (15 additions & 11 deletions)

@@ -12,30 +12,31 @@

 class ToolExecutionAgent(BaseExecutionAgent):
     """Tool Execution Agent
     This agent is a wrapper around the zero-shot agent from Langchain. Using
-    a set of tools, the agent is expected to carry out and complete some task
+    a set of tools, the agent is expected to carry out and complete some task
     that will help achieve an overall objective.
-    The agents overall behavior is controlled by the LlamaAgentPrompts.agent_prefix
+    The agents overall behavior is controlled by the LlamaAgentPrompts.agent_prefix
     and LlamaAgentPrompts.agent_suffix prompt templates.
-    The execution template kwargs are automatically extracted and expected to be
-    specified in execute_task().
+    The execution template kwargs are automatically extracted and expected to be
+    specified in execute_task().
     execute_task() also returns the intermediate steps, for additional debugging and is
-    used for the streamlit example.
+    used for the streamlit example.
     Args:
         llm (Union[BaseLLM, BaseChatModel]): The langchain LLM class to use.
-        model_name: (str): The name of the OpenAI model to use, if the LLM is
+        model_name: (str): The name of the OpenAI model to use, if the LLM is
             not provided.
         max_tokens: (int): The maximum number of tokens the LLM can generate.
-        prompts: (LlamaAgentPrompts): The prompt templates used during execution.
-            The Tool Execution Agent uses LlamaAgentPrompts.agent_prefix and
+        prompts: (LlamaAgentPrompts): The prompt templates used during execution.
+            The Tool Execution Agent uses LlamaAgentPrompts.agent_prefix and
             LlamaAgentPrompts.agent_suffix.
         tools: (List[Tool]): The list of langchain tools for the execution agent to use.
     """

     def __init__(
         self,
         llm: Optional[Union[BaseLLM, BaseChatModel]] = None,
@@ -71,7 +72,10 @@ def __init__(
             llm_chain=self._llm_chain, tools=self.tools, verbose=True
         )
         self._execution_chain = AgentExecutor.from_agent_and_tools(
-            agent=self._agent, tools=self.tools, verbose=True, return_intermediate_steps=True
+            agent=self._agent,
+            tools=self.tools,
+            verbose=True,
+            return_intermediate_steps=True,
         )

     def execute_task(self, **prompt_kwargs: Any) -> Dict[str, str]:
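The return_intermediate_steps=True flag reflowed above is what lets execute_task() hand back the agent's (action, observation) trace alongside the final answer. A standalone sketch of that langchain mechanism under the 2023-era API; the Echo tool is hypothetical and not part of this repo's tool set:

    # Standalone sketch of langchain's return_intermediate_steps behavior
    # (2023-era langchain API; the Echo tool is hypothetical).
    from langchain.agents import AgentType, Tool, initialize_agent
    from langchain.llms import OpenAI

    tools = [Tool(name="Echo", func=lambda q: q, description="Echoes the input.")]
    agent = initialize_agent(
        tools,
        OpenAI(temperature=0),
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        return_intermediate_steps=True,
    )

    # Calling the executor returns a dict with both the final output and
    # the list of (AgentAction, observation) pairs taken along the way.
    result_dict = agent({"input": "Echo the word hello."})
    print(result_dict["output"])
    for action, observation in result_dict["intermediate_steps"]:
        print(action.tool, action.tool_input, "->", observation)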
llama_agi/llama_agi/execution_agent/__init__.py (1 addition & 4 deletions)

@@ -1,7 +1,4 @@
 from .SimpleExecutionAgent import SimpleExecutionAgent
 from .ToolExecutionAgent import ToolExecutionAgent

-__all__ = [
-    SimpleExecutionAgent,
-    ToolExecutionAgent
-]
+__all__ = [SimpleExecutionAgent, ToolExecutionAgent]
llama_agi/llama_agi/execution_agent/base.py (5 additions & 6 deletions)

@@ -7,9 +7,7 @@
 from langchain.chat_models.base import BaseChatModel
 from langchain.chat_models import ChatOpenAI

-from llama_agi.default_task_prompts import (
-    LC_PREFIX, LC_SUFFIX, LC_EXECUTION_PROMPT
-)
+from llama_agi.default_task_prompts import LC_PREFIX, LC_SUFFIX, LC_EXECUTION_PROMPT


 @dataclass
@@ -21,16 +19,17 @@ class LlamaAgentPrompts:

 class BaseExecutionAgent:
     """Base Execution Agent
     Args:
         llm (Union[BaseLLM, BaseChatModel]): The langchain LLM class to use.
-        model_name: (str): The name of the OpenAI model to use, if the LLM is
+        model_name: (str): The name of the OpenAI model to use, if the LLM is
             not provided.
         max_tokens: (int): The maximum number of tokens the LLM can generate.
         prompts: (LlamaAgentPrompts): The prompt templates used during execution.
-        tools: (List[Tool]): The list of langchain tools for the execution
+        tools: (List[Tool]): The list of langchain tools for the execution
             agent to use.
     """

     def __init__(
         self,
         llm: Optional[Union[BaseLLM, BaseChatModel]] = None,
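The LC_PREFIX, LC_SUFFIX, and LC_EXECUTION_PROMPT defaults imported above feed the LlamaAgentPrompts dataclass. A hedged sketch of overriding the execution prompt: the field name is inferred from the docstrings in this diff, and the template text is hypothetical but keeps the placeholders the agents fill in.

    # Hedged sketch: override the default execution prompt. The field name
    # (execution_prompt) is inferred from the docstrings in this diff; the
    # template text is hypothetical.
    from llama_agi.execution_agent import SimpleExecutionAgent
    from llama_agi.execution_agent.base import LlamaAgentPrompts

    prompts = LlamaAgentPrompts(
        execution_prompt=(
            "Objective: {objective}\n"
            "Completed so far: {completed_tasks_summary}\n"
            "Current task: {cur_task}\n"
            "Response: "
        )
    )
    agent = SimpleExecutionAgent(prompts=prompts)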
llama_agi/llama_agi/runners/AutoAGIRunner.py (4 additions & 2 deletions)

@@ -35,7 +35,9 @@ def run(
             completed_tasks_summary=initial_completed_tasks_summary,
         )

-        initial_task_list = self.task_manager.parse_task_list(initial_task_list_result['output'])
+        initial_task_list = self.task_manager.parse_task_list(
+            initial_task_list_result["output"]
+        )

         # add tasks to the task manager
         self.task_manager.add_new_tasks(initial_task_list)
@@ -53,7 +55,7 @@ def run(
                 objective=objective,
                 cur_task=cur_task,
                 completed_tasks_summary=completed_tasks_summary,
-            )['output']
+            )["output"]

             # store the task and result as completed
             self.task_manager.add_completed_task(cur_task, result)
llama_agi/llama_agi/runners/AutoStreamlitAGIRunner.py (36 additions & 27 deletions)

@@ -27,27 +27,27 @@ def run(
         initial_task: str,
         sleep_time: int,
         initial_task_list: Optional[List[str]] = None,
-        max_iterations: Optional[int] = None
+        max_iterations: Optional[int] = None,
     ) -> None:

         run_initial_task = False
-        if 'logs' not in st.session_state:
-            st.session_state['logs'] = []
-            st.session_state['state_str'] = "No state yet!"
-            st.session_state['tasks_summary'] = ""
+        if "logs" not in st.session_state:
+            st.session_state["logs"] = []
+            st.session_state["state_str"] = "No state yet!"
+            st.session_state["tasks_summary"] = ""
             run_initial_task = True

         logs_col, state_col = st.columns(2)

         with logs_col:
             st.subheader("Execution Log")
             st_logs = st.empty()
-            st_logs.write(st.session_state['logs'])
+            st_logs.write(st.session_state["logs"])

         with state_col:
             st.subheader("AGI State")
             st_state = st.empty()
-            st_state.write(st.session_state['state_str'])
+            st_state.write(st.session_state["state_str"])

         if run_initial_task:
             # get initial list of tasks
@@ -71,7 +71,9 @@ def run(
                 completed_tasks_summary=initial_completed_tasks_summary,
             )

-            initial_task_list = self.task_manager.parse_task_list(initial_task_list_result['output'])
+            initial_task_list = self.task_manager.parse_task_list(
+                initial_task_list_result["output"]
+            )

             # add tasks to the task manager
             self.task_manager.add_new_tasks(initial_task_list)
@@ -80,12 +82,18 @@ def run(
             self.task_manager.prioritize_tasks(objective)

             tasks_summary = initial_completed_tasks_summary
-            st.session_state['tasks_summary'] = tasks_summary
+            st.session_state["tasks_summary"] = tasks_summary

             # update streamlit state
-            st.session_state['state_str'] = log_current_status(initial_task, initial_task_list_result['output'], tasks_summary, self.task_manager.current_tasks, return_str=True)
-            if st.session_state['state_str']:
-                st_state.markdown(st.session_state['state_str'].replace("\n", "\n\n"))
+            st.session_state["state_str"] = log_current_status(
+                initial_task,
+                initial_task_list_result["output"],
+                tasks_summary,
+                self.task_manager.current_tasks,
+                return_str=True,
+            )
+            if st.session_state["state_str"]:
+                st_state.markdown(st.session_state["state_str"].replace("\n", "\n\n"))

         for _ in range(0, max_iterations):
             # Get the next task
@@ -95,14 +103,16 @@ def run(
             result_dict = self.execution_agent.execute_task(
                 objective=objective,
                 cur_task=cur_task,
-                completed_tasks_summary=st.session_state['tasks_summary'],
+                completed_tasks_summary=st.session_state["tasks_summary"],
             )
-            result = result_dict['output']
-
-            # update logs
-            log = make_intermediate_steps_pretty(json.dumps(result_dict['intermediate_steps'])) + [result]
-            st.session_state['logs'].append(log)
-            st_logs.write(st.session_state['logs'])
+            result = result_dict["output"]
+
+            # update logs
+            log = make_intermediate_steps_pretty(
+                json.dumps(result_dict["intermediate_steps"])
+            ) + [result]
+            st.session_state["logs"].append(log)
+            st_logs.write(st.session_state["logs"])

             # store the task and result as completed
             self.task_manager.add_completed_task(cur_task, result)
@@ -112,18 +122,18 @@ def run(

             # Summarize completed tasks
             completed_tasks_summary = self.task_manager.get_completed_tasks_summary()
-            st.session_state['tasks_summary'] = completed_tasks_summary
+            st.session_state["tasks_summary"] = completed_tasks_summary

             # log state of AGI to streamlit
-            st.session_state['state_str'] = log_current_status(
+            st.session_state["state_str"] = log_current_status(
                 cur_task,
                 result,
                 completed_tasks_summary,
                 self.task_manager.current_tasks,
-                return_str=True
+                return_str=True,
             )
-            if st.session_state['state_str'] is not None:
-                st_state.markdown(st.session_state['state_str'].replace("\n", "\n\n"))
+            if st.session_state["state_str"] is not None:
+                st_state.markdown(st.session_state["state_str"].replace("\n", "\n\n"))

             # Quit the loop?
             if len(self.task_manager.current_tasks) == 0:
@@ -132,4 +142,3 @@ def run(

             # wait a bit to let you read what's happening
             time.sleep(sleep_time)
-
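The streamlit changes above all revolve around two primitives: st.session_state, which persists values across streamlit's top-to-bottom script reruns, and st.empty() placeholders, which can be overwritten in place from inside a loop. A stripped-down sketch of that pattern (the log lines are hypothetical):

    # Stripped-down sketch of the session_state + st.empty() pattern used
    # above; the log lines are hypothetical.
    import time

    import streamlit as st

    if "logs" not in st.session_state:
        st.session_state["logs"] = []  # survives reruns of the script

    st.subheader("Execution Log")
    st_logs = st.empty()
    st_logs.write(st.session_state["logs"])

    for i in range(3):
        st.session_state["logs"].append(f"finished step {i}")
        st_logs.write(st.session_state["logs"])  # rewrite the placeholder in place
        time.sleep(1)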
llama_agi/llama_agi/runners/__init__.py (1 addition & 4 deletions)

@@ -1,7 +1,4 @@
 from .AutoAGIRunner import AutoAGIRunner
 from .AutoStreamlitAGIRunner import AutoStreamlitAGIRunner

-__all__ = [
-    AutoAGIRunner,
-    AutoStreamlitAGIRunner
-]
+__all__ = [AutoAGIRunner, AutoStreamlitAGIRunner]