Skip to content

Commit

Permalink
Add Console function to stream result to pretty print console output (m…
Browse files Browse the repository at this point in the history
  • Loading branch information
ekzhu authored Nov 9, 2024
1 parent 3f28aa8 commit f40b0c2
Show file tree
Hide file tree
Showing 5 changed files with 63 additions and 15 deletions.
7 changes: 3 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -109,11 +109,11 @@ and running on your machine.

```python
import asyncio
from autogen_ext.code_executor.docker_executor import DockerCommandLineCodeExecutor
from autogen_ext.code_executors import DockerCommandLineCodeExecutor
from autogen_ext.models import OpenAIChatCompletionClient
from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.task import TextMentionTermination
from autogen_agentchat.task import TextMentionTermination, Console

async def main() -> None:
async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
Expand All @@ -126,8 +126,7 @@ async def main() -> None:
stream = group_chat.run_stream(
task="Create a plot of NVIDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'."
)
async for message in stream:
print(message)
await Console(stream)

asyncio.run(main())
```
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ class ToolCallResultMessage(BaseMessage):
"""Messages for agent-to-agent communication."""


AgentMessage = InnerMessage | ChatMessage
AgentMessage = TextMessage | MultiModalMessage | StopMessage | HandoffMessage | ToolCallMessage | ToolCallResultMessage
"""All message types."""


Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
"""Public API of the ``task`` subpackage: termination conditions plus the
:func:`Console` helper for pretty-printing a team's message stream."""

# Console consumes a run_stream() generator and writes formatted output to stdout.
from ._console import Console
# Termination conditions used to stop a team run.
from ._terminations import MaxMessageTermination, StopMessageTermination, TextMentionTermination, TokenUsageTermination

__all__ = [
"MaxMessageTermination",
"TextMentionTermination",
"StopMessageTermination",
"TokenUsageTermination",
"Console",
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import sys
import time
from typing import AsyncGenerator

from autogen_core.components.models import RequestUsage

from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import AgentMessage


async def Console(stream: AsyncGenerator[AgentMessage | TaskResult, None]) -> None:
    """Consume the stream produced by
    :meth:`~autogen_agentchat.teams.Team.run_stream` and pretty-print each
    item to standard output, ending with a usage/duration summary."""

    started_at = time.time()
    # Running totals across all messages; updated whenever a message reports usage.
    totals = RequestUsage(prompt_tokens=0, completion_tokens=0)
    async for item in stream:
        if isinstance(item, TaskResult):
            # Terminal item of the stream: emit the run summary.
            elapsed = time.time() - started_at
            summary_lines = [
                f"{'-' * 10} Summary {'-' * 10}",
                f"Number of messages: {len(item.messages)}",
                f"Finish reason: {item.stop_reason}",
                f"Total prompt tokens: {totals.prompt_tokens}",
                f"Total completion tokens: {totals.completion_tokens}",
                f"Duration: {elapsed:.2f} seconds",
            ]
            sys.stdout.write("\n".join(summary_lines) + "\n")
        else:
            # Regular agent message: banner with the source, then the content.
            text = f"{'-' * 10} {item.source} {'-' * 10}\n{item.content}\n"
            usage = item.models_usage
            if usage:
                # Per-message token counts, folded into the running totals.
                text += f"[Prompt tokens: {usage.prompt_tokens}, Completion tokens: {usage.completion_tokens}]\n"
                totals.prompt_tokens += usage.prompt_tokens
                totals.completion_tokens += usage.completion_tokens
            sys.stdout.write(text)
Original file line number Diff line number Diff line change
Expand Up @@ -35,18 +35,31 @@
"name": "stdout",
"output_type": "stream",
"text": [
"source='user' models_usage=None content='What is the weather in New York?'\n",
"source='weather_agent' models_usage=RequestUsage(prompt_tokens=79, completion_tokens=15) content=[FunctionCall(id='call_CntvzLVL7iYJwPP2WWeBKNHc', arguments='{\"city\":\"New York\"}', name='get_weather')]\n",
"source='weather_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_CntvzLVL7iYJwPP2WWeBKNHc')]\n",
"source='weather_agent' models_usage=RequestUsage(prompt_tokens=90, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.'\n",
"source='weather_agent' models_usage=RequestUsage(prompt_tokens=137, completion_tokens=4) content='TERMINATE'\n",
"TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), ToolCallMessage(source='weather_agent', models_usage=RequestUsage(prompt_tokens=79, completion_tokens=15), content=[FunctionCall(id='call_CntvzLVL7iYJwPP2WWeBKNHc', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='weather_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_CntvzLVL7iYJwPP2WWeBKNHc')]), TextMessage(source='weather_agent', models_usage=RequestUsage(prompt_tokens=90, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.'), TextMessage(source='weather_agent', models_usage=RequestUsage(prompt_tokens=137, completion_tokens=4), content='TERMINATE')], stop_reason=\"Text 'TERMINATE' mentioned\")\n"
"---------- user ----------\n",
"What is the weather in New York?\n",
"---------- weather_agent ----------\n",
"[FunctionCall(id='call_AhTZ2q3TNL8x0qs00e3wIZ7y', arguments='{\"city\":\"New York\"}', name='get_weather')]\n",
"[Prompt tokens: 79, Completion tokens: 15]\n",
"---------- weather_agent ----------\n",
"[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_AhTZ2q3TNL8x0qs00e3wIZ7y')]\n",
"---------- weather_agent ----------\n",
"The weather in New York is currently 73 degrees and sunny.\n",
"[Prompt tokens: 90, Completion tokens: 14]\n",
"---------- weather_agent ----------\n",
"TERMINATE\n",
"[Prompt tokens: 137, Completion tokens: 4]\n",
"---------- Summary ----------\n",
"Number of messages: 5\n",
"Finish reason: Text 'TERMINATE' mentioned\n",
"Total prompt tokens: 306\n",
"Total completion tokens: 33\n",
"Duration: 1.43 seconds\n"
]
}
],
"source": [
"from autogen_agentchat.agents import AssistantAgent\n",
"from autogen_agentchat.task import TextMentionTermination\n",
"from autogen_agentchat.task import Console, TextMentionTermination\n",
"from autogen_agentchat.teams import RoundRobinGroupChat\n",
"from autogen_ext.models import OpenAIChatCompletionClient\n",
"\n",
Expand All @@ -72,8 +85,7 @@
"\n",
" # Run the team and stream messages\n",
" stream = agent_team.run_stream(task=\"What is the weather in New York?\")\n",
" async for response in stream:\n",
" print(response)\n",
" await Console(stream)\n",
"\n",
"\n",
"# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).\n",
Expand Down Expand Up @@ -114,7 +126,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.6"
"version": "3.11.5"
}
},
"nbformat": 4,
Expand Down

0 comments on commit f40b0c2

Please sign in to comment.