import os
import sys
import time
from typing import AsyncGenerator, List

from autogen_core.components import Image
from autogen_core.components.models import RequestUsage

from autogen_agentchat.base import Response, TaskResult
from autogen_agentchat.messages import AgentMessage, MultiModalMessage


def _is_running_in_iterm() -> bool:
    return os.getenv("TERM_PROGRAM") == "iTerm.app"


def _is_output_a_tty() -> bool:
    return sys.stdout.isatty()


async def Console(
    stream: AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None],
    *,
    no_inline_images: bool = False,
) -> None:
    """Consume the stream from :meth:`~autogen_agentchat.base.Team.run_stream`
    or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`
    and print the messages to the console.

    Args:
        stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Stream to render
        no_inline_images (bool, optional): If the terminal is iTerm2, images are rendered inline by default. Set this to True to disable that behavior. Defaults to False.
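
    Example:

        A minimal usage sketch. The ``team`` object and its task string are
        illustrative placeholders for a team constructed elsewhere::

            import asyncio

            async def main() -> None:
                await Console(team.run_stream(task="Write a poem."))

            asyncio.run(main())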
    """
    # Render images inline only when running in iTerm2 on a real TTY and not disabled.
    render_image_iterm = _is_running_in_iterm() and _is_output_a_tty() and not no_inline_images
    start_time = time.time()
    total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0)
    async for message in stream:
        if isinstance(message, TaskResult):
            # A TaskResult ends the run: print the overall summary.
            duration = time.time() - start_time
            output = (
                f"{'-' * 10} Summary {'-' * 10}\n"
                f"Number of messages: {len(message.messages)}\n"
                f"Finish reason: {message.stop_reason}\n"
                f"Total prompt tokens: {total_usage.prompt_tokens}\n"
                f"Total completion tokens: {total_usage.completion_tokens}\n"
                f"Duration: {duration:.2f} seconds\n"
            )
            sys.stdout.write(output)
        elif isinstance(message, Response):
            duration = time.time() - start_time

            # Print final response.
            output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}\n"
            if message.chat_message.models_usage:
                output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n"
                total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens
                total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens
            sys.stdout.write(output)

            # Print summary.
            if message.inner_messages is not None:
                num_inner_messages = len(message.inner_messages)
            else:
                num_inner_messages = 0
            output = (
                f"{'-' * 10} Summary {'-' * 10}\n"
                f"Number of inner messages: {num_inner_messages}\n"
                f"Total prompt tokens: {total_usage.prompt_tokens}\n"
                f"Total completion tokens: {total_usage.completion_tokens}\n"
                f"Duration: {duration:.2f} seconds\n"
            )
            sys.stdout.write(output)
        else:
            output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n"
            if message.models_usage:
                output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n"
                total_usage.completion_tokens += message.models_usage.completion_tokens
                total_usage.prompt_tokens += message.models_usage.prompt_tokens
            sys.stdout.write(output)


# iTerm2 image rendering protocol: https://iterm2.com/documentation-images.html
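# Roughly, per that document, the emitted sequence is an OSC escape (ESC ])
# carrying iTerm2's proprietary code 1337, "File=inline=1:" plus the
# base64-encoded image bytes, terminated by BEL (\a).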
def _image_to_iterm(image: Image) -> str:
    image_data = image.to_base64()
    return f"\033]1337;File=inline=1:{image_data}\a\n"


def _message_to_str(message: AgentMessage, *, render_image_iterm: bool = False) -> str:
    if isinstance(message, MultiModalMessage):
        result: List[str] = []
        for c in message.content:
            if isinstance(c, str):
                result.append(c)
            else:
                if render_image_iterm:
                    result.append(_image_to_iterm(c))
                else:
                    result.append("<image>")
        return "\n".join(result)
    else:
        return f"{message.content}"