From d96aaebc8d7e67829e111c30f87e33397392fbf4 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Thu, 24 Apr 2025 18:29:39 -0700 Subject: [PATCH] Update agent documentation (#6394) * Replace on_messages and on_messages_stream with run and run_stream to unify interface documentation with teams * Remove magentic-one-cli from homepage as it has not been maintained and improved for a while. --- .../agents/_assistant_agent.py | 113 ++---- .../packages/autogen-core/docs/src/index.md | 40 +- .../agentchat-user-guide/custom-agents.ipynb | 6 +- .../tutorial/agents.ipynb | 366 ++++++------------ 4 files changed, 172 insertions(+), 353 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index b51ded56e..cce76eaed 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -90,10 +90,20 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): the inner messages as they are created, and the :class:`~autogen_agentchat.base.Response` object as the last item before closing the generator. + The :meth:`BaseChatAgent.run` method returns a :class:`~autogen_agentchat.base.TaskResult` + containing the messages produced by the agent. In the list of messages, + :attr:`~autogen_agentchat.base.TaskResult.messages`, + the last message is the final response message. + + The :meth:`BaseChatAgent.run_stream` method creates an async generator that produces + the inner messages as they are created, and the :class:`~autogen_agentchat.base.TaskResult` + object as the last item before closing the generator. + .. attention:: The caller must only pass the new messages to the agent on each call - to the :meth:`on_messages` or :meth:`on_messages_stream` method. + to the :meth:`on_messages`, :meth:`on_messages_stream`, :meth:`BaseChatAgent.run`, + or :meth:`BaseChatAgent.run_stream` methods. The agent maintains its state between calls to these methods. Do not pass the entire conversation history to the agent on each call. @@ -215,10 +225,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): .. 
code-block:: python import asyncio - from autogen_core import CancellationToken from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage async def main() -> None: @@ -228,10 +236,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): ) agent = AssistantAgent(name="assistant", model_client=model_client) - response = await agent.on_messages( - [TextMessage(content="What is the capital of France?", source="user")], CancellationToken() - ) - print(response) + result = await agent.run(task="Name two cities in North America.") + print(result) asyncio.run(main()) @@ -246,8 +252,6 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): import asyncio from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage - from autogen_core import CancellationToken async def main() -> None: @@ -261,9 +265,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): model_client_stream=True, ) - stream = agent.on_messages_stream( - [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken() - ) + stream = agent.run_stream(task="Name two cities in North America.") async for message in stream: print(message) @@ -272,27 +274,23 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): .. code-block:: text - source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content='.' 
type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content=' TERMIN' type='ModelClientStreamingChunkEvent' - source='assistant' models_usage=None content='ATE' type='ModelClientStreamingChunkEvent' - Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada. TERMINATE', type='TextMessage'), inner_messages=[]) + source='user' models_usage=None metadata={} content='Name two cities in North America.' type='TextMessage' + source='assistant' models_usage=None metadata={} content='Two' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' cities' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' in' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' North' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' America' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' are' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' New' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' York' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' City' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' and' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' Toronto' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content='.' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content=' TERMIN' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=None metadata={} content='ATE' type='ModelClientStreamingChunkEvent' + source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) metadata={} content='Two cities in North America are New York City and Toronto. TERMINATE' type='TextMessage' + messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), metadata={}, content='Two cities in North America are New York City and Toronto. 
TERMINATE', type='TextMessage')] stop_reason=None **Example 3: agent with tools** @@ -312,9 +310,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): import asyncio from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage from autogen_agentchat.ui import Console - from autogen_core import CancellationToken async def get_current_time() -> str: @@ -327,12 +323,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): # api_key = "your_openai_api_key" ) agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time]) - - await Console( - agent.on_messages_stream( - [TextMessage(content="What is the current time?", source="user")], CancellationToken() - ) - ) + await Console(agent.run_stream(task="What is the current time?")) asyncio.run(main()) @@ -390,9 +381,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): from typing import Literal from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage from autogen_agentchat.ui import Console - from autogen_core import CancellationToken from autogen_core.tools import FunctionTool from autogen_ext.models.openai import OpenAIChatCompletionClient from pydantic import BaseModel @@ -430,7 +419,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): async def main() -> None: - stream = agent.on_messages_stream([TextMessage(content="I am happy today!", source="user")], CancellationToken()) + stream = agent.run_stream(task="I am happy today!") await Console(stream) @@ -458,8 +447,6 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): import asyncio from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage - from autogen_core import CancellationToken from autogen_core.model_context import BufferedChatCompletionContext from autogen_ext.models.openai import OpenAIChatCompletionClient @@ -482,20 +469,14 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): system_message="You are a helpful assistant.", ) - response = await agent.on_messages( - [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken() - ) - print(response.chat_message.content) # type: ignore + result = await agent.run(task="Name two cities in North America.") + print(result.messages[-1].content) # type: ignore - response = await agent.on_messages( - [TextMessage(content="My favorite color is blue.", source="user")], CancellationToken() - ) - print(response.chat_message.content) # type: ignore + result = await agent.run(task="My favorite color is blue.") + print(result.messages[-1].content) # type: ignore - response = await agent.on_messages( - [TextMessage(content="Did I ask you any question?", source="user")], CancellationToken() - ) - print(response.chat_message.content) # type: ignore + result = await agent.run(task="Did I ask you any question?") + print(result.messages[-1].content) # type: ignore asyncio.run(main()) @@ -518,8 +499,6 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): import asyncio from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage - from autogen_core import CancellationToken from autogen_core.memory import ListMemory, MemoryContent from autogen_ext.models.openai import OpenAIChatCompletionClient @@ -544,10 +523,8 @@ 
class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): system_message="You are a helpful assistant.", ) - response = await agent.on_messages( - [TextMessage(content="One idea for a dinner.", source="user")], CancellationToken() - ) - print(response.chat_message.content) # type: ignore + result = await agent.run(task="What is a good dinner idea?") + print(result.messages[-1].content) # type: ignore asyncio.run(main()) @@ -573,10 +550,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): .. code-block:: python import asyncio - from autogen_core import CancellationToken from autogen_ext.models.openai import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent - from autogen_agentchat.messages import TextMessage async def main() -> None: @@ -587,10 +562,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): # The system message is not supported by the o1 series model. agent = AssistantAgent(name="assistant", model_client=model_client, system_message=None) - response = await agent.on_messages( - [TextMessage(content="What is the capital of France?", source="user")], CancellationToken() - ) - print(response) + result = await agent.run(task="What is the capital of France?") + print(result.messages[-1].content) # type: ignore asyncio.run(main()) diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 1f13cc8dc..ca41ee79c 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -46,40 +46,12 @@ A framework for building AI agents and applications ::::{grid} :gutter: 2 -:::{grid-item-card} -:shadow: none -:margin: 2 0 0 0 -:columns: 12 12 6 6 - -
- -{fas}`book;pst-color-primary` -Magentic-One CLI [![PyPi magentic-one-cli](https://img.shields.io/badge/PyPi-magentic--one--cli-blue?logo=pypi)](https://pypi.org/project/magentic-one-cli/) -
-A console-based multi-agent assistant for web and file-based tasks.
-Built on AgentChat.
-
-```bash
-pip install -U magentic-one-cli
-m1 "Find flights from Seattle to Paris and format the result in a table"
-```
-
-+++
-
-```{button-ref} user-guide/agentchat-user-guide/magentic-one
-:color: secondary
-
-Get Started
-```
-
-:::
-
 :::{grid-item-card} {fas}`palette;pst-color-primary` Studio [![PyPi autogenstudio](https://img.shields.io/badge/PyPi-autogenstudio-blue?logo=pypi)](https://pypi.org/project/autogenstudio/)
 :shadow: none
 :margin: 2 0 0 0
-:columns: 12 12 6 6
+:columns: 12 12 12 12
 
-An app for prototyping and managing agents without writing code.
+A web-based UI for prototyping with agents without writing code.
 Built on AgentChat.
 
 ```bash
@@ -87,6 +59,8 @@ pip install -U autogenstudio
 autogenstudio ui --port 8080 --appdir ./myapp
 ```
 
+_Start here if you are new to AutoGen and want to prototype with agents without writing code._
+
 +++
 
 ```{button-ref} user-guide/autogenstudio-user-guide/index
@@ -124,7 +98,7 @@ async def main() -> None:
 asyncio.run(main())
 ```
 
-_Start here if you are building conversational agents. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._
+_Start here if you are prototyping with agents using Python. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._
 
 +++
 
@@ -147,7 +121,7 @@ An event-driven programming framework for building scalable multi-agent AI systems
 * Research on multi-agent collaboration.
 * Distributed agents for multi-language applications.
 
-_Start here if you are building workflows or distributed agent systems._
+_Start here if you are getting serious about building multi-agent systems._
 
 +++
 
@@ -167,7 +141,7 @@ Get Started
 Implementations of Core and AgentChat components that interface with external services or other libraries. You can find and use community extensions or create your own. Examples of built-in extensions:
 
-* {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter` for using LangChain tools.
+* {py:class}`~autogen_ext.tools.mcp.McpWorkbench` for using Model Context Protocol (MCP) servers.
* {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent` for using Assistant API.
* {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` for running model-generated code in a Docker container.
* {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime` for distributed agents.
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb
index 4ca98a433..b24b49e76 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/custom-agents.ipynb
@@ -16,7 +16,9 @@
     "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
     "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n",
     "\n",
-    "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. 
If this method is not implemented, the agent\n",
+    "Optionally, you can implement the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent.\n",
+    "This method is called by {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to stream messages.\n",
+    "If this method is not implemented, the agent\n",
     "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
     "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
     "yields all messages in the response."
@@ -731,7 +733,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.3"
+   "version": "3.12.7"
   }
  },
  "nbformat": 4,
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
index 69b5418b1..cd2dd61ae 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
@@ -11,40 +11,50 @@
     "\n",
     "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.name`: The unique name of the agent.\n",
     "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.description`: The description of the agent in text.\n",
-    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: Send the agent a sequence of messages that subclass {py:class}`~autogen_agentchat.messages.BaseChatMessage` and get a {py:class}`~autogen_agentchat.base.Response`. **It is important to note that agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n",
-    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` but returns an iterator of messages that subclass {py:class}`~autogen_agentchat.messages.BaseAgentEvent` or {py:class}`~autogen_agentchat.messages.BaseChatMessage` followed by a {py:class}`~autogen_agentchat.base.Response` as the last item.\n",
-    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: Reset the agent to its initial state.\n",
-    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: convenience methods that call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` respectively but offer the same interface as [Teams](./teams.ipynb).\n",
-    "\n",
-    "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types.\n",
-    "\n",
+    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`: The method that runs the agent given a task as a string or a list of messages, and returns a {py:class}`~autogen_agentchat.base.TaskResult`. 
**Agents are expected to be stateful and this method is expected to be called with new messages, not the complete history**.\n",
+    "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`: Same as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` but returns an iterator of messages that subclass {py:class}`~autogen_agentchat.messages.BaseAgentEvent` or {py:class}`~autogen_agentchat.messages.BaseChatMessage` followed by a {py:class}`~autogen_agentchat.base.TaskResult` as the last item.\n",
     "\n",
     "See {py:mod}`autogen_agentchat.messages` for more information on AgentChat message types."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "## Assistant Agent\n",
     "\n",
     "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a built-in agent that\n",
-    "uses a language model and has the ability to use tools."
+    "uses a language model and has the ability to use tools.\n",
+    "\n",
+    "```{warning}\n",
+    "{py:class}`~autogen_agentchat.agents.AssistantAgent` is a \"kitchen sink\" agent\n",
+    "for prototyping and educational purposes -- it is very general.\n",
+    "Make sure you read the documentation and implementation to understand the design choices.\n",
+    "Once you fully understand the design, you may want to implement your own agent.\n",
+    "See [Custom Agent](../custom-agents.ipynb).\n",
+    "```"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "from autogen_agentchat.agents import AssistantAgent\n",
-    "from autogen_agentchat.messages import StructuredMessage, TextMessage\n",
+    "from autogen_agentchat.messages import StructuredMessage\n",
     "from autogen_agentchat.ui import Console\n",
-    "from autogen_core import CancellationToken\n",
     "from autogen_ext.models.openai import OpenAIChatCompletionClient"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 23,
    "metadata": {},
    "outputs": [],
    "source": [
     "# Define a tool that searches the web for information.\n",
+    "# For simplicity, we will use a mock function here that returns a static string.\n",
     "async def web_search(query: str) -> str:\n",
     "    \"\"\"Find information on the web\"\"\"\n",
     "    return \"AutoGen is a programming framework for building multi-agent applications.\"\n",
@@ -52,7 +62,7 @@
     "\n",
-    "# Create an agent that uses the OpenAI GPT-4o model.\n",
+    "# Create an agent that uses the OpenAI GPT-4.1-nano model.\n",
     "model_client = OpenAIChatCompletionClient(\n",
-    "    model=\"gpt-4o\",\n",
+    "    model=\"gpt-4.1-nano\",\n",
     "    # api_key=\"YOUR_API_KEY\",\n",
     ")\n",
     "agent = AssistantAgent(\n",
@@ -67,65 +77,51 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "## Getting a Result\n",
     "\n",
-    "## Getting Responses\n",
-    "\n",
-    "We can use the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method to get the agent response to a given message.\n"
+    "We can use the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method to run the agent on a given task."
] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=598, completion_tokens=16), content=[FunctionCall(id='call_9UWYM1CgE3ZbnJcSJavNDB79', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_9UWYM1CgE3ZbnJcSJavNDB79', is_error=False)], type='ToolCallExecutionEvent')]\n", - "source='assistant' models_usage=None content='AutoGen is a programming framework for building multi-agent applications.' type='ToolCallSummaryMessage'\n" + "[TextMessage(source='user', models_usage=None, metadata={}, content='Find information on AutoGen', type='TextMessage'), ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=16), metadata={}, content=[FunctionCall(id='call_703i17OLXfztkuioUbkESnea', arguments='{\"query\":\"AutoGen\"}', name='web_search')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, metadata={}, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', name='web_search', call_id='call_703i17OLXfztkuioUbkESnea', is_error=False)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='assistant', models_usage=None, metadata={}, content='AutoGen is a programming framework for building multi-agent applications.', type='ToolCallSummaryMessage')]\n" ] } ], "source": [ - "async def assistant_run() -> None:\n", - " response = await agent.on_messages(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " )\n", - " print(response.inner_messages)\n", - " print(response.chat_message)\n", - "\n", - "\n", - "# Use asyncio.run(assistant_run()) when running in a script.\n", - "await assistant_run()" + "# Use asyncio.run(agent.run(...)) when running in a script.\n", + "result = await agent.run(task=\"Find information on AutoGen\")\n", + "print(result.messages)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The call to the {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages` method\n", - "returns a {py:class}`~autogen_agentchat.base.Response`\n", - "that contains the agent's final response in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute,\n", - "as well as a list of inner messages in the {py:attr}`~autogen_agentchat.base.Response.inner_messages` attribute,\n", - "which stores the agent's \"thought process\" that led to the final response.\n", + "The call to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method\n", + "returns a {py:class}`~autogen_agentchat.base.TaskResult`\n", + "with the list of messages in the {py:attr}`~autogen_agentchat.base.TaskResult.messages` attribute,\n", + "which stores the agent's \"thought process\" as well as the final response.\n", "\n", "```{note}\n", - "It is important to note that {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`\n", + "It is important to note that {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`\n", "will update the internal state of the agent -- it will add the messages to the agent's\n", - "history. 
So you should call this method with new messages.\n", - "**You should not repeatedly call this method with the same messages or the complete history.**\n", + "message history. You can also call {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`\n", + "without a task to get the agent to generate responses given its current state.\n", "```\n", "\n", "```{note}\n", "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", - "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", + "the same call to {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`.\n", "By default, the agent will return the result of the tool call as the final response.\n", - "```\n", - "\n", - "You can also call the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method, which is a convenience method that calls {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`. \n", - "It follows the same interface as [Teams](./teams.ipynb) and returns a {py:class}`~autogen_agentchat.base.TaskResult` object." + "```" ] }, { @@ -140,19 +136,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/html": [ - "" + "" ], "text/plain": [ - "" + "" ] }, - "execution_count": 9, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -174,29 +170,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "The image depicts a vintage car, likely from the 1930s or 1940s, with a sleek, classic design. The car seems to be customized or well-maintained, as indicated by its shiny exterior and lowered stance. It has a prominent grille and round headlights. There's a license plate on the front with the text \"FARMER BOY.\" The setting appears to be a street with old-style buildings in the background, suggesting a historical or retro theme.\n" + "The image depicts a scenic mountain landscape under a clear blue sky. There are several rugged mountain peaks in the background, with some clouds scattered across the sky. In the valley below, there is a body of water, possibly a lake or river, surrounded by greenery. The overall scene conveys a sense of natural beauty and tranquility.\n" ] } ], "source": [ "# Use asyncio.run(...) when running in a script.\n", - "response = await agent.on_messages([multi_modal_message], CancellationToken())\n", - "print(response.chat_message)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also use {py:class}`~autogen_agentchat.messages.MultiModalMessage` as a `task`\n", - "input to the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run` method." + "result = await agent.run(task=multi_modal_message)\n", + "print(result.messages[-1].content) # type: ignore" ] }, { @@ -206,51 +194,47 @@ "## Streaming Messages\n", "\n", "We can also stream each message as it is generated by the agent by using the\n", - "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", + "{py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` method,\n", "and use {py:class}`~autogen_agentchat.ui.Console` to print the messages\n", "as they appear to the console." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", + "---------- TextMessage (user) ----------\n", + "Find information on AutoGen\n", + "---------- ToolCallRequestEvent (assistant) ----------\n", + "[FunctionCall(id='call_HOTRhOzXCBm0zSqZCFbHD7YP', arguments='{\"query\":\"AutoGen\"}', name='web_search')]\n", "[Prompt tokens: 61, Completion tokens: 16]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", - "---------- assistant ----------\n", - "AutoGen is a programming framework designed for building multi-agent applications. If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", - "[Prompt tokens: 93, Completion tokens: 32]\n", + "---------- ToolCallExecutionEvent (assistant) ----------\n", + "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', name='web_search', call_id='call_HOTRhOzXCBm0zSqZCFbHD7YP', is_error=False)]\n", + "---------- ToolCallSummaryMessage (assistant) ----------\n", + "AutoGen is a programming framework for building multi-agent applications.\n", "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 154\n", - "Total completion tokens: 48\n", - "Duration: 4.30 seconds\n" + "Number of messages: 4\n", + "Finish reason: None\n", + "Total prompt tokens: 61\n", + "Total completion tokens: 16\n", + "Duration: 0.52 seconds\n" ] } ], "source": [ "async def assistant_run_stream() -> None:\n", " # Option 1: read each message from the stream (as shown in the previous example).\n", - " # async for message in agent.on_messages_stream(\n", - " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " # cancellation_token=CancellationToken(),\n", - " # ):\n", + " # async for message in agent.run_stream(task=\"Find information on AutoGen\"):\n", " # print(message)\n", "\n", " # Option 2: use Console to print all messages as they appear.\n", " await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " ),\n", + " agent.run_stream(task=\"Find information on AutoGen\"),\n", " output_stats=True, # Enable stats printing.\n", " )\n", "\n", @@ -263,21 +247,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method\n", - "returns an asynchronous generator that yields each inner message generated by the agent,\n", - "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", + "The {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` method\n", + "returns an asynchronous generator that yields each message generated by the agent,\n", + "followed by a {py:class}`~autogen_agentchat.base.TaskResult` as the last item.\n", "\n", "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", - "gather information and responded based on the search results.\n", - "\n", - "You can also use 
{py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to get the same streaming behavior as {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`. It follows the same interface as [Teams](./teams.ipynb)."
+    "gather information and responded based on the search results."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Using Tools\n",
+    "## Using Tools and Workbench\n",
     "\n",
     "Large Language Models (LLMs) are typically limited to generating text or code responses. \n",
     "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n",
@@ -292,10 +274,13 @@
     "\n",
     "In AgentChat, the {py:class}`~autogen_agentchat.agents.AssistantAgent` can use tools to perform specific actions.\n",
     "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n",
-    "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n",
+    "A single custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n",
+    "\n",
+    "On the other hand, a {py:class}`~autogen_core.tools.Workbench` is a collection of tools that share state and resources.\n",
     "\n",
     "```{note}\n",
-    "For how to use model clients directly with tools, refer to the [Tools](../../core-user-guide/components/tools.ipynb) section\n",
+    "For how to use model clients directly with tools and workbench, refer to the [Tools](../../core-user-guide/components/tools.ipynb)\n",
+    "and [Workbench](../../core-user-guide/components/workbench.ipynb) sections\n",
     "in the Core User Guide.\n",
     "```\n",
     "\n",
@@ -305,7 +290,7 @@
     "can add a reflection step to have the model summarize the tool's output,\n",
     "by setting the `reflect_on_tool_use=True` parameter in the {py:class}`~autogen_agentchat.agents.AssistantAgent` constructor.\n",
     "\n",
-    "### Built-in Tools\n",
+    "### Built-in Tools and Workbench\n",
     "\n",
     "AutoGen Extension provides a set of built-in tools that can be used with the Assistant Agent.\n",
     "Head over to the [API documentation](../../../reference/index.md) for all the available tools\n",
@@ -314,7 +299,7 @@
     "- {py:mod}`~autogen_ext.tools.graphrag`: Tools for using GraphRAG index.\n",
     "- {py:mod}`~autogen_ext.tools.http`: Tools for making HTTP requests.\n",
     "- {py:mod}`~autogen_ext.tools.langchain`: Adaptor for using LangChain tools.\n",
-    "- {py:mod}`~autogen_ext.tools.mcp`: Tools for using Model Chat Protocol (MCP) servers."
+    "- {py:mod}`~autogen_ext.tools.mcp`: Tools and workbench for using Model Context Protocol (MCP) servers."
    ]
   },
@@ -376,11 +361,11 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Model Context Protocol Tools\n",
+    "### Model Context Protocol (MCP) Workbench\n",
     "\n",
     "The {py:class}`~autogen_agentchat.agents.AssistantAgent` can also use tools that are\n",
     "served from a Model Context Protocol (MCP) server\n",
-    "using {py:func}`~autogen_ext.tools.mcp.mcp_server_tools`."
+    "using {py:class}`~autogen_ext.tools.mcp.McpWorkbench`."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
       "Seattle, located in Washington state, is the most populous city in the state and a major city in the Pacific Northwest region of the United States. It's known for its vibrant cultural scene, significant economic presence, and rich history. Here are some key points about Seattle from the Wikipedia page:\n",
       "\n",
       "1. 
**History and Geography**: Seattle is situated between Puget Sound and Lake Washington, with the Cascade Range to the east and the Olympic Mountains to the west. Its history is deeply rooted in Native American heritage and its development was accelerated with the arrival of settlers in the 19th century. The city was officially incorporated in 1869.\n", - "\n", - "2. **Economy**: Seattle is a major economic hub with a diverse economy anchored by sectors like aerospace, technology, and retail. It's home to influential companies such as Amazon and Starbucks, and has a significant impact on the tech industry due to companies like Microsoft and other technology enterprises in the surrounding area.\n", - "\n", - "3. **Cultural Significance**: Known for its music scene, Seattle was the birthplace of grunge music in the early 1990s. It also boasts significant attractions like the Space Needle, Pike Place Market, and the Seattle Art Museum. \n", - "\n", - "4. **Education and Innovation**: The city hosts important educational institutions, with the University of Washington being a leading research university. Seattle is recognized for fostering innovation and is a leader in environmental sustainability efforts.\n", - "\n", - "5. **Demographics and Diversity**: Seattle is noted for its diverse population, reflected in its rich cultural tapestry. It has seen a significant increase in population, leading to urban development and changes in its social landscape.\n", - "\n", - "These points highlight Seattle as a dynamic city with a significant cultural, economic, and educational influence within the United States and beyond.\n" + "Seattle is a major city located in the state of Washington, United States. It was founded on November 13, 1851, and incorporated as a town on January 14, 1865, and later as a city on December 2, 1869. The city is named after Chief Seattle. It covers an area of approximately 142 square miles, with a population of around 737,000 as of the 2020 Census, and an estimated 755,078 residents in 2023. Seattle is known by nicknames such as The Emerald City, Jet City, and Rain City, and has mottos including The City of Flowers and The City of Goodwill. The city operates under a mayor–council government system, with Bruce Harrell serving as mayor. Key landmarks include the Space Needle, Pike Place Market, Amazon Spheres, and the Seattle Great Wheel. It is situated on the U.S. 
West Coast, with a diverse urban and metropolitan area that extends to a population of over 4 million in the greater metropolitan region.\n" ] } ], "source": [ "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import TextMessage\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "from autogen_ext.tools.mcp import StdioServerParams, mcp_server_tools\n", + "from autogen_ext.tools.mcp import McpWorkbench, StdioServerParams\n", "\n", "# Get the fetch tool from mcp-server-fetch.\n", "fetch_mcp_server = StdioServerParams(command=\"uvx\", args=[\"mcp-server-fetch\"])\n", - "tools = await mcp_server_tools(fetch_mcp_server)\n", "\n", - "# Create an agent that can use the fetch tool.\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(name=\"fetcher\", model_client=model_client, tools=tools, reflect_on_tool_use=True) # type: ignore\n", + "# Create an MCP workbench which provides a session to the mcp server.\n", + "async with McpWorkbench(fetch_mcp_server) as workbench: # type: ignore\n", + " # Create an agent that can use the fetch tool.\n", + " model_client = OpenAIChatCompletionClient(model=\"gpt-4.1-nano\")\n", + " fetch_agent = AssistantAgent(\n", + " name=\"fetcher\", model_client=model_client, workbench=workbench, reflect_on_tool_use=True\n", + " )\n", "\n", - "# Let the agent fetch the content of a URL and summarize it.\n", - "result = await agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", - "assert isinstance(result.messages[-1], TextMessage)\n", - "print(result.messages[-1].content)\n", + " # Let the agent fetch the content of a URL and summarize it.\n", + " result = await fetch_agent.run(task=\"Summarize the content of https://en.wikipedia.org/wiki/Seattle\")\n", + " assert isinstance(result.messages[-1], TextMessage)\n", + " print(result.messages[-1].content)\n", "\n", - "# Close the connection to the model client.\n", - "await model_client.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Langchain Tools\n", - "\n", - "You can also use tools from the Langchain library\n", - "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------- assistant ----------\n", - "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", - "[Prompt tokens: 111, Completion tokens: 22]\n", - "---------- assistant ----------\n", - "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", - "---------- assistant ----------\n", - "29.69911764705882\n", - "---------- Summary ----------\n", - "Number of inner messages: 2\n", - "Total prompt tokens: 111\n", - "Total completion tokens: 22\n", - "Duration: 0.62 seconds\n" - ] - }, - { - "data": { - "text/plain": [ - "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import pandas as pd\n", - "from autogen_ext.tools.langchain import LangChainToolAdapter\n", - "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n", - "\n", - "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n", - "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n", - "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", - "agent = AssistantAgent(\n", - " \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", - ")\n", - "await Console(\n", - " agent.on_messages_stream(\n", - " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", - " ),\n", - " output_stats=True,\n", - ")\n", - "\n", - "await model_client.close()" + " # Close the connection to the model client.\n", + " await model_client.close()" ] }, { @@ -642,7 +554,7 @@ "\n", "You can stream the tokens generated by the model client by setting `model_client_stream=True`.\n", "This will cause the agent to yield {py:class}`~autogen_agentchat.messages.ModelClientStreamingChunkEvent` messages\n", - "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` and {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", + "in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream`.\n", "\n", "The underlying model API must support streaming tokens for this to work.\n", "Please check with your model provider to see if this is supported." 
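To make the distinction between streamed items concrete, here is a minimal sketch of consuming the token stream programmatically; the model name and task are placeholders, and any streaming-capable chat completion model should work. Chunks carry partial text, and the final `TaskResult` carries the complete message list:

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import ModelClientStreamingChunkEvent
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    # Placeholder model; substitute any model that supports token streaming.
    model_client = OpenAIChatCompletionClient(model="gpt-4o")
    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        model_client_stream=True,  # Yield ModelClientStreamingChunkEvent messages.
    )
    async for item in agent.run_stream(task="Name two cities in South America."):
        if isinstance(item, ModelClientStreamingChunkEvent):
            # Partial text: print without a trailing newline for a live-typing effect.
            print(item.content, end="", flush=True)
        elif isinstance(item, TaskResult):
            # Final item: the full run, including the concatenated response message.
            print(f"\nFinal message count: {len(item.messages)}")
    await model_client.close()


asyncio.run(main())
```

Printing chunks with `end=""` reproduces the incremental output shown in the streaming example below.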
@@ -650,30 +562,32 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' South' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Buenos' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Aires' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Argentina' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' São' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Paulo' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Brazil' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage'), inner_messages=[])\n" + "source='user' models_usage=None metadata={} content='Name two cities in South America' type='TextMessage'\n", + "source='assistant' models_usage=None metadata={} content='Two' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' cities' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' South' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' America' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' are' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' Buenos' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' Aires' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' Argentina' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' and' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' São' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' Paulo' 
type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' in' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content=' Brazil' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=None metadata={} content='.' type='ModelClientStreamingChunkEvent'\n", + "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) metadata={} content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.' type='TextMessage'\n", + "messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Name two cities in South America', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), metadata={}, content='Two cities in South America are Buenos Aires in Argentina and São Paulo in Brazil.', type='TextMessage')] stop_reason=None\n" ] } ], @@ -688,10 +602,7 @@ ")\n", "\n", "# Use an async function and asyncio.run() in a script.\n", - "async for message in streaming_assistant.on_messages_stream( # type: ignore\n", - " [TextMessage(content=\"Name two cities in South America\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - "):\n", + "async for message in streaming_assistant.run_stream(task=\"Name two cities in South America\"): # type: ignore\n", " print(message)" ] }, @@ -701,48 +612,7 @@ "source": [ "You can see the streaming chunks in the output above.\n", "The chunks are generated by the model client and are yielded by the agent as they are received.\n", - "The final response, the concatenation of all the chunks, is yielded right after the last chunk.\n", - "\n", - "Similarly, {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` will also yield the same streaming chunks,\n", - "followed by a full text message right after the last chunk." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='user' models_usage=None content='Name two cities in North America.' 
type='TextMessage'\n", - "source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'\n", - "source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content='Two cities in North America are New York City in the United States and Toronto in Canada.' type='TextMessage'\n", - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada.', type='TextMessage')], stop_reason=None)\n" - ] - } - ], - "source": [ - "async for message in streaming_assistant.run_stream(task=\"Name two cities in North America.\"): # type: ignore\n", - " print(message)" + "The final response, the concatenation of all the chunks, is yielded right after the last chunk." ] }, { @@ -854,7 +724,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.12.7" } }, "nbformat": 4,