From fa550c2c3616ecaeb73ccf869ee00a5d6ab5c02b Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Fri, 6 Dec 2024 01:23:05 -0800 Subject: [PATCH] fix docs (#4589) * fix doc on distributed runtime * Fix references * Update references * Fix import paths in user guide notebooks for code executor components --- .../Templates/MagenticOne/scenario.py | 2 +- .../GAIA/Templates/MagenticOne/scenario.py | 2 +- .../Templates/MagenticOne/scenario.py | 2 +- .../Templates/MagenticOne/scenario.py | 2 +- .../agents/_user_proxy_agent.py | 2 +- .../teams/_group_chat/_base_group_chat.py | 4 +- .../_group_chat/_sequential_routed_agent.py | 2 +- .../cookbook/tool-use-with-intervention.ipynb | 2 +- .../design-patterns/mixture-of-agents.ipynb | 1034 ++++++------- .../design-patterns/multi-agent-debate.ipynb | 1138 +++++++-------- .../src/user-guide/core-user-guide/faqs.md | 6 +- .../framework/agent-and-agent-runtime.ipynb | 548 +++---- .../command-line-code-executors.ipynb | 12 +- .../framework/distributed-agent-runtime.ipynb | 440 +++--- .../framework/message-and-communication.ipynb | 1282 ++++++++--------- .../framework/model-clients.ipynb | 4 +- .../core-user-guide/framework/tools.ipynb | 4 +- .../core-user-guide/quickstart.ipynb | 4 +- .../azure-container-code-executor.ipynb | 6 +- .../src/autogen_core/_agent_runtime.py | 4 +- .../src/autogen_core/_base_agent.py | 2 +- .../src/autogen_core/_routed_agent.py | 6 +- .../interface/magentic_one_helper.py | 4 +- 23 files changed, 2256 insertions(+), 2256 deletions(-) diff --git a/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py index 004bb0c38..3c8f02f8a 100644 --- a/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/AssistantBench/Templates/MagenticOne/scenario.py @@ -17,7 +17,7 @@ from autogen_core.components.models import ( LLMMessage, ) from autogen_core import DefaultSubscription, DefaultTopicId -from autogen_core.components.code_executor import LocalCommandLineCodeExecutor +from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_core.components.models import AssistantMessage from autogen_magentic_one.markdown_browser import MarkdownConverter, UnsupportedFormatException diff --git a/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py index 4d7135819..c4e80d586 100644 --- a/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/GAIA/Templates/MagenticOne/scenario.py @@ -18,7 +18,7 @@ from autogen_core.components.models import ( LLMMessage, ) from autogen_core import DefaultSubscription, DefaultTopicId -from autogen_core.components.code_executor import LocalCommandLineCodeExecutor +from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_core.components.models import AssistantMessage from autogen_magentic_one.markdown_browser import MarkdownConverter, UnsupportedFormatException diff --git a/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py index 63643c84a..ad9361e7d 100644 --- a/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/HumanEval/Templates/MagenticOne/scenario.py @@ -5,7 +5,7 
@@ from autogen_core import AgentId, AgentProxy, TopicId from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core import DefaultSubscription, DefaultTopicId -from autogen_core.components.code_executor import LocalCommandLineCodeExecutor +from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_core.components.models import ( UserMessage, ) diff --git a/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py b/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py index 159003459..0e6f84045 100644 --- a/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py +++ b/python/packages/agbench/benchmarks/WebArena/Templates/MagenticOne/scenario.py @@ -11,7 +11,7 @@ from autogen_core import AgentId, AgentProxy, TopicId from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core import DefaultSubscription, DefaultTopicId -from autogen_core.components.code_executor import LocalCommandLineCodeExecutor +from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_core.components.models import ( ChatCompletionClient, UserMessage, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index d4ba81a3a..6e92825af 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -28,7 +28,7 @@ class UserProxyAgent(BaseChatAgent): Using :class:`UserProxyAgent` puts a running team in a temporary blocked state until the user responds. So it is important to time out the user input - function and cancel using the :class:`~autogen_core.base.CancellationToken` if the user does not respond. + function and cancel using the :class:`~autogen_core.CancellationToken` if the user does not respond. The input function should also handle exceptions and return a default response if needed. For typical use cases that involve diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index 1e46c83fd..2c38f8449 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -210,7 +210,7 @@ class BaseGroupChat(Team, ABC): asyncio.run(main()) - Example using the :class:`~autogen_core.base.CancellationToken` to cancel the task: + Example using the :class:`~autogen_core.CancellationToken` to cancel the task: .. code-block:: python @@ -310,7 +310,7 @@ class BaseGroupChat(Team, ABC): asyncio.run(main()) - Example using the :class:`~autogen_core.base.CancellationToken` to cancel the task: + Example using the :class:`~autogen_core.CancellationToken` to cancel the task: .. 
code-block:: python diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_sequential_routed_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_sequential_routed_agent.py index ceb63d23c..572592f2d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_sequential_routed_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_sequential_routed_agent.py @@ -35,7 +35,7 @@ class FIFOLock: class SequentialRoutedAgent(RoutedAgent): - """A subclass of :class:`autogen_core.components.RoutedAgent` that ensures + """A subclass of :class:`autogen_core.RoutedAgent` that ensures messages are handled sequentially in the order they arrive.""" def __init__(self, description: str) -> None: diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb index 47b59ac67..4db35e70d 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/tool-use-with-intervention.ipynb @@ -163,7 +163,7 @@ "source": [ "In this example, we will use a tool for Python code execution.\n", "First, we create a Docker-based command-line code executor\n", - "using {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`,\n", + "using {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`,\n", "and then use it to instantiate a built-in Python code execution tool\n", "{py:class}`~autogen_core.components.tools.PythonCodeExecutionTool`\n", "that runs code in a Docker container." diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb index 0cfc6a36c..7abf03260 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb @@ -1,519 +1,519 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Mixture of Agents\n", - "\n", - "[Mixture of Agents](https://arxiv.org/abs/2406.04692) is a multi-agent design pattern\n", - "that models after the feed-forward neural network architecture.\n", - "\n", - "The pattern consists of two types of agents: worker agents and a single orchestrator agent.\n", - "Worker agents are organized into multiple layers, with each layer consisting of a fixed number of worker agents.\n", - "Messages from the worker agents in a previous layer are concatenated and sent to\n", - "all the worker agents in the next layer.\n", - "\n", - "This example implements the Mixture of Agents pattern using the core library\n", - "following the [original implementation](https://github.com/togethercomputer/moa) of multi-layer mixture of agents.\n", - "\n", - "Here is a high-level procedure overview of the pattern:\n", - "1. The orchestrator agent takes input a user task and first dispatches it to the worker agents in the first layer.\n", - "2. The worker agents in the first layer process the task and return the results to the orchestrator agent.\n", - "3. 
The orchestrator agent then synthesizes the results from the first layer and dispatches an updated task with the previous results to the worker agents in the second layer.\n", - "4. The process continues until the final layer is reached.\n", - "5. In the final layer, the orchestrator agent aggregates the results from previous layer and returns a single final result to the user.\n", - "\n", - "We use the direct messaging API {py:meth}`~autogen_core.base.BaseAgent.send_message` to implement this pattern.\n", - "This makes it easier to add more features like worker task cancellation and error handling in the future." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "from dataclasses import dataclass\n", - "from typing import List\n", - "\n", - "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", - "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", - "from autogen_ext.models import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message Protocol\n", - "\n", - "The agents communicate using the following messages:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class WorkerTask:\n", - " task: str\n", - " previous_results: List[str]\n", - "\n", - "\n", - "@dataclass\n", - "class WorkerTaskResult:\n", - " result: str\n", - "\n", - "\n", - "@dataclass\n", - "class UserTask:\n", - " task: str\n", - "\n", - "\n", - "@dataclass\n", - "class FinalResult:\n", - " result: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Worker Agent\n", - "\n", - "Each worker agent receives a task from the orchestrator agent and processes them\n", - "indepedently.\n", - "Once the task is completed, the worker agent returns the result." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "class WorkerAgent(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " model_client: ChatCompletionClient,\n", - " ) -> None:\n", - " super().__init__(description=\"Worker Agent\")\n", - " self._model_client = model_client\n", - "\n", - " @message_handler\n", - " async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n", - " if message.previous_results:\n", - " # If previous results are provided, we need to synthesize them to create a single prompt.\n", - " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", - " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n", - " model_result = await self._model_client.create(\n", - " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", - " )\n", - " else:\n", - " # If no previous results are provided, we can simply pass the user query to the model.\n", - " model_result = await self._model_client.create([UserMessage(content=message.task, source=\"user\")])\n", - " assert isinstance(model_result.content, str)\n", - " print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n", - " return WorkerTaskResult(result=model_result.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Orchestrator Agent\n", - "\n", - "The orchestrator agent receives tasks from the user and distributes them to the worker agents,\n", - "iterating over multiple layers of worker agents. Once all worker agents have processed the task,\n", - "the orchestrator agent aggregates the results and publishes the final result." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "class OrchestratorAgent(RoutedAgent):\n", - " def __init__(\n", - " self,\n", - " model_client: ChatCompletionClient,\n", - " worker_agent_types: List[str],\n", - " num_layers: int,\n", - " ) -> None:\n", - " super().__init__(description=\"Aggregator Agent\")\n", - " self._model_client = model_client\n", - " self._worker_agent_types = worker_agent_types\n", - " self._num_layers = num_layers\n", - "\n", - " @message_handler\n", - " async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n", - " # Create task for the first layer.\n", - " worker_task = WorkerTask(task=message.task, previous_results=[])\n", - " # Iterate over layers.\n", - " for i in range(self._num_layers - 1):\n", - " # Assign workers for this layer.\n", - " worker_ids = [\n", - " AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n", - " for j, worker_type in enumerate(self._worker_agent_types)\n", - " ]\n", - " # Dispatch tasks to workers.\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n", - " results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n", - " # Prepare task for the next layer.\n", - " worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n", - " # Perform final aggregation.\n", - " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n", - " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", - " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n", - " model_result = await self._model_client.create(\n", - " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", - " )\n", - " assert isinstance(model_result.content, str)\n", - " return FinalResult(result=model_result.content)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running Mixture of Agents\n", - "\n", - "Let's run the mixture of agents on a math task. You can change the task to make it more challenging, for example, by trying tasks from the [International Mathematical Olympiad](https://www.imo-official.org/problems.aspx)." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "task = (\n", - " \"I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. How many cookies does each person get?\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's set up the runtime with 3 layers of worker agents, each layer consisting of 3 worker agents.\n", - "We only need to register a single worker agent types, \"worker\", because we are using\n", - "the same model client configuration (i.e., gpt-4o-mini) for all worker agents.\n", - "If you want to use different models, you will need to register multiple worker agent types,\n", - "one for each model, and update the `worker_agent_types` list in the orchestrator agent's\n", - "factory function.\n", - "\n", - "The instances of worker agents are automatically created when the orchestrator agent\n", - "dispatches tasks to them.\n", - "See [Agent Identity and Lifecycle](../core-concepts/agent-identity-and-lifecycle.md)\n", - "for more information on agent lifecycle." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received task: I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. 
How many cookies does each person get?\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Dispatch to workers at layer 0\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_1:\n", - "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, you first need to determine the total number of parts in the ratio.\n", - "\n", - "Add the parts together:\n", - "\\[ 3 + 4 + 2 = 9 \\]\n", - "\n", - "Now, you can find the value of one part by dividing the total number of cookies by the total number of parts:\n", - "\\[ \\text{Value of one part} = \\frac{432}{9} = 48 \\]\n", - "\n", - "Now, multiply the value of one part by the number of parts for each person:\n", - "\n", - "- For Alice (3 parts):\n", - "\\[ 3 \\times 48 = 144 \\]\n", - "\n", - "- For Bob (4 parts):\n", - "\\[ 4 \\times 48 = 192 \\]\n", - "\n", - "- For Charlie (2 parts):\n", - "\\[ 2 \\times 48 = 96 \\]\n", - "\n", - "Thus, the number of cookies each person gets is:\n", - "- Alice: 144 cookies\n", - "- Bob: 192 cookies\n", - "- Charlie: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_0:\n", - "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, we will first determine the total number of parts in the ratio:\n", - "\n", - "\\[\n", - "3 + 4 + 2 = 9 \\text{ parts}\n", - "\\]\n", - "\n", - "Next, we calculate the value of one part by dividing the total number of cookies by the total number of parts:\n", - "\n", - "\\[\n", - "\\text{Value of one part} = \\frac{432}{9} = 48\n", - "\\]\n", - "\n", - "Now, we can find out how many cookies each person receives by multiplying the value of one part by the number of parts each person receives:\n", - "\n", - "- For Alice (3 parts):\n", - "\\[\n", - "3 \\times 48 = 144 \\text{ cookies}\n", - "\\]\n", - "\n", - "- For Bob (4 parts):\n", - "\\[\n", - "4 \\times 48 = 192 \\text{ cookies}\n", - "\\]\n", - "\n", - "- For Charlie (2 parts):\n", - "\\[\n", - "2 \\times 48 = 96 \\text{ cookies}\n", - "\\]\n", - "\n", - "Thus, the number of cookies each person gets is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_0/worker_2:\n", - "To divide the cookies in the ratio of 3:4:2, we first need to find the total parts in the ratio. 
\n", - "\n", - "The total parts are:\n", - "- Alice: 3 parts\n", - "- Bob: 4 parts\n", - "- Charlie: 2 parts\n", - "\n", - "Adding these parts together gives:\n", - "\\[ 3 + 4 + 2 = 9 \\text{ parts} \\]\n", - "\n", - "Next, we can determine how many cookies each part represents by dividing the total number of cookies by the total parts:\n", - "\\[ \\text{Cookies per part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part} \\]\n", - "\n", - "Now we can calculate the number of cookies for each person:\n", - "- Alice's share: \n", - "\\[ 3 \\text{ parts} \\times 48 \\text{ cookies/part} = 144 \\text{ cookies} \\]\n", - "- Bob's share: \n", - "\\[ 4 \\text{ parts} \\times 48 \\text{ cookies/part} = 192 \\text{ cookies} \\]\n", - "- Charlie's share: \n", - "\\[ 2 \\text{ parts} \\times 48 \\text{ cookies/part} = 96 \\text{ cookies} \\]\n", - "\n", - "So, the final distribution of cookies is:\n", - "- Alice: 144 cookies\n", - "- Bob: 192 cookies\n", - "- Charlie: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received results from workers at layer 0\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Dispatch to workers at layer 1\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_2:\n", - "To divide 432 cookies in the ratio of 3:4:2 among Alice, Bob, and Charlie, follow these steps:\n", - "\n", - "1. **Determine the total number of parts in the ratio**:\n", - " \\[\n", - " 3 + 4 + 2 = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Calculate the value of one part** by dividing the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432}{9} = 48\n", - " \\]\n", - "\n", - "3. **Calculate the number of cookies each person receives** by multiplying the value of one part by the number of parts each individual gets:\n", - " - **For Alice (3 parts)**:\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **For Bob (4 parts)**:\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **For Charlie (2 parts)**:\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "Thus, the final distribution of cookies is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_0:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we can follow these steps:\n", - "\n", - "1. **Calculate the Total Parts**: \n", - " Add the parts of the ratio together:\n", - " \\[\n", - " 3 + 4 + 2 = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part**: \n", - " Divide the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. 
**Calculate Each Person's Share**:\n", - " - **Alice's Share** (3 parts):\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share** (4 parts):\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie's Share** (2 parts):\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "4. **Final Distribution**:\n", - " - Alice: 144 cookies\n", - " - Bob: 192 cookies\n", - " - Charlie: 96 cookies\n", - "\n", - "Thus, the distribution of cookies is:\n", - "- **Alice**: 144 cookies\n", - "- **Bob**: 192 cookies\n", - "- **Charlie**: 96 cookies\n", - "--------------------------------------------------------------------------------\n", - "Worker-worker:default/layer_1/worker_1:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we first need to determine the total number of parts in this ratio.\n", - "\n", - "1. **Calculate Total Parts:**\n", - " \\[\n", - " 3 \\text{ (Alice)} + 4 \\text{ (Bob)} + 2 \\text{ (Charlie)} = 9 \\text{ parts}\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part:**\n", - " Next, we'll find out how many cookies correspond to one part by dividing the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. **Calculate the Share for Each Person:**\n", - " - **Alice's Share (3 parts):**\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share (4 parts):**\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie’s Share (2 parts):**\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "4. **Summary of the Distribution:**\n", - " - **Alice:** 144 cookies\n", - " - **Bob:** 192 cookies\n", - " - **Charlie:** 96 cookies\n", - "\n", - "In conclusion, Alice receives 144 cookies, Bob receives 192 cookies, and Charlie receives 96 cookies.\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Received results from workers at layer 1\n", - "--------------------------------------------------------------------------------\n", - "Orchestrator-orchestrator:default:\n", - "Performing final aggregation\n", - "--------------------------------------------------------------------------------\n", - "Final result:\n", - "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, follow these steps:\n", - "\n", - "1. **Calculate the Total Parts in the Ratio:**\n", - " Add the parts of the ratio together:\n", - " \\[\n", - " 3 + 4 + 2 = 9\n", - " \\]\n", - "\n", - "2. **Determine the Value of One Part:**\n", - " Divide the total number of cookies by the total number of parts:\n", - " \\[\n", - " \\text{Value of one part} = \\frac{432}{9} = 48 \\text{ cookies/part}\n", - " \\]\n", - "\n", - "3. 
**Calculate Each Person's Share:**\n", - " - **Alice's Share (3 parts):**\n", - " \\[\n", - " 3 \\times 48 = 144 \\text{ cookies}\n", - " \\]\n", - " - **Bob's Share (4 parts):**\n", - " \\[\n", - " 4 \\times 48 = 192 \\text{ cookies}\n", - " \\]\n", - " - **Charlie's Share (2 parts):**\n", - " \\[\n", - " 2 \\times 48 = 96 \\text{ cookies}\n", - " \\]\n", - "\n", - "Therefore, the distribution of cookies is as follows:\n", - "- **Alice:** 144 cookies\n", - "- **Bob:** 192 cookies\n", - "- **Charlie:** 96 cookies\n", - "\n", - "In summary, Alice gets 144 cookies, Bob gets 192 cookies, and Charlie gets 96 cookies.\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await WorkerAgent.register(\n", - " runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n", - ")\n", - "await OrchestratorAgent.register(\n", - " runtime,\n", - " \"orchestrator\",\n", - " lambda: OrchestratorAgent(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n", - " ),\n", - ")\n", - "\n", - "runtime.start()\n", - "result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n", - "await runtime.stop_when_idle()\n", - "print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mixture of Agents\n", + "\n", + "[Mixture of Agents](https://arxiv.org/abs/2406.04692) is a multi-agent design pattern\n", + "that models after the feed-forward neural network architecture.\n", + "\n", + "The pattern consists of two types of agents: worker agents and a single orchestrator agent.\n", + "Worker agents are organized into multiple layers, with each layer consisting of a fixed number of worker agents.\n", + "Messages from the worker agents in a previous layer are concatenated and sent to\n", + "all the worker agents in the next layer.\n", + "\n", + "This example implements the Mixture of Agents pattern using the core library\n", + "following the [original implementation](https://github.com/togethercomputer/moa) of multi-layer mixture of agents.\n", + "\n", + "Here is a high-level procedure overview of the pattern:\n", + "1. The orchestrator agent takes input a user task and first dispatches it to the worker agents in the first layer.\n", + "2. The worker agents in the first layer process the task and return the results to the orchestrator agent.\n", + "3. The orchestrator agent then synthesizes the results from the first layer and dispatches an updated task with the previous results to the worker agents in the second layer.\n", + "4. The process continues until the final layer is reached.\n", + "5. In the final layer, the orchestrator agent aggregates the results from previous layer and returns a single final result to the user.\n", + "\n", + "We use the direct messaging API {py:meth}`~autogen_core.BaseAgent.send_message` to implement this pattern.\n", + "This makes it easier to add more features like worker task cancellation and error handling in the future." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "from dataclasses import dataclass\n", + "from typing import List\n", + "\n", + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", + "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", + "from autogen_ext.models import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message Protocol\n", + "\n", + "The agents communicate using the following messages:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class WorkerTask:\n", + " task: str\n", + " previous_results: List[str]\n", + "\n", + "\n", + "@dataclass\n", + "class WorkerTaskResult:\n", + " result: str\n", + "\n", + "\n", + "@dataclass\n", + "class UserTask:\n", + " task: str\n", + "\n", + "\n", + "@dataclass\n", + "class FinalResult:\n", + " result: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Worker Agent\n", + "\n", + "Each worker agent receives a task from the orchestrator agent and processes them\n", + "indepedently.\n", + "Once the task is completed, the worker agent returns the result." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "class WorkerAgent(RoutedAgent):\n", + " def __init__(\n", + " self,\n", + " model_client: ChatCompletionClient,\n", + " ) -> None:\n", + " super().__init__(description=\"Worker Agent\")\n", + " self._model_client = model_client\n", + "\n", + " @message_handler\n", + " async def handle_task(self, message: WorkerTask, ctx: MessageContext) -> WorkerTaskResult:\n", + " if message.previous_results:\n", + " # If previous results are provided, we need to synthesize them to create a single prompt.\n", + " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", + " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(message.previous_results)])\n", + " model_result = await self._model_client.create(\n", + " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", + " )\n", + " else:\n", + " # If no previous results are provided, we can simply pass the user query to the model.\n", + " model_result = await self._model_client.create([UserMessage(content=message.task, source=\"user\")])\n", + " assert isinstance(model_result.content, str)\n", + " print(f\"{'-'*80}\\nWorker-{self.id}:\\n{model_result.content}\")\n", + " return WorkerTaskResult(result=model_result.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Orchestrator Agent\n", + "\n", + "The orchestrator agent receives tasks from the user and distributes them to the worker agents,\n", + "iterating over multiple layers of worker agents. Once all worker agents have processed the task,\n", + "the orchestrator agent aggregates the results and publishes the final result." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "class OrchestratorAgent(RoutedAgent):\n", + " def __init__(\n", + " self,\n", + " model_client: ChatCompletionClient,\n", + " worker_agent_types: List[str],\n", + " num_layers: int,\n", + " ) -> None:\n", + " super().__init__(description=\"Aggregator Agent\")\n", + " self._model_client = model_client\n", + " self._worker_agent_types = worker_agent_types\n", + " self._num_layers = num_layers\n", + "\n", + " @message_handler\n", + " async def handle_task(self, message: UserTask, ctx: MessageContext) -> FinalResult:\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived task: {message.task}\")\n", + " # Create task for the first layer.\n", + " worker_task = WorkerTask(task=message.task, previous_results=[])\n", + " # Iterate over layers.\n", + " for i in range(self._num_layers - 1):\n", + " # Assign workers for this layer.\n", + " worker_ids = [\n", + " AgentId(worker_type, f\"{self.id.key}/layer_{i}/worker_{j}\")\n", + " for j, worker_type in enumerate(self._worker_agent_types)\n", + " ]\n", + " # Dispatch tasks to workers.\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nDispatch to workers at layer {i}\")\n", + " results = await asyncio.gather(*[self.send_message(worker_task, worker_id) for worker_id in worker_ids])\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nReceived results from workers at layer {i}\")\n", + " # Prepare task for the next layer.\n", + " worker_task = WorkerTask(task=message.task, previous_results=[r.result for r in results])\n", + " # Perform final aggregation.\n", + " print(f\"{'-'*80}\\nOrchestrator-{self.id}:\\nPerforming final aggregation\")\n", + " system_prompt = \"You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.\\n\\nResponses from models:\"\n", + " system_prompt += \"\\n\" + \"\\n\\n\".join([f\"{i+1}. 
{r}\" for i, r in enumerate(worker_task.previous_results)])\n", + " model_result = await self._model_client.create(\n", + " [SystemMessage(content=system_prompt), UserMessage(content=message.task, source=\"user\")]\n", + " )\n", + " assert isinstance(model_result.content, str)\n", + " return FinalResult(result=model_result.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running Mixture of Agents\n", + "\n", + "Let's run the mixture of agents on a math task. You can change the task to make it more challenging, for example, by trying tasks from the [International Mathematical Olympiad](https://www.imo-official.org/problems.aspx)." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "task = (\n", + " \"I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. How many cookies does each person get?\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's set up the runtime with 3 layers of worker agents, each layer consisting of 3 worker agents.\n", + "We only need to register a single worker agent types, \"worker\", because we are using\n", + "the same model client configuration (i.e., gpt-4o-mini) for all worker agents.\n", + "If you want to use different models, you will need to register multiple worker agent types,\n", + "one for each model, and update the `worker_agent_types` list in the orchestrator agent's\n", + "factory function.\n", + "\n", + "The instances of worker agents are automatically created when the orchestrator agent\n", + "dispatches tasks to them.\n", + "See [Agent Identity and Lifecycle](../core-concepts/agent-identity-and-lifecycle.md)\n", + "for more information on agent lifecycle." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received task: I have 432 cookies, and divide them 3:4:2 between Alice, Bob, and Charlie. 
How many cookies does each person get?\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Dispatch to workers at layer 0\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_1:\n", + "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, you first need to determine the total number of parts in the ratio.\n", + "\n", + "Add the parts together:\n", + "\\[ 3 + 4 + 2 = 9 \\]\n", + "\n", + "Now, you can find the value of one part by dividing the total number of cookies by the total number of parts:\n", + "\\[ \\text{Value of one part} = \\frac{432}{9} = 48 \\]\n", + "\n", + "Now, multiply the value of one part by the number of parts for each person:\n", + "\n", + "- For Alice (3 parts):\n", + "\\[ 3 \\times 48 = 144 \\]\n", + "\n", + "- For Bob (4 parts):\n", + "\\[ 4 \\times 48 = 192 \\]\n", + "\n", + "- For Charlie (2 parts):\n", + "\\[ 2 \\times 48 = 96 \\]\n", + "\n", + "Thus, the number of cookies each person gets is:\n", + "- Alice: 144 cookies\n", + "- Bob: 192 cookies\n", + "- Charlie: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_0:\n", + "To divide 432 cookies in the ratio of 3:4:2 between Alice, Bob, and Charlie, we will first determine the total number of parts in the ratio:\n", + "\n", + "\\[\n", + "3 + 4 + 2 = 9 \\text{ parts}\n", + "\\]\n", + "\n", + "Next, we calculate the value of one part by dividing the total number of cookies by the total number of parts:\n", + "\n", + "\\[\n", + "\\text{Value of one part} = \\frac{432}{9} = 48\n", + "\\]\n", + "\n", + "Now, we can find out how many cookies each person receives by multiplying the value of one part by the number of parts each person receives:\n", + "\n", + "- For Alice (3 parts):\n", + "\\[\n", + "3 \\times 48 = 144 \\text{ cookies}\n", + "\\]\n", + "\n", + "- For Bob (4 parts):\n", + "\\[\n", + "4 \\times 48 = 192 \\text{ cookies}\n", + "\\]\n", + "\n", + "- For Charlie (2 parts):\n", + "\\[\n", + "2 \\times 48 = 96 \\text{ cookies}\n", + "\\]\n", + "\n", + "Thus, the number of cookies each person gets is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_0/worker_2:\n", + "To divide the cookies in the ratio of 3:4:2, we first need to find the total parts in the ratio. 
\n", + "\n", + "The total parts are:\n", + "- Alice: 3 parts\n", + "- Bob: 4 parts\n", + "- Charlie: 2 parts\n", + "\n", + "Adding these parts together gives:\n", + "\\[ 3 + 4 + 2 = 9 \\text{ parts} \\]\n", + "\n", + "Next, we can determine how many cookies each part represents by dividing the total number of cookies by the total parts:\n", + "\\[ \\text{Cookies per part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part} \\]\n", + "\n", + "Now we can calculate the number of cookies for each person:\n", + "- Alice's share: \n", + "\\[ 3 \\text{ parts} \\times 48 \\text{ cookies/part} = 144 \\text{ cookies} \\]\n", + "- Bob's share: \n", + "\\[ 4 \\text{ parts} \\times 48 \\text{ cookies/part} = 192 \\text{ cookies} \\]\n", + "- Charlie's share: \n", + "\\[ 2 \\text{ parts} \\times 48 \\text{ cookies/part} = 96 \\text{ cookies} \\]\n", + "\n", + "So, the final distribution of cookies is:\n", + "- Alice: 144 cookies\n", + "- Bob: 192 cookies\n", + "- Charlie: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received results from workers at layer 0\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Dispatch to workers at layer 1\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_2:\n", + "To divide 432 cookies in the ratio of 3:4:2 among Alice, Bob, and Charlie, follow these steps:\n", + "\n", + "1. **Determine the total number of parts in the ratio**:\n", + " \\[\n", + " 3 + 4 + 2 = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Calculate the value of one part** by dividing the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432}{9} = 48\n", + " \\]\n", + "\n", + "3. **Calculate the number of cookies each person receives** by multiplying the value of one part by the number of parts each individual gets:\n", + " - **For Alice (3 parts)**:\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **For Bob (4 parts)**:\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **For Charlie (2 parts)**:\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "Thus, the final distribution of cookies is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_0:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we can follow these steps:\n", + "\n", + "1. **Calculate the Total Parts**: \n", + " Add the parts of the ratio together:\n", + " \\[\n", + " 3 + 4 + 2 = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part**: \n", + " Divide the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. 
**Calculate Each Person's Share**:\n", + " - **Alice's Share** (3 parts):\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share** (4 parts):\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie's Share** (2 parts):\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "4. **Final Distribution**:\n", + " - Alice: 144 cookies\n", + " - Bob: 192 cookies\n", + " - Charlie: 96 cookies\n", + "\n", + "Thus, the distribution of cookies is:\n", + "- **Alice**: 144 cookies\n", + "- **Bob**: 192 cookies\n", + "- **Charlie**: 96 cookies\n", + "--------------------------------------------------------------------------------\n", + "Worker-worker:default/layer_1/worker_1:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, we first need to determine the total number of parts in this ratio.\n", + "\n", + "1. **Calculate Total Parts:**\n", + " \\[\n", + " 3 \\text{ (Alice)} + 4 \\text{ (Bob)} + 2 \\text{ (Charlie)} = 9 \\text{ parts}\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part:**\n", + " Next, we'll find out how many cookies correspond to one part by dividing the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432 \\text{ cookies}}{9 \\text{ parts}} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. **Calculate the Share for Each Person:**\n", + " - **Alice's Share (3 parts):**\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share (4 parts):**\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie’s Share (2 parts):**\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "4. **Summary of the Distribution:**\n", + " - **Alice:** 144 cookies\n", + " - **Bob:** 192 cookies\n", + " - **Charlie:** 96 cookies\n", + "\n", + "In conclusion, Alice receives 144 cookies, Bob receives 192 cookies, and Charlie receives 96 cookies.\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Received results from workers at layer 1\n", + "--------------------------------------------------------------------------------\n", + "Orchestrator-orchestrator:default:\n", + "Performing final aggregation\n", + "--------------------------------------------------------------------------------\n", + "Final result:\n", + "To divide 432 cookies among Alice, Bob, and Charlie in the ratio of 3:4:2, follow these steps:\n", + "\n", + "1. **Calculate the Total Parts in the Ratio:**\n", + " Add the parts of the ratio together:\n", + " \\[\n", + " 3 + 4 + 2 = 9\n", + " \\]\n", + "\n", + "2. **Determine the Value of One Part:**\n", + " Divide the total number of cookies by the total number of parts:\n", + " \\[\n", + " \\text{Value of one part} = \\frac{432}{9} = 48 \\text{ cookies/part}\n", + " \\]\n", + "\n", + "3. 
**Calculate Each Person's Share:**\n", + " - **Alice's Share (3 parts):**\n", + " \\[\n", + " 3 \\times 48 = 144 \\text{ cookies}\n", + " \\]\n", + " - **Bob's Share (4 parts):**\n", + " \\[\n", + " 4 \\times 48 = 192 \\text{ cookies}\n", + " \\]\n", + " - **Charlie's Share (2 parts):**\n", + " \\[\n", + " 2 \\times 48 = 96 \\text{ cookies}\n", + " \\]\n", + "\n", + "Therefore, the distribution of cookies is as follows:\n", + "- **Alice:** 144 cookies\n", + "- **Bob:** 192 cookies\n", + "- **Charlie:** 96 cookies\n", + "\n", + "In summary, Alice gets 144 cookies, Bob gets 192 cookies, and Charlie gets 96 cookies.\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await WorkerAgent.register(\n", + " runtime, \"worker\", lambda: WorkerAgent(model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"))\n", + ")\n", + "await OrchestratorAgent.register(\n", + " runtime,\n", + " \"orchestrator\",\n", + " lambda: OrchestratorAgent(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o\"), worker_agent_types=[\"worker\"] * 3, num_layers=3\n", + " ),\n", + ")\n", + "\n", + "runtime.start()\n", + "result = await runtime.send_message(UserTask(task=task), AgentId(\"orchestrator\", \"default\"))\n", + "await runtime.stop_when_idle()\n", + "print(f\"{'-'*80}\\nFinal result:\\n{result.result}\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb index 7bbdac84f..54f191186 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb @@ -1,571 +1,571 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Multi-Agent Debate\n", - "\n", - "Multi-Agent Debate is a multi-agent design pattern that simulates a multi-turn interaction \n", - "where in each turn, agents exchange their responses with each other, and refine \n", - "their responses based on the responses from other agents.\n", - "\n", - "This example shows an implementation of the multi-agent debate pattern for solving\n", - "math problems from the [GSM8K benchmark](https://huggingface.co/datasets/openai/gsm8k).\n", - "\n", - "There are of two types of agents in this pattern: solver agents and an aggregator agent.\n", - "The solver agents are connected in a sparse manner following the technique described in\n", - "[Improving Multi-Agent Debate with Sparse Communication Topology](https://arxiv.org/abs/2406.11776).\n", - "The solver agents are responsible for solving math problems and exchanging responses with each other.\n", - "The aggregator agent is responsible for distributing math problems to the solver agents,\n", - "waiting for their final responses, and aggregating the responses to get the final answer.\n", - "\n", - "The pattern works as follows:\n", - "1. User sends a math problem to the aggregator agent.\n", - "2. 
The aggregator agent distributes the problem to the solver agents.\n", - "3. Each solver agent processes the problem, and publishes a response to its neighbors.\n", - "4. Each solver agent uses the responses from its neighbors to refine its response, and publishes a new response.\n", - "5. Repeat step 4 for a fixed number of rounds. In the final round, each solver agent publishes a final response.\n", - "6. The aggregator agent uses majority voting to aggregate the final responses from all solver agents to get a final answer, and publishes the answer.\n", - "\n", - "We will be using the broadcast API, i.e., {py:meth}`~autogen_core.base.BaseAgent.publish_message`,\n", - "and we will be using topic and subscription to implement the communication topology.\n", - "Read about [Topics and Subscriptions](../core-concepts/topic-and-subscription.md) to understand how they work." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import re\n", - "from dataclasses import dataclass\n", - "from typing import Dict, List\n", - "\n", - "from autogen_core import (\n", - " DefaultTopicId,\n", - " MessageContext,\n", - " RoutedAgent,\n", - " SingleThreadedAgentRuntime,\n", - " TypeSubscription,\n", - " default_subscription,\n", - " message_handler,\n", - ")\n", - "from autogen_core.components.models import (\n", - " AssistantMessage,\n", - " ChatCompletionClient,\n", - " LLMMessage,\n", - " SystemMessage,\n", - " UserMessage,\n", - ")\n", - "from autogen_ext.models import OpenAIChatCompletionClient" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message Protocol\n", - "\n", - "First, we define the messages used by the agents.\n", - "`IntermediateSolverResponse` is the message exchanged among the solver agents in each round,\n", - "and `FinalSolverResponse` is the message published by the solver agents in the final round." - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [], - "source": [ - "@dataclass\n", - "class Question:\n", - " content: str\n", - "\n", - "\n", - "@dataclass\n", - "class Answer:\n", - " content: str\n", - "\n", - "\n", - "@dataclass\n", - "class SolverRequest:\n", - " content: str\n", - " question: str\n", - "\n", - "\n", - "@dataclass\n", - "class IntermediateSolverResponse:\n", - " content: str\n", - " question: str\n", - " answer: str\n", - " round: int\n", - "\n", - "\n", - "@dataclass\n", - "class FinalSolverResponse:\n", - " answer: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solver Agent\n", - "\n", - "The solver agent is responsible for solving math problems and exchanging responses with other solver agents.\n", - "Upon receiving a `SolverRequest`, the solver agent uses an LLM to generate an answer.\n", - "Then, it publishes a `IntermediateSolverResponse`\n", - "or a `FinalSolverResponse` based on the round number.\n", - "\n", - "The solver agent is given a topic type, which is used to indicate the topic\n", - "to which the agent should publish intermediate responses. This topic is subscribed\n", - "to by its neighbors to receive responses from this agent -- we will show\n", - "how this is done later.\n", - "\n", - "We use {py:meth}`~autogen_core.components.default_subscription` to let\n", - "solver agents subscribe to the default topic, which is used by the aggregator agent\n", - "to collect the final responses from the solver agents." 
- ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [], - "source": [ - "@default_subscription\n", - "class MathSolver(RoutedAgent):\n", - " def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n", - " super().__init__(\"A debator.\")\n", - " self._topic_type = topic_type\n", - " self._model_client = model_client\n", - " self._num_neighbors = num_neighbors\n", - " self._history: List[LLMMessage] = []\n", - " self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n", - " self._system_messages = [\n", - " SystemMessage(\n", - " content=(\n", - " \"You are a helpful assistant with expertise in mathematics and reasoning. \"\n", - " \"Your task is to assist in solving a math reasoning problem by providing \"\n", - " \"a clear and detailed solution. Limit your output within 100 words, \"\n", - " \"and your final answer should be a single numerical number, \"\n", - " \"in the form of {{answer}}, at the end of your response. \"\n", - " \"For example, 'The answer is {{42}}.'\"\n", - " )\n", - " )\n", - " ]\n", - " self._round = 0\n", - " self._max_round = max_round\n", - "\n", - " @message_handler\n", - " async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n", - " # Add the question to the memory.\n", - " self._history.append(UserMessage(content=message.content, source=\"user\"))\n", - " # Make an inference using the model.\n", - " model_result = await self._model_client.create(self._system_messages + self._history)\n", - " assert isinstance(model_result.content, str)\n", - " # Add the response to the memory.\n", - " self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n", - " print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n", - " # Extract the answer from the response.\n", - " match = re.search(r\"\\{\\{(\\-?\\d+(\\.\\d+)?)\\}\\}\", model_result.content)\n", - " if match is None:\n", - " raise ValueError(\"The model response does not contain the answer.\")\n", - " answer = match.group(1)\n", - " # Increment the counter.\n", - " self._round += 1\n", - " if self._round == self._max_round:\n", - " # If the counter reaches the maximum round, publishes a final response.\n", - " await self.publish_message(FinalSolverResponse(answer=answer), topic_id=DefaultTopicId())\n", - " else:\n", - " # Publish intermediate response to the topic associated with this solver.\n", - " await self.publish_message(\n", - " IntermediateSolverResponse(\n", - " content=model_result.content,\n", - " question=message.question,\n", - " answer=answer,\n", - " round=self._round,\n", - " ),\n", - " topic_id=DefaultTopicId(type=self._topic_type),\n", - " )\n", - "\n", - " @message_handler\n", - " async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n", - " # Add neighbor's response to the buffer.\n", - " self._buffer.setdefault(message.round, []).append(message)\n", - " # Check if all neighbors have responded.\n", - " if len(self._buffer[message.round]) == self._num_neighbors:\n", - " print(\n", - " f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n", - " )\n", - " # Prepare the prompt for the next question.\n", - " prompt = \"These are the solutions to the problem from other agents:\\n\"\n", - " for resp in self._buffer[message.round]:\n", - " prompt += f\"One agent solution: 
{resp.content}\\n\"\n", - " prompt += (\n", - " \"Using the solutions from other agents as additional information, \"\n", - " \"can you provide your answer to the math problem? \"\n", - " f\"The original math problem is {message.question}. \"\n", - " \"Your final answer should be a single numerical number, \"\n", - " \"in the form of {{answer}}, at the end of your response.\"\n", - " )\n", - " # Send the question to the agent itself to solve.\n", - " await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n", - " # Clear the buffer.\n", - " self._buffer.pop(message.round)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Aggregator Agent\n", - "\n", - "The aggregator agent is responsible for handling user question and \n", - "distributing math problems to the solver agents.\n", - "\n", - "The aggregator subscribes to the default topic using\n", - "{py:meth}`~autogen_core.components.default_subscription`. The default topic is used to\n", - "recieve user question, receive the final responses from the solver agents,\n", - "and publish the final answer back to the user.\n", - "\n", - "In a more complex application when you want to isolate the multi-agent debate into a\n", - "sub-component, you should use\n", - "{py:meth}`~autogen_core.components.type_subscription` to set a specific topic\n", - "type for the aggregator-solver communication, \n", - "and have the both the solver and aggregator publish and subscribe to that topic type." - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [], - "source": [ - "@default_subscription\n", - "class MathAggregator(RoutedAgent):\n", - " def __init__(self, num_solvers: int) -> None:\n", - " super().__init__(\"Math Aggregator\")\n", - " self._num_solvers = num_solvers\n", - " self._buffer: List[FinalSolverResponse] = []\n", - "\n", - " @message_handler\n", - " async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n", - " print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n", - " prompt = (\n", - " f\"Can you solve the following math problem?\\n{message.content}\\n\"\n", - " \"Explain your reasoning. 
Your final answer should be a single numerical number, \"\n", - " \"in the form of {{answer}}, at the end of your response.\"\n", - " )\n", - " print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n", - " await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n", - "\n", - " @message_handler\n", - " async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n", - " self._buffer.append(message)\n", - " if len(self._buffer) == self._num_solvers:\n", - " print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n", - " # Find the majority answer.\n", - " answers = [resp.answer for resp in self._buffer]\n", - " majority_answer = max(set(answers), key=answers.count)\n", - " # Publish the aggregated response.\n", - " await self.publish_message(Answer(content=majority_answer), topic_id=DefaultTopicId())\n", - " # Clear the responses.\n", - " self._buffer.clear()\n", - " print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setting Up a Debate\n", - "\n", - "We will now set up a multi-agent debate with 4 solver agents and 1 aggregator agent.\n", - "The solver agents will be connected in a sparse manner as illustrated in the figure\n", - "below:\n", - "\n", - "```\n", - "A --- B\n", - "| |\n", - "| |\n", - "C --- D\n", - "```\n", - "\n", - "Each solver agent is connected to two other solver agents. \n", - "For example, agent A is connected to agents B and C.\n", - "\n", - "Let's first create a runtime and register the agent types." - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='MathAggregator')" - ] - }, - "execution_count": 42, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverA\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverA\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverB\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverB\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverC\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverC\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathSolver.register(\n", - " runtime,\n", - " \"MathSolverD\",\n", - " lambda: MathSolver(\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " topic_type=\"MathSolverD\",\n", - " num_neighbors=2,\n", - " max_round=3,\n", - " ),\n", - ")\n", - "await MathAggregator.register(runtime, \"MathAggregator\", lambda: MathAggregator(num_solvers=4))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we will create the solver agent topology using {py:class}`~autogen_core.components.TypeSubscription`,\n", - "which maps each solver agent's publishing topic type to its neighbors' agent types." 
- ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [], - "source": [ - "# Subscriptions for topic published to by MathSolverA.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverD\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverB\"))\n", - "\n", - "# Subscriptions for topic published to by MathSolverB.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverA\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverC\"))\n", - "\n", - "# Subscriptions for topic published to by MathSolverC.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverB\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverD\"))\n", - "\n", - "# Subscriptions for topic published to by MathSolverD.\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverC\"))\n", - "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverA\"))\n", - "\n", - "# All solvers and the aggregator subscribe to the default topic." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solving Math Problems\n", - "\n", - "Now let's run the debate to solve a math problem.\n", - "We publish a `SolverRequest` to the default topic, \n", - "and the aggregator agent will start the debate." - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default received question:\n", - "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?\n", - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default publishes initial solver request.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 0:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. To find the total number of clips sold in April and May, we add the amounts: 48 (April) + 24 (May) = 72 clips. \n", - "\n", - "Thus, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 0:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many clips, which is 48 / 2 = 24 clips. To find the total clips sold in April and May, we add both amounts: \n", - "\n", - "48 (April) + 24 (May) = 72.\n", - "\n", - "Thus, the total number of clips sold altogether is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 0:\n", - "Natalia sold 48 clips in April. In May, she sold half as many, which is \\( \\frac{48}{2} = 24 \\) clips. 
To find the total clips sold in both months, we add the clips sold in April and May together:\n", - "\n", - "\\[ 48 + 24 = 72 \\]\n", - "\n", - "Thus, Natalia sold a total of 72 clips.\n", - "\n", - "The answer is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 0:\n", - "In April, Natalia sold clips to 48 friends. In May, she sold half as many, which is calculated as follows:\n", - "\n", - "Half of 48 is \\( 48 \\div 2 = 24 \\).\n", - "\n", - "Now, to find the total clips sold in April and May, we add the totals from both months:\n", - "\n", - "\\( 48 + 24 = 72 \\).\n", - "\n", - "Thus, the total number of clips Natalia sold altogether in April and May is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 1:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 1:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. The total number of clips sold in April and May is calculated by adding the two amounts: 48 (April) + 24 (May) = 72 clips. \n", - "\n", - "Therefore, the answer is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 1:\n", - "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is 48 / 2 = 24 clips. To find the total clips sold in both months, we sum the clips from April and May: \n", - "\n", - "48 (April) + 24 (May) = 72.\n", - "\n", - "Thus, Natalia sold a total of {{72}} clips. \n", - "\n", - "The answer is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 1:\n", - "Natalia sold 48 clips in April. In May, she sold half of that, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold, we add the clips sold in both months:\n", - "\n", - "\\[ 48 + 24 = 72 \\]\n", - "\n", - "Therefore, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 1:\n", - "In April, Natalia sold 48 clips. In May, she sold half that amount, which is 48 / 2 = 24 clips. 
To find the total clips sold in both months, we add the amounts: \n", - "\n", - "48 (April) + 24 (May) = 72.\n", - "\n", - "Therefore, the total number of clips sold altogether by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 2:\n", - "Received all responses from 2 neighbors.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverA:default round 2:\n", - "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold in both months, we add the amounts from April and May:\n", - "\n", - "\\( 48 + 24 = 72 \\).\n", - "\n", - "Thus, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverC:default round 2:\n", - "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold in both months, we add the clips sold in April and May: \n", - "\n", - "48 (April) + 24 (May) = 72. \n", - "\n", - "Thus, the total number of clips sold altogether by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverB:default round 2:\n", - "In April, Natalia sold 48 clips. In May, she sold half as many, calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold over both months, we sum the totals: \n", - "\n", - "\\( 48 (April) + 24 (May) = 72 \\).\n", - "\n", - "Therefore, the total number of clips Natalia sold is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Solver MathSolverD:default round 2:\n", - "To solve the problem, we know that Natalia sold 48 clips in April. In May, she sold half that amount, which is calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold over both months, we add the two amounts together:\n", - "\n", - "\\[ 48 + 24 = 72 \\]\n", - "\n", - "Thus, the total number of clips sold by Natalia is {{72}}.\n", - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default received all final answers from 4 solvers.\n", - "--------------------------------------------------------------------------------\n", - "Aggregator MathAggregator:default publishes final answer:\n", - "72\n" - ] - } - ], - "source": [ - "question = \"Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. 
How many clips did Natalia sell altogether in April and May?\"\n", - "runtime.start()\n", - "await runtime.publish_message(Question(content=question), DefaultTopicId())\n", - "await runtime.stop_when_idle()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multi-Agent Debate\n", + "\n", + "Multi-Agent Debate is a multi-agent design pattern that simulates a multi-turn interaction \n", + "where in each turn, agents exchange their responses with each other, and refine \n", + "their responses based on the responses from other agents.\n", + "\n", + "This example shows an implementation of the multi-agent debate pattern for solving\n", + "math problems from the [GSM8K benchmark](https://huggingface.co/datasets/openai/gsm8k).\n", + "\n", + "There are of two types of agents in this pattern: solver agents and an aggregator agent.\n", + "The solver agents are connected in a sparse manner following the technique described in\n", + "[Improving Multi-Agent Debate with Sparse Communication Topology](https://arxiv.org/abs/2406.11776).\n", + "The solver agents are responsible for solving math problems and exchanging responses with each other.\n", + "The aggregator agent is responsible for distributing math problems to the solver agents,\n", + "waiting for their final responses, and aggregating the responses to get the final answer.\n", + "\n", + "The pattern works as follows:\n", + "1. User sends a math problem to the aggregator agent.\n", + "2. The aggregator agent distributes the problem to the solver agents.\n", + "3. Each solver agent processes the problem, and publishes a response to its neighbors.\n", + "4. Each solver agent uses the responses from its neighbors to refine its response, and publishes a new response.\n", + "5. Repeat step 4 for a fixed number of rounds. In the final round, each solver agent publishes a final response.\n", + "6. The aggregator agent uses majority voting to aggregate the final responses from all solver agents to get a final answer, and publishes the answer.\n", + "\n", + "We will be using the broadcast API, i.e., {py:meth}`~autogen_core.BaseAgent.publish_message`,\n", + "and we will be using topic and subscription to implement the communication topology.\n", + "Read about [Topics and Subscriptions](../core-concepts/topic-and-subscription.md) to understand how they work." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "from dataclasses import dataclass\n", + "from typing import Dict, List\n", + "\n", + "from autogen_core import (\n", + " DefaultTopicId,\n", + " MessageContext,\n", + " RoutedAgent,\n", + " SingleThreadedAgentRuntime,\n", + " TypeSubscription,\n", + " default_subscription,\n", + " message_handler,\n", + ")\n", + "from autogen_core.components.models import (\n", + " AssistantMessage,\n", + " ChatCompletionClient,\n", + " LLMMessage,\n", + " SystemMessage,\n", + " UserMessage,\n", + ")\n", + "from autogen_ext.models import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message Protocol\n", + "\n", + "First, we define the messages used by the agents.\n", + "`IntermediateSolverResponse` is the message exchanged among the solver agents in each round,\n", + "and `FinalSolverResponse` is the message published by the solver agents in the final round." + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class Question:\n", + " content: str\n", + "\n", + "\n", + "@dataclass\n", + "class Answer:\n", + " content: str\n", + "\n", + "\n", + "@dataclass\n", + "class SolverRequest:\n", + " content: str\n", + " question: str\n", + "\n", + "\n", + "@dataclass\n", + "class IntermediateSolverResponse:\n", + " content: str\n", + " question: str\n", + " answer: str\n", + " round: int\n", + "\n", + "\n", + "@dataclass\n", + "class FinalSolverResponse:\n", + " answer: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Solver Agent\n", + "\n", + "The solver agent is responsible for solving math problems and exchanging responses with other solver agents.\n", + "Upon receiving a `SolverRequest`, the solver agent uses an LLM to generate an answer.\n", + "Then, it publishes a `IntermediateSolverResponse`\n", + "or a `FinalSolverResponse` based on the round number.\n", + "\n", + "The solver agent is given a topic type, which is used to indicate the topic\n", + "to which the agent should publish intermediate responses. This topic is subscribed\n", + "to by its neighbors to receive responses from this agent -- we will show\n", + "how this is done later.\n", + "\n", + "We use {py:meth}`~autogen_core.components.default_subscription` to let\n", + "solver agents subscribe to the default topic, which is used by the aggregator agent\n", + "to collect the final responses from the solver agents." + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "@default_subscription\n", + "class MathSolver(RoutedAgent):\n", + " def __init__(self, model_client: ChatCompletionClient, topic_type: str, num_neighbors: int, max_round: int) -> None:\n", + " super().__init__(\"A debator.\")\n", + " self._topic_type = topic_type\n", + " self._model_client = model_client\n", + " self._num_neighbors = num_neighbors\n", + " self._history: List[LLMMessage] = []\n", + " self._buffer: Dict[int, List[IntermediateSolverResponse]] = {}\n", + " self._system_messages = [\n", + " SystemMessage(\n", + " content=(\n", + " \"You are a helpful assistant with expertise in mathematics and reasoning. \"\n", + " \"Your task is to assist in solving a math reasoning problem by providing \"\n", + " \"a clear and detailed solution. 
Limit your output within 100 words, \"\n", + " \"and your final answer should be a single numerical number, \"\n", + " \"in the form of {{answer}}, at the end of your response. \"\n", + " \"For example, 'The answer is {{42}}.'\"\n", + " )\n", + " )\n", + " ]\n", + " self._round = 0\n", + " self._max_round = max_round\n", + "\n", + " @message_handler\n", + " async def handle_request(self, message: SolverRequest, ctx: MessageContext) -> None:\n", + " # Add the question to the memory.\n", + " self._history.append(UserMessage(content=message.content, source=\"user\"))\n", + " # Make an inference using the model.\n", + " model_result = await self._model_client.create(self._system_messages + self._history)\n", + " assert isinstance(model_result.content, str)\n", + " # Add the response to the memory.\n", + " self._history.append(AssistantMessage(content=model_result.content, source=self.metadata[\"type\"]))\n", + " print(f\"{'-'*80}\\nSolver {self.id} round {self._round}:\\n{model_result.content}\")\n", + " # Extract the answer from the response.\n", + " match = re.search(r\"\\{\\{(\\-?\\d+(\\.\\d+)?)\\}\\}\", model_result.content)\n", + " if match is None:\n", + " raise ValueError(\"The model response does not contain the answer.\")\n", + " answer = match.group(1)\n", + " # Increment the counter.\n", + " self._round += 1\n", + " if self._round == self._max_round:\n", + " # If the counter reaches the maximum round, publishes a final response.\n", + " await self.publish_message(FinalSolverResponse(answer=answer), topic_id=DefaultTopicId())\n", + " else:\n", + " # Publish intermediate response to the topic associated with this solver.\n", + " await self.publish_message(\n", + " IntermediateSolverResponse(\n", + " content=model_result.content,\n", + " question=message.question,\n", + " answer=answer,\n", + " round=self._round,\n", + " ),\n", + " topic_id=DefaultTopicId(type=self._topic_type),\n", + " )\n", + "\n", + " @message_handler\n", + " async def handle_response(self, message: IntermediateSolverResponse, ctx: MessageContext) -> None:\n", + " # Add neighbor's response to the buffer.\n", + " self._buffer.setdefault(message.round, []).append(message)\n", + " # Check if all neighbors have responded.\n", + " if len(self._buffer[message.round]) == self._num_neighbors:\n", + " print(\n", + " f\"{'-'*80}\\nSolver {self.id} round {message.round}:\\nReceived all responses from {self._num_neighbors} neighbors.\"\n", + " )\n", + " # Prepare the prompt for the next question.\n", + " prompt = \"These are the solutions to the problem from other agents:\\n\"\n", + " for resp in self._buffer[message.round]:\n", + " prompt += f\"One agent solution: {resp.content}\\n\"\n", + " prompt += (\n", + " \"Using the solutions from other agents as additional information, \"\n", + " \"can you provide your answer to the math problem? \"\n", + " f\"The original math problem is {message.question}. 
\"\n", + " \"Your final answer should be a single numerical number, \"\n", + " \"in the form of {{answer}}, at the end of your response.\"\n", + " )\n", + " # Send the question to the agent itself to solve.\n", + " await self.send_message(SolverRequest(content=prompt, question=message.question), self.id)\n", + " # Clear the buffer.\n", + " self._buffer.pop(message.round)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Aggregator Agent\n", + "\n", + "The aggregator agent is responsible for handling user question and \n", + "distributing math problems to the solver agents.\n", + "\n", + "The aggregator subscribes to the default topic using\n", + "{py:meth}`~autogen_core.components.default_subscription`. The default topic is used to\n", + "recieve user question, receive the final responses from the solver agents,\n", + "and publish the final answer back to the user.\n", + "\n", + "In a more complex application when you want to isolate the multi-agent debate into a\n", + "sub-component, you should use\n", + "{py:meth}`~autogen_core.components.type_subscription` to set a specific topic\n", + "type for the aggregator-solver communication, \n", + "and have the both the solver and aggregator publish and subscribe to that topic type." + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "@default_subscription\n", + "class MathAggregator(RoutedAgent):\n", + " def __init__(self, num_solvers: int) -> None:\n", + " super().__init__(\"Math Aggregator\")\n", + " self._num_solvers = num_solvers\n", + " self._buffer: List[FinalSolverResponse] = []\n", + "\n", + " @message_handler\n", + " async def handle_question(self, message: Question, ctx: MessageContext) -> None:\n", + " print(f\"{'-'*80}\\nAggregator {self.id} received question:\\n{message.content}\")\n", + " prompt = (\n", + " f\"Can you solve the following math problem?\\n{message.content}\\n\"\n", + " \"Explain your reasoning. 
Your final answer should be a single numerical number, \"\n", + " \"in the form of {{answer}}, at the end of your response.\"\n", + " )\n", + " print(f\"{'-'*80}\\nAggregator {self.id} publishes initial solver request.\")\n", + " await self.publish_message(SolverRequest(content=prompt, question=message.content), topic_id=DefaultTopicId())\n", + "\n", + " @message_handler\n", + " async def handle_final_solver_response(self, message: FinalSolverResponse, ctx: MessageContext) -> None:\n", + " self._buffer.append(message)\n", + " if len(self._buffer) == self._num_solvers:\n", + " print(f\"{'-'*80}\\nAggregator {self.id} received all final answers from {self._num_solvers} solvers.\")\n", + " # Find the majority answer.\n", + " answers = [resp.answer for resp in self._buffer]\n", + " majority_answer = max(set(answers), key=answers.count)\n", + " # Publish the aggregated response.\n", + " await self.publish_message(Answer(content=majority_answer), topic_id=DefaultTopicId())\n", + " # Clear the responses.\n", + " self._buffer.clear()\n", + " print(f\"{'-'*80}\\nAggregator {self.id} publishes final answer:\\n{majority_answer}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting Up a Debate\n", + "\n", + "We will now set up a multi-agent debate with 4 solver agents and 1 aggregator agent.\n", + "The solver agents will be connected in a sparse manner as illustrated in the figure\n", + "below:\n", + "\n", + "```\n", + "A --- B\n", + "| |\n", + "| |\n", + "C --- D\n", + "```\n", + "\n", + "Each solver agent is connected to two other solver agents. \n", + "For example, agent A is connected to agents B and C.\n", + "\n", + "Let's first create a runtime and register the agent types." + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='MathAggregator')" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverA\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverA\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverB\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverB\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverC\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverC\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathSolver.register(\n", + " runtime,\n", + " \"MathSolverD\",\n", + " lambda: MathSolver(\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " topic_type=\"MathSolverD\",\n", + " num_neighbors=2,\n", + " max_round=3,\n", + " ),\n", + ")\n", + "await MathAggregator.register(runtime, \"MathAggregator\", lambda: MathAggregator(num_solvers=4))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we will create the solver agent topology using {py:class}`~autogen_core.components.TypeSubscription`,\n", + "which maps each solver agent's publishing topic type to its neighbors' agent types." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [], + "source": [ + "# Subscriptions for topic published to by MathSolverA.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverD\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverA\", \"MathSolverB\"))\n", + "\n", + "# Subscriptions for topic published to by MathSolverB.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverA\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverB\", \"MathSolverC\"))\n", + "\n", + "# Subscriptions for topic published to by MathSolverC.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverB\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverC\", \"MathSolverD\"))\n", + "\n", + "# Subscriptions for topic published to by MathSolverD.\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverC\"))\n", + "await runtime.add_subscription(TypeSubscription(\"MathSolverD\", \"MathSolverA\"))\n", + "\n", + "# All solvers and the aggregator subscribe to the default topic." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Solving Math Problems\n", + "\n", + "Now let's run the debate to solve a math problem.\n", + "We publish a `SolverRequest` to the default topic, \n", + "and the aggregator agent will start the debate." + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default received question:\n", + "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May?\n", + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default publishes initial solver request.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 0:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. To find the total number of clips sold in April and May, we add the amounts: 48 (April) + 24 (May) = 72 clips. \n", + "\n", + "Thus, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 0:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many clips, which is 48 / 2 = 24 clips. To find the total clips sold in April and May, we add both amounts: \n", + "\n", + "48 (April) + 24 (May) = 72.\n", + "\n", + "Thus, the total number of clips sold altogether is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 0:\n", + "Natalia sold 48 clips in April. In May, she sold half as many, which is \\( \\frac{48}{2} = 24 \\) clips. 
To find the total clips sold in both months, we add the clips sold in April and May together:\n", + "\n", + "\\[ 48 + 24 = 72 \\]\n", + "\n", + "Thus, Natalia sold a total of 72 clips.\n", + "\n", + "The answer is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 0:\n", + "In April, Natalia sold clips to 48 friends. In May, she sold half as many, which is calculated as follows:\n", + "\n", + "Half of 48 is \\( 48 \\div 2 = 24 \\).\n", + "\n", + "Now, to find the total clips sold in April and May, we add the totals from both months:\n", + "\n", + "\\( 48 + 24 = 72 \\).\n", + "\n", + "Thus, the total number of clips Natalia sold altogether in April and May is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 1:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 1:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many, which is 48 / 2 = 24 clips. The total number of clips sold in April and May is calculated by adding the two amounts: 48 (April) + 24 (May) = 72 clips. \n", + "\n", + "Therefore, the answer is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 1:\n", + "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is 48 / 2 = 24 clips. To find the total clips sold in both months, we sum the clips from April and May: \n", + "\n", + "48 (April) + 24 (May) = 72.\n", + "\n", + "Thus, Natalia sold a total of {{72}} clips. \n", + "\n", + "The answer is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 1:\n", + "Natalia sold 48 clips in April. In May, she sold half of that, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold, we add the clips sold in both months:\n", + "\n", + "\\[ 48 + 24 = 72 \\]\n", + "\n", + "Therefore, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 1:\n", + "In April, Natalia sold 48 clips. In May, she sold half that amount, which is 48 / 2 = 24 clips. 
To find the total clips sold in both months, we add the amounts: \n", + "\n", + "48 (April) + 24 (May) = 72.\n", + "\n", + "Therefore, the total number of clips sold altogether by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 2:\n", + "Received all responses from 2 neighbors.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverA:default round 2:\n", + "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold in both months, we add the amounts from April and May:\n", + "\n", + "\\( 48 + 24 = 72 \\).\n", + "\n", + "Thus, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverC:default round 2:\n", + "In April, Natalia sold 48 clips. In May, she sold half of that amount, which is \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold in both months, we add the clips sold in April and May: \n", + "\n", + "48 (April) + 24 (May) = 72. \n", + "\n", + "Thus, the total number of clips sold altogether by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverB:default round 2:\n", + "In April, Natalia sold 48 clips. In May, she sold half as many, calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total clips sold over both months, we sum the totals: \n", + "\n", + "\\( 48 (April) + 24 (May) = 72 \\).\n", + "\n", + "Therefore, the total number of clips Natalia sold is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Solver MathSolverD:default round 2:\n", + "To solve the problem, we know that Natalia sold 48 clips in April. In May, she sold half that amount, which is calculated as \\( 48 \\div 2 = 24 \\) clips. To find the total number of clips sold over both months, we add the two amounts together:\n", + "\n", + "\\[ 48 + 24 = 72 \\]\n", + "\n", + "Thus, the total number of clips sold by Natalia is {{72}}.\n", + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default received all final answers from 4 solvers.\n", + "--------------------------------------------------------------------------------\n", + "Aggregator MathAggregator:default publishes final answer:\n", + "72\n" + ] + } + ], + "source": [ + "question = \"Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. 
How many clips did Natalia sell altogether in April and May?\"\n", + "runtime.start()\n", + "await runtime.publish_message(Question(content=question), DefaultTopicId())\n", + "await runtime.stop_when_idle()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md index fd77f3bce..d1fae6cad 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/faqs.md @@ -2,7 +2,7 @@ ## How do I get the underlying agent instance? -Agents might be distributed across multiple machines, so the underlying agent instance is intentionally discouraged from being accessed. If the agent is definitely running on the same machine, you can access the agent instance by calling {py:meth}`autogen_core.base.AgentRuntime.try_get_underlying_agent_instance` on the `AgentRuntime`. If the agent is not available this will throw an exception. +Agents might be distributed across multiple machines, so the underlying agent instance is intentionally discouraged from being accessed. If the agent is definitely running on the same machine, you can access the agent instance by calling {py:meth}`autogen_core.AgentRuntime.try_get_underlying_agent_instance` on the `AgentRuntime`. If the agent is not available this will throw an exception. ## How do I call call a function on an agent? @@ -12,7 +12,7 @@ This allows your agent to work in a distributed environment a well as a local on ## Why do I need to use a factory to register an agent? -An {py:class}`autogen_core.base.AgentId` is composed of a `type` and a `key`. The type corresponds to the factory that created the agent, and the key is a runtime, data dependent key for this instance. +An {py:class}`autogen_core.AgentId` is composed of a `type` and a `key`. The type corresponds to the factory that created the agent, and the key is a runtime, data dependent key for this instance. The key can correspond to a user id, a session id, or could just be "default" if you don't need to differentiate between instances. Each unique key will create a new instance of the agent, based on the factory provided. This allows the system to automatically scale to different instances of the same agent, and to manage the lifecycle of each instance independently based on how you choose to handle keys in your application. 
@@ -57,4 +57,4 @@ client = OpenAIChatCompletionClient( "json_output": False, } ) -``` \ No newline at end of file +``` diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb index 618700c8f..220ea22f3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/agent-and-agent-runtime.ipynb @@ -1,276 +1,276 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Agent and Agent Runtime\n", - "\n", - "In this and the following section, we focus on the core concepts of AutoGen:\n", - "agents, agent runtime, messages, and communication.\n", - "You will not find any AI models or tools here, just the foundational\n", - "building blocks for building multi-agent applications." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "An agent in AutoGen is an entity defined by the base class {py:class}`autogen_core.base.BaseAgent`.\n", - "It has a unique identifier of the type {py:class}`autogen_core.base.AgentId`,\n", - "a metadata dictionary of the type {py:class}`autogen_core.base.AgentMetadata`,\n", - "and method for handling messages {py:meth}`autogen_core.base.BaseAgent.on_message_impl`.\n", - "\n", - "An agent runtime is the execution environment for agents in AutoGen.\n", - "Similar to the runtime environment of a programming language,\n", - "an agent runtime provides the necessary infrastructure to facilitate communication\n", - "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", - "debugging.\n", - "For local development, developers can use {py:class}`~autogen_core.application.SingleThreadedAgentRuntime`,\n", - "which can be embedded in a Python application.\n", - "\n", - "```{note}\n", - "Agents are not directly instantiated and managed by application code.\n", - "Instead, they are created by the runtime when needed and managed by the runtime.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Implementing an Agent\n", - "\n", - "To implement an agent, the developer must subclass the {py:class}`~autogen_core.base.BaseAgent` class\n", - "and implement the {py:meth}`~autogen_core.base.BaseAgent.on_message_impl` method.\n", - "This method is invoked when the agent receives a message. For example,\n", - "the following agent handles a simple message type and prints the message it receives:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "from autogen_core import AgentId, BaseAgent, MessageContext\n", - "\n", - "\n", - "@dataclass\n", - "class MyMessageType:\n", - " content: str\n", - "\n", - "\n", - "class MyAgent(BaseAgent):\n", - " def __init__(self) -> None:\n", - " super().__init__(\"MyAgent\")\n", - "\n", - " async def on_message_impl(self, message: MyMessageType, ctx: MessageContext) -> None:\n", - " print(f\"Received message: {message.content}\") # type: ignore" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This agent only handles `MyMessageType` messages. 
\n", - "To handle multiple message types, developers can subclass the {py:class}`~autogen_core.components.RoutedAgent` class\n", - "which provides an easy-to use API to implement different message handlers for different message types.\n", - "See the next section on [message and communication](./message-and-communication.ipynb)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Registering Agent Type\n", - "\n", - "To make agents available to the runtime, developers can use the\n", - "{py:meth}`~autogen_core.base.BaseAgent.register` class method of the\n", - "{py:class}`~autogen_core.base.BaseAgent` class.\n", - "The process of registration associates an agent type, which is uniquely identified by a string, \n", - "and a factory function\n", - "that creates an instance of the agent type of the given class.\n", - "The factory function is used to allow automatic creation of agent instances \n", - "when they are needed.\n", - "\n", - "Agent type ({py:class}`~autogen_core.base.AgentType`) is not the same as the agent class. In this example,\n", - "the agent type is `AgentType(\"my_agent\")` and the agent class is the Python class `MyAgent`.\n", - "The factory function is expected to return an instance of the agent class \n", - "on which the {py:meth}`~autogen_core.base.BaseAgent.register` class method is invoked.\n", - "Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n", - "to learn more about agent type and identity.\n", - "\n", - "```{note}\n", - "Different agent types can be registered with factory functions that return \n", - "the same agent class. For example, in the factory functions, \n", - "variations of the constructor parameters\n", - "can be used to create different instances of the same agent class.\n", - "```\n", - "\n", - "To register an agent type with the \n", - "{py:class}`~autogen_core.application.SingleThreadedAgentRuntime`,\n", - "the following code can be used:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='my_agent')" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_core import SingleThreadedAgentRuntime\n", - "\n", - "runtime = SingleThreadedAgentRuntime()\n", - "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once an agent type is registered, we can send a direct message to an agent instance\n", - "using an {py:class}`~autogen_core.base.AgentId`.\n", - "The runtime will create the instance the first time it delivers a\n", - "message to this instance." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Received message: Hello, World!\n" - ] - } - ], - "source": [ - "agent_id = AgentId(\"my_agent\", \"default\")\n", - "runtime.start() # Start processing messages in the background.\n", - "await runtime.send_message(MyMessageType(\"Hello, World!\"), agent_id)\n", - "await runtime.stop() # Stop processing messages in the background." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "Because the runtime manages the lifecycle of agents, an {py:class}`~autogen_core.base.AgentId`\n", - "is only used to communicate with the agent or retrieve its metadata (e.g., description).\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running the Single-Threaded Agent Runtime\n", - "\n", - "The above code snippet uses `runtime.start()` to start a background task\n", - "to process and deliver messages to recepients' message handlers.\n", - "This is a feature of the\n", - "local embedded runtime {py:class}`~autogen_core.application.SingleThreadedAgentRuntime`.\n", - "\n", - "To stop the background task immediately, use the `stop()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "runtime.start()\n", - "# ... Send messages, publish messages, etc.\n", - "await runtime.stop() # This will return immediately but will not cancel\n", - "# any in-progress message handling." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can resume the background task by calling `start()` again.\n", - "\n", - "For batch scenarios such as running benchmarks for evaluating agents,\n", - "you may want to wait for the background task to stop automatically when\n", - "there are no unprocessed messages and no agent is handling messages --\n", - "the batch may considered complete.\n", - "You can achieve this by using the `stop_when_idle()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "runtime.start()\n", - "# ... Send messages, publish messages, etc.\n", - "await runtime.stop_when_idle() # This will block until the runtime is idle." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also directly process messages one-by-one without a background task using:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "await runtime.process_next()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Other runtime implementations will have their own ways of running the runtime." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.6" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Agent and Agent Runtime\n", + "\n", + "In this and the following section, we focus on the core concepts of AutoGen:\n", + "agents, agent runtime, messages, and communication.\n", + "You will not find any AI models or tools here, just the foundational\n", + "building blocks for building multi-agent applications." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "An agent in AutoGen is an entity defined by the base class {py:class}`autogen_core.Agent`.\n", + "It has a unique identifier of the type {py:class}`autogen_core.AgentId`,\n", + "a metadata dictionary of the type {py:class}`autogen_core.AgentMetadata`,\n", + "and method for handling messages {py:meth}`autogen_core.BaseAgent.on_message_impl`.\n", + "\n", + "An agent runtime is the execution environment for agents in AutoGen.\n", + "Similar to the runtime environment of a programming language,\n", + "an agent runtime provides the necessary infrastructure to facilitate communication\n", + "between agents, manage agent lifecycles, enforce security boundaries, and support monitoring and\n", + "debugging.\n", + "For local development, developers can use {py:class}`~autogen_core.application.SingleThreadedAgentRuntime`,\n", + "which can be embedded in a Python application.\n", + "\n", + "```{note}\n", + "Agents are not directly instantiated and managed by application code.\n", + "Instead, they are created by the runtime when needed and managed by the runtime.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Implementing an Agent\n", + "\n", + "To implement an agent, the developer must subclass the {py:class}`~autogen_core.BaseAgent` class\n", + "and implement the {py:meth}`~autogen_core.BaseAgent.on_message_impl` method.\n", + "This method is invoked when the agent receives a message. For example,\n", + "the following agent handles a simple message type and prints the message it receives:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "\n", + "from autogen_core import AgentId, BaseAgent, MessageContext\n", + "\n", + "\n", + "@dataclass\n", + "class MyMessageType:\n", + " content: str\n", + "\n", + "\n", + "class MyAgent(BaseAgent):\n", + " def __init__(self) -> None:\n", + " super().__init__(\"MyAgent\")\n", + "\n", + " async def on_message_impl(self, message: MyMessageType, ctx: MessageContext) -> None:\n", + " print(f\"Received message: {message.content}\") # type: ignore" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This agent only handles `MyMessageType` messages. \n", + "To handle multiple message types, developers can subclass the {py:class}`~autogen_core.RoutedAgent` class\n", + "which provides an easy-to use API to implement different message handlers for different message types.\n", + "See the next section on [message and communication](./message-and-communication.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Registering Agent Type\n", + "\n", + "To make agents available to the runtime, developers can use the\n", + "{py:meth}`~autogen_core.BaseAgent.register` class method of the\n", + "{py:class}`~autogen_core.BaseAgent` class.\n", + "The process of registration associates an agent type, which is uniquely identified by a string, \n", + "and a factory function\n", + "that creates an instance of the agent type of the given class.\n", + "The factory function is used to allow automatic creation of agent instances \n", + "when they are needed.\n", + "\n", + "Agent type ({py:class}`~autogen_core.AgentType`) is not the same as the agent class. 
In this example,\n", + "the agent type is `AgentType(\"my_agent\")` and the agent class is the Python class `MyAgent`.\n", + "The factory function is expected to return an instance of the agent class \n", + "on which the {py:meth}`~autogen_core.BaseAgent.register` class method is invoked.\n", + "Read [Agent Identity and Lifecycles](../core-concepts/agent-identity-and-lifecycle.md)\n", + "to learn more about agent type and identity.\n", + "\n", + "```{note}\n", + "Different agent types can be registered with factory functions that return \n", + "the same agent class. For example, in the factory functions, \n", + "variations of the constructor parameters\n", + "can be used to create different instances of the same agent class.\n", + "```\n", + "\n", + "To register an agent type with the \n", + "{py:class}`~autogen_core.application.SingleThreadedAgentRuntime`,\n", + "the following code can be used:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='my_agent')" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_core import SingleThreadedAgentRuntime\n", + "\n", + "runtime = SingleThreadedAgentRuntime()\n", + "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once an agent type is registered, we can send a direct message to an agent instance\n", + "using an {py:class}`~autogen_core.AgentId`.\n", + "The runtime will create the instance the first time it delivers a\n", + "message to this instance." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Received message: Hello, World!\n" + ] + } + ], + "source": [ + "agent_id = AgentId(\"my_agent\", \"default\")\n", + "runtime.start() # Start processing messages in the background.\n", + "await runtime.send_message(MyMessageType(\"Hello, World!\"), agent_id)\n", + "await runtime.stop() # Stop processing messages in the background." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "Because the runtime manages the lifecycle of agents, an {py:class}`~autogen_core.AgentId`\n", + "is only used to communicate with the agent or retrieve its metadata (e.g., description).\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Running the Single-Threaded Agent Runtime\n", + "\n", + "The above code snippet uses `runtime.start()` to start a background task\n", + "to process and deliver messages to recepients' message handlers.\n", + "This is a feature of the\n", + "local embedded runtime {py:class}`~autogen_core.application.SingleThreadedAgentRuntime`.\n", + "\n", + "To stop the background task immediately, use the `stop()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()\n", + "# ... Send messages, publish messages, etc.\n", + "await runtime.stop() # This will return immediately but will not cancel\n", + "# any in-progress message handling." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can resume the background task by calling `start()` again.\n", + "\n", + "For batch scenarios such as running benchmarks for evaluating agents,\n", + "you may want to wait for the background task to stop automatically when\n", + "there are no unprocessed messages and no agent is handling messages --\n", + "the batch may considered complete.\n", + "You can achieve this by using the `stop_when_idle()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "runtime.start()\n", + "# ... Send messages, publish messages, etc.\n", + "await runtime.stop_when_idle() # This will block until the runtime is idle." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also directly process messages one-by-one without a background task using:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "await runtime.process_next()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Other runtime implementations will have their own ways of running the runtime." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/command-line-code-executors.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/command-line-code-executors.ipynb index 2a0ea31c3..efac7daa3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/command-line-code-executors.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/command-line-code-executors.ipynb @@ -10,17 +10,17 @@ "Generally speaking, it will save each code block to a file and then execute that file.\n", "This means that each code block is executed in a new process. There are two forms of this executor:\n", "\n", - "- Docker ({py:class}`~autogen_ext.code_executor.docker_executor.DockerCommandLineCodeExecutor`) - this is where all commands are executed in a Docker container\n", - "- Local ({py:class}`~autogen_core.code_executor.local.LocalCommandLineCodeExecutor`) - this is where all commands are executed on the host machine\n", + "- Docker ({py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`) - this is where all commands are executed in a Docker container\n", + "- Local ({py:class}`~autogen_ext.code_executors.local.LocalCommandLineCodeExecutor`) - this is where all commands are executed on the host machine\n", "\n", "## Docker\n", "\n", "```{note}\n", - "To use `DockerCommandLineCodeExecutor`, ensure the `autogen-ext[docker]` package is installed. For more details, see the [Packages Documentation](https://microsoft.github.io/autogen/dev/packages/index.html).\n", + "To use {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`, ensure the `autogen-ext[docker]` package is installed. 
For more details, see the [Packages Documentation](https://microsoft.github.io/autogen/dev/packages/index.html).\n", "\n", "```\n", "\n", - "The {py:class}`~autogen_ext.code_executor.docker_executor.DockerCommandLineCodeExecutor` will create a Docker container and run all commands within that container. \n", + "The {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` will create a Docker container and run all commands within that container. \n", "The default image that is used is `python:3-slim`, this can be customized by passing the `image` parameter to the constructor. \n", "If the image is not found locally then the class will try to pull it. \n", "Therefore, having built the image locally is enough. The only thing required for this image to be compatible with the executor is to have `sh` and `python` installed. \n", @@ -104,7 +104,7 @@ "The local version will run code on your local system. Use it with caution.\n", "```\n", "\n", - "To execute code on the host machine, as in the machine running your application, {py:class}`~autogen_core.components.code_executor.LocalCommandLineCodeExecutor` can be used.\n", + "To execute code on the host machine, as in the machine running your application, {py:class}`~autogen_ext.code_executors.local.LocalCommandLineCodeExecutor` can be used.\n", "\n", "### Example" ] @@ -149,7 +149,7 @@ "source": [ "## Local within a Virtual Environment\n", "\n", - "If you want the code to run within a virtual environment created as part of the application’s setup, you can specify a directory for the newly created environment and pass its context to {py:class}`~autogen_core.components.code_executor.LocalCommandLineCodeExecutor`. This setup allows the executor to use the specified virtual environment consistently throughout the application's lifetime, ensuring isolated dependencies and a controlled runtime environment." + "If you want the code to run within a virtual environment created as part of the application’s setup, you can specify a directory for the newly created environment and pass its context to {py:class}`~autogen_ext.code_executors.local.LocalCommandLineCodeExecutor`. This setup allows the executor to use the specified virtual environment consistently throughout the application's lifetime, ensuring isolated dependencies and a controlled runtime environment." ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 0a1d922f1..e3cafb64a 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -1,222 +1,222 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Distributed Agent Runtime\n", - "\n", - "```{attention}\n", - "The distributed agent runtime is an experimental feature. 
Expect breaking changes\n", - "to the API.\n", - "```\n", - "\n", - "A distributed agent runtime facilitates communication and agent lifecycle management\n", - "across process boundaries.\n", - "It consists of a host service and at least one worker runtime.\n", - "\n", - "The host service maintains connections to all active worker runtimes,\n", - "facilitates message delivery, and keeps sessions for all direct messages (i.e., RPCs).\n", - "A worker runtime processes application code (agents) and connects to the host service.\n", - "It also advertises the agents which they support to the host service,\n", - "so the host service can deliver messages to the correct worker.\n", - "\n", - "````{note}\n", - "The distributed agent runtime requires extra dependencies, install them using:\n", - "```bash\n", - "pip install autogen-core[grpc]==0.4.0.dev9\n", - "```\n", - "````\n", - "\n", - "We can start a host service using {py:class}`~autogen_core.application.GrpcWorkerAgentRuntimeHost`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost\n", - "\n", - "host = GrpcWorkerAgentRuntimeHost(address=\"localhost:50051\")\n", - "host.start() # Start a host service in the background." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The above code starts the host service in the background and accepts\n", - "worker connections on port 50051.\n", - "\n", - "Before running worker runtimes, let's define our agent.\n", - "The agent will publish a new message on every message it receives.\n", - "It also keeps track of how many messages it has published, and \n", - "stops publishing new messages once it has published 5 messages." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "from autogen_core import DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n", - "\n", - "\n", - "@dataclass\n", - "class MyMessage:\n", - " content: str\n", - "\n", - "\n", - "@default_subscription\n", - "class MyAgent(RoutedAgent):\n", - " def __init__(self, name: str) -> None:\n", - " super().__init__(\"My agent\")\n", - " self._name = name\n", - " self._counter = 0\n", - "\n", - " @message_handler\n", - " async def my_message_handler(self, message: MyMessage, ctx: MessageContext) -> None:\n", - " self._counter += 1\n", - " if self._counter > 5:\n", - " return\n", - " content = f\"{self._name}: Hello x {self._counter}\"\n", - " print(content)\n", - " await self.publish_message(MyMessage(content=content), DefaultTopicId())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can set up the worker agent runtimes.\n", - "We use {py:class}`~autogen_core.application.GrpcWorkerAgentRuntime`.\n", - "We set up two worker runtimes. Each runtime hosts one agent.\n", - "All agents publish and subscribe to the default topic, so they can see all\n", - "messages being published.\n", - "\n", - "To run the agents, we publishes a message from a worker." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "worker1: Hello x 1\n", - "worker2: Hello x 1\n", - "worker2: Hello x 2\n", - "worker1: Hello x 2\n", - "worker1: Hello x 3\n", - "worker2: Hello x 3\n", - "worker2: Hello x 4\n", - "worker1: Hello x 4\n", - "worker1: Hello x 5\n", - "worker2: Hello x 5\n" - ] - } - ], - "source": [ - "import asyncio\n", - "\n", - "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime\n", - "\n", - "worker1 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n", - "worker1.start()\n", - "await MyAgent.register(worker1, \"worker1\", lambda: MyAgent(\"worker1\"))\n", - "\n", - "worker2 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n", - "worker2.start()\n", - "await MyAgent.register(worker2, \"worker2\", lambda: MyAgent(\"worker2\"))\n", - "\n", - "await worker2.publish_message(MyMessage(content=\"Hello!\"), DefaultTopicId())\n", - "\n", - "# Let the agents run for a while.\n", - "await asyncio.sleep(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see each agent published exactly 5 messages.\n", - "\n", - "To stop the worker runtimes, we can call {py:meth}`~autogen_core.application.GrpcWorkerAgentRuntime.stop`." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "await worker1.stop()\n", - "await worker2.stop()\n", - "\n", - "# To keep the worker running until a termination signal is received (e.g., SIGTERM).\n", - "# await worker1.stop_when_signal()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can call {py:meth}`~autogen_core.application.GrpcWorkerAgentRuntimeHost.stop`\n", - "to stop the host service." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "await host.stop()\n", - "\n", - "# To keep the host service running until a termination signal (e.g., SIGTERM)\n", - "# await host.stop_when_signal()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Next Steps\n", - "To see complete examples of using distributed runtime, please take a look at the following samples:\n", - "\n", - "- [Distributed Workers](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/worker) \n", - "- [Distributed Semantic Router](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/semantic_router) \n", - "- [Distributed Group Chat](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/distributed-group-chat) \n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Distributed Agent Runtime\n", + "\n", + "```{attention}\n", + "The distributed agent runtime is an experimental feature. 
+ Expect breaking changes\n", + "to the API.\n", + "```\n", + "\n", + "A distributed agent runtime facilitates communication and agent lifecycle management\n", + "across process boundaries.\n", + "It consists of a host service and at least one worker runtime.\n", + "\n", + "The host service maintains connections to all active worker runtimes,\n", + "facilitates message delivery, and keeps sessions for all direct messages (i.e., RPCs).\n", + "A worker runtime processes application code (agents) and connects to the host service.\n", + "It also advertises the agents that it supports to the host service,\n", + "so the host service can deliver messages to the correct worker.\n", + "\n", + "````{note}\n", + "The distributed agent runtime requires extra dependencies; install them using:\n", + "```bash\n", + "pip install \"autogen-ext[grpc]==0.4.0.dev9\"\n", + "```\n", + "````\n", + "\n", + "We can start a host service using {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntimeHost`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost\n", + "\n", + "host = GrpcWorkerAgentRuntimeHost(address=\"localhost:50051\")\n", + "host.start() # Start a host service in the background." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The above code starts the host service in the background and accepts\n", + "worker connections on port 50051.\n", + "\n", + "Before running worker runtimes, let's define our agent.\n", + "The agent will publish a new message on every message it receives.\n", + "It also keeps track of how many messages it has published, and \n", + "stops publishing new messages once it has published 5 messages." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "\n", + "from autogen_core import DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n", + "\n", + "\n", + "@dataclass\n", + "class MyMessage:\n", + " content: str\n", + "\n", + "\n", + "@default_subscription\n", + "class MyAgent(RoutedAgent):\n", + " def __init__(self, name: str) -> None:\n", + " super().__init__(\"My agent\")\n", + " self._name = name\n", + " self._counter = 0\n", + "\n", + " @message_handler\n", + " async def my_message_handler(self, message: MyMessage, ctx: MessageContext) -> None:\n", + " self._counter += 1\n", + " if self._counter > 5:\n", + " return\n", + " content = f\"{self._name}: Hello x {self._counter}\"\n", + " print(content)\n", + " await self.publish_message(MyMessage(content=content), DefaultTopicId())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can set up the worker agent runtimes.\n", + "We use {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime`.\n", + "We set up two worker runtimes. Each runtime hosts one agent.\n", + "All agents publish and subscribe to the default topic, so they can see all\n", + "messages being published.\n", + "\n", + "To run the agents, we publish a message from a worker."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "worker1: Hello x 1\n", + "worker2: Hello x 1\n", + "worker2: Hello x 2\n", + "worker1: Hello x 2\n", + "worker1: Hello x 3\n", + "worker2: Hello x 3\n", + "worker2: Hello x 4\n", + "worker1: Hello x 4\n", + "worker1: Hello x 5\n", + "worker2: Hello x 5\n" + ] + } + ], + "source": [ + "import asyncio\n", + "\n", + "from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime\n", + "\n", + "worker1 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n", + "worker1.start()\n", + "await MyAgent.register(worker1, \"worker1\", lambda: MyAgent(\"worker1\"))\n", + "\n", + "worker2 = GrpcWorkerAgentRuntime(host_address=\"localhost:50051\")\n", + "worker2.start()\n", + "await MyAgent.register(worker2, \"worker2\", lambda: MyAgent(\"worker2\"))\n", + "\n", + "await worker2.publish_message(MyMessage(content=\"Hello!\"), DefaultTopicId())\n", + "\n", + "# Let the agents run for a while.\n", + "await asyncio.sleep(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see each agent published exactly 5 messages.\n", + "\n", + "To stop the worker runtimes, we can call {py:meth}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime.stop`." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "await worker1.stop()\n", + "await worker2.stop()\n", + "\n", + "# To keep the worker running until a termination signal is received (e.g., SIGTERM).\n", + "# await worker1.stop_when_signal()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can call {py:meth}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntimeHost.stop`\n", + "to stop the host service." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "await host.stop()\n", + "\n", + "# To keep the host service running until a termination signal (e.g., SIGTERM)\n", + "# await host.stop_when_signal()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Next Steps\n", + "To see complete examples of using distributed runtime, please take a look at the following samples:\n", + "\n", + "- [Distributed Workers](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/worker) \n", + "- [Distributed Semantic Router](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/semantic_router) \n", + "- [Distributed Group Chat](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core/samples/distributed-group-chat) \n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb index a12304bfe..a775c0f59 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/message-and-communication.ipynb @@ -1,643 +1,643 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Message and Communication\n", - "\n", - "An agent in AutoGen core can react to, send, and publish messages,\n", - "and messages are the only means through which agents can communicate\n", - "with each other." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Messages\n", - "\n", - "Messages are serializable objects, they can be defined using:\n", - "\n", - "- A subclass of Pydantic's {py:class}`pydantic.BaseModel`, or\n", - "- A dataclass\n", - "\n", - "For example:" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "\n", - "@dataclass\n", - "class TextMessage:\n", - " content: str\n", - " source: str\n", - "\n", - "\n", - "@dataclass\n", - "class ImageMessage:\n", - " url: str\n", - " source: str" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "Messages are purely data, and should not contain any logic.\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Message Handlers\n", - "\n", - "When an agent receives a message the runtime will invoke the agent's message handler\n", - "({py:meth}`~autogen_core.base.Agent.on_message`) which should implement the agents message handling logic.\n", - "If this message cannot be handled by the agent, the agent should raise a\n", - "{py:class}`~autogen_core.base.exceptions.CantHandleException`.\n", - "\n", - "The base class {py:class}`~autogen_core.base.BaseAgent` provides no message handling logic\n", - "and implementing the {py:meth}`~autogen_core.base.Agent.on_message` method directly is not recommended\n", - "unless for the advanced use cases.\n", - "\n", - "Developers should start with implementing the {py:class}`~autogen_core.components.RoutedAgent` base class\n", - "which provides built-in message routing capability.\n", - "\n", - "### Routing Messages by Type\n", - "\n", - "The {py:class}`~autogen_core.components.RoutedAgent` base class provides a mechanism\n", - "for associating message types with message handlers \n", - "with the {py:meth}`~autogen_core.components.message_handler` decorator,\n", - "so developers do not need to implement the {py:meth}`~autogen_core.base.Agent.on_message` method.\n", - "\n", - "For example, the following type-routed agent responds to `TextMessage` and `ImageMessage`\n", - "using different message handlers:" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", - "\n", - "\n", - "class MyAgent(RoutedAgent):\n", - " @message_handler\n", - " async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:\n", - " print(f\"Hello, {message.source}, you said {message.content}!\")\n", - "\n", - " @message_handler\n", - " async def on_image_message(self, message: ImageMessage, ctx: MessageContext) -> None:\n", - " print(f\"Hello, {message.source}, you sent me {message.url}!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Create the agent runtime and register the agent type (see [Agent and Agent Runtime](agent-and-agent-runtime.ipynb)):" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "AgentType(type='my_agent')" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent(\"My Agent\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ 
- "Test this agent with `TextMessage` and `ImageMessage`." - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hello, User, you said Hello, World!!\n", - "Hello, User, you sent me https://example.com/image.jpg!\n" - ] - } - ], - "source": [ - "runtime.start()\n", - "agent_id = AgentId(\"my_agent\", \"default\")\n", - "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"User\"), agent_id)\n", - "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"User\"), agent_id)\n", - "await runtime.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The runtime automatically creates an instance of `MyAgent` with the \n", - "agent ID `AgentId(\"my_agent\", \"default\")` when delivering the first message." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Routing Messages of the Same Type\n", - "\n", - "In some scenarios, it is useful to route messages of the same type to different handlers.\n", - "For examples, messages from different sender agents should be handled differently.\n", - "You can use the `match` parameter of the {py:meth}`~autogen_core.components.message_handler` decorator.\n", - "\n", - "The `match` parameter associates handlers for the same message type\n", - "to a specific message -- it is secondary to the message type routing. \n", - "It accepts a callable that takes the message and \n", - "{py:class}`~autogen_core.base.MessageContext` as arguments, and\n", - "returns a boolean indicating whether the message should be handled by the decorated handler.\n", - "The callable is checked in the alphabetical order of the handlers.\n", - "\n", - "Here is an example of an agent that routes messages based on the sender agent\n", - "using the `match` parameter:" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "class RoutedBySenderAgent(RoutedAgent):\n", - " @message_handler(match=lambda msg, ctx: msg.source.startswith(\"user1\")) # type: ignore\n", - " async def on_user1_message(self, message: TextMessage, ctx: MessageContext) -> None:\n", - " print(f\"Hello from user 1 handler, {message.source}, you said {message.content}!\")\n", - "\n", - " @message_handler(match=lambda msg, ctx: msg.source.startswith(\"user2\")) # type: ignore\n", - " async def on_user2_message(self, message: TextMessage, ctx: MessageContext) -> None:\n", - " print(f\"Hello from user 2 handler, {message.source}, you said {message.content}!\")\n", - "\n", - " @message_handler(match=lambda msg, ctx: msg.source.startswith(\"user2\")) # type: ignore\n", - " async def on_image_message(self, message: ImageMessage, ctx: MessageContext) -> None:\n", - " print(f\"Hello, {message.source}, you sent me {message.url}!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The above agent uses the `source` field of the message to determine the sender agent.\n", - "You can also use the `sender` field of {py:class}`~autogen_core.base.MessageContext` to determine the sender agent\n", - "using the agent ID if available.\n", - "\n", - "Let's test this agent with messages with different `source` values:" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hello from user 1 handler, user1-test, you said Hello, World!!\n", - 
"Hello from user 2 handler, user2-test, you said Hello, World!!\n", - "Hello, user2-test, you sent me https://example.com/image.jpg!\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await RoutedBySenderAgent.register(runtime, \"my_agent\", lambda: RoutedBySenderAgent(\"Routed by sender agent\"))\n", - "runtime.start()\n", - "agent_id = AgentId(\"my_agent\", \"default\")\n", - "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"user1-test\"), agent_id)\n", - "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"user2-test\"), agent_id)\n", - "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"user1-test\"), agent_id)\n", - "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"user2-test\"), agent_id)\n", - "await runtime.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the above example, the first `ImageMessage` is not handled because the `source` field\n", - "of the message does not match the handler's `match` condition." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Direct Messaging\n", - "\n", - "There are two types of communication in AutoGen core:\n", - "\n", - "- **Direct Messaging**: sends a direct message to another agent.\n", - "- **Broadcast**: publishes a message to a topic.\n", - "\n", - "Let's first look at direct messaging.\n", - "To send a direct message to another agent, within a message handler use\n", - "the {py:meth}`autogen_core.base.BaseAgent.send_message` method,\n", - "from the runtime use the {py:meth}`autogen_core.base.AgentRuntime.send_message` method.\n", - "Awaiting calls to these methods will return the return value of the\n", - "receiving agent's message handler.\n", - "When the receiving agent's handler returns `None`, `None` will be returned.\n", - "\n", - "```{note}\n", - "If the invoked agent raises an exception while the sender is awaiting,\n", - "the exception will be propagated back to the sender.\n", - "```\n", - "\n", - "### Request/Response\n", - "\n", - "Direct messaging can be used for request/response scenarios,\n", - "where the sender expects a response from the receiver.\n", - "The receiver can respond to the message by returning a value from its message handler.\n", - "You can think of this as a function call between agents.\n", - "\n", - "For example, consider the following agents:" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "from dataclasses import dataclass\n", - "\n", - "from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", - "\n", - "\n", - "@dataclass\n", - "class Message:\n", - " content: str\n", - "\n", - "\n", - "class InnerAgent(RoutedAgent):\n", - " @message_handler\n", - " async def on_my_message(self, message: Message, ctx: MessageContext) -> Message:\n", - " return Message(content=f\"Hello from inner, {message.content}\")\n", - "\n", - "\n", - "class OuterAgent(RoutedAgent):\n", - " def __init__(self, description: str, inner_agent_type: str):\n", - " super().__init__(description)\n", - " self.inner_agent_id = AgentId(inner_agent_type, self.id.key)\n", - "\n", - " @message_handler\n", - " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", - " print(f\"Received message: {message.content}\")\n", - " # Send a direct message to the inner agent and receves a 
response.\n", - " response = await self.send_message(Message(f\"Hello from outer, {message.content}\"), self.inner_agent_id)\n", - " print(f\"Received inner response: {response.content}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Upone receving a message, the `OuterAgent` sends a direct message to the `InnerAgent` and receives\n", - "a message in response.\n", - "\n", - "We can test these agents by sending a `Message` to the `OuterAgent`." - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Received message: Hello, World!\n", - "Received inner response: Hello from inner, Hello from outer, Hello, World!\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await InnerAgent.register(runtime, \"inner_agent\", lambda: InnerAgent(\"InnerAgent\"))\n", - "await OuterAgent.register(runtime, \"outer_agent\", lambda: OuterAgent(\"OuterAgent\", \"inner_agent\"))\n", - "runtime.start()\n", - "outer_agent_id = AgentId(\"outer_agent\", \"default\")\n", - "await runtime.send_message(Message(content=\"Hello, World!\"), outer_agent_id)\n", - "await runtime.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Both outputs are produced by the `OuterAgent`'s message handler, however the second output is based on the response from the `InnerAgent`.\n", - "\n", - "Generally speaking, direct messaging is appropriate for scenarios when the sender and\n", - "recipient are tightly coupled -- they are created together and the sender\n", - "is linked to a specific instance of the recipient.\n", - "For example, an agent executes tool calls by sending direct messages to\n", - "an instance of {py:class}`~autogen_core.components.tool_agent.ToolAgent`,\n", - "and uses the responses to form an action-observation loop." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Broadcast\n", - "\n", - "Broadcast is effectively the publish/subscribe model with topic and subscription.\n", - "Read [Topic and Subscription](../core-concepts/topic-and-subscription.md)\n", - "to learn the core concepts.\n", - "\n", - "The key difference between direct messaging and broadcast is that broadcast\n", - "cannot be used for request/response scenarios.\n", - "When an agent publishes a message it is one way only, it cannot receive a response\n", - "from any other agent, even if a receiving agent's handler returns a value.\n", - "\n", - "```{note}\n", - "If a response is given to a published message, it will be thrown away.\n", - "```\n", - "\n", - "```{note}\n", - "If an agent publishes a message type for which it is subscribed it will not\n", - "receive the message it published. This is to prevent infinite loops.\n", - "```\n", - "\n", - "### Subscribe and Publish to Topics\n", - "\n", - "[Type-based subscription](../core-concepts/topic-and-subscription.md#type-based-subscription)\n", - "maps messages published to topics of a given topic type to \n", - "agents of a given agent type. 
\n", - "To make an agent that subsclasses {py:class}`~autogen_core.components.RoutedAgent`\n", - "subscribe to a topic of a given topic type,\n", - "you can use the {py:meth}`~autogen_core.components.type_subscription` class decorator.\n", - "\n", - "The following example shows a `ReceiverAgent` class that subscribes to topics of `\"default\"` topic type\n", - "using the {py:meth}`~autogen_core.components.type_subscription` decorator.\n", - "and prints the received messages." - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core import RoutedAgent, message_handler, type_subscription\n", - "\n", - "\n", - "@type_subscription(topic_type=\"default\")\n", - "class ReceivingAgent(RoutedAgent):\n", - " @message_handler\n", - " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", - " print(f\"Received a message: {message.content}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To publish a message from an agent's handler,\n", - "use the {py:meth}`~autogen_core.base.BaseAgent.publish_message` method and specify\n", - "a {py:class}`~autogen_core.base.TopicId`.\n", - "This call must still be awaited to allow the runtime to schedule delivery of \n", - "the message to all subscribers, but it will always return `None`.\n", - "If an agent raises an exception while handling a published message,\n", - "this will be logged but will not be propagated back to the publishing agent.\n", - "\n", - "The following example shows a `BroadcastingAgent` that \n", - "publishes a message to a topic upon receiving a message. " - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core import TopicId\n", - "\n", - "\n", - "class BroadcastingAgent(RoutedAgent):\n", - " @message_handler\n", - " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", - " await self.publish_message(\n", - " Message(\"Publishing a message from broadcasting agent!\"),\n", - " topic_id=TopicId(type=\"default\", source=self.id.key),\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`BroadcastingAgent` publishes message to a topic with type `\"default\"`\n", - "and source assigned to the agent instance's agent key." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Subscriptions are registered with the agent runtime, either as part of\n", - "agent type's registration or through a separate API method.\n", - "Here is how we register {py:class}`~autogen_core.components.TypeSubscription`\n", - "for the receiving agent with the {py:meth}`~autogen_core.components.type_subscription` decorator,\n", - "and for the broadcasting agent without the decorator." - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Received a message: Hello, World! 
From the runtime!\n", - "Received a message: Publishing a message from broadcasting agent!\n" - ] - } - ], - "source": [ - "from autogen_core import TypeSubscription\n", - "\n", - "runtime = SingleThreadedAgentRuntime()\n", - "\n", - "# Option 1: with type_subscription decorator\n", - "# The type_subscription class decorator automatically adds a TypeSubscription to\n", - "# the runtime when the agent is registered.\n", - "await ReceivingAgent.register(runtime, \"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n", - "\n", - "# Option 2: with TypeSubscription\n", - "await BroadcastingAgent.register(runtime, \"broadcasting_agent\", lambda: BroadcastingAgent(\"Broadcasting Agent\"))\n", - "await runtime.add_subscription(TypeSubscription(topic_type=\"default\", agent_type=\"broadcasting_agent\"))\n", - "\n", - "# Start the runtime and publish a message.\n", - "runtime.start()\n", - "await runtime.publish_message(\n", - " Message(\"Hello, World! From the runtime!\"), topic_id=TopicId(type=\"default\", source=\"default\")\n", - ")\n", - "await runtime.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As shown in the above example, you can also publish directly to a topic\n", - "through the runtime's {py:meth}`~autogen_core.base.AgentRuntime.publish_message` method\n", - "without the need to create an agent instance.\n", - "\n", - "From the output, you can see two messages were received by the receiving agent:\n", - "one was published through the runtime, and the other was published by the broadcasting agent." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Default Topic and Subscriptions\n", - "\n", - "In the above example, we used\n", - "{py:class}`~autogen_core.base.TopicId` and {py:class}`~autogen_core.components.TypeSubscription`\n", - "to specify the topic and subscriptions respectively.\n", - "This is the appropriate way for many scenarios.\n", - "However, when there is a single scope of publishing, that is, \n", - "all agents publish and subscribe to all broadcasted messages,\n", - "we can use the convenience classes {py:class}`~autogen_core.components.DefaultTopicId`\n", - "and {py:meth}`~autogen_core.components.default_subscription` to simplify our code.\n", - "\n", - "{py:class}`~autogen_core.components.DefaultTopicId` is\n", - "for creating a topic that uses `\"default\"` as the default value for the topic type\n", - "and the publishing agent's key as the default value for the topic source.\n", - "{py:meth}`~autogen_core.components.default_subscription` is\n", - "for creating a type subscription that subscribes to the default topic.\n", - "We can simplify `BroadcastingAgent` by using\n", - "{py:class}`~autogen_core.components.DefaultTopicId` and {py:meth}`~autogen_core.components.default_subscription`." 
- ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core import DefaultTopicId, default_subscription\n", - "\n", - "\n", - "@default_subscription\n", - "class BroadcastingAgentDefaultTopic(RoutedAgent):\n", - " @message_handler\n", - " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", - " # Publish a message to all agents in the same namespace.\n", - " await self.publish_message(\n", - " Message(\"Publishing a message from broadcasting agent!\"),\n", - " topic_id=DefaultTopicId(),\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When the runtime calls {py:meth}`~autogen_core.BaseAgent.register` to register the agent type,\n", - "it creates a {py:class}`~autogen_core.components.TypeSubscription`\n", - "whose topic type uses `\"default\"` as the default value and \n", - "agent type uses the same agent type that is being registered in the same context." - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Received a message: Hello, World! From the runtime!\n", - "Received a message: Publishing a message from broadcasting agent!\n" - ] - } - ], - "source": [ - "runtime = SingleThreadedAgentRuntime()\n", - "await BroadcastingAgentDefaultTopic.register(\n", - " runtime, \"broadcasting_agent\", lambda: BroadcastingAgentDefaultTopic(\"Broadcasting Agent\")\n", - ")\n", - "await ReceivingAgent.register(runtime, \"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n", - "runtime.start()\n", - "await runtime.publish_message(Message(\"Hello, World! From the runtime!\"), topic_id=DefaultTopicId())\n", - "await runtime.stop_when_idle()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "If your scenario allows all agents to publish and subscribe to\n", - "all broadcasted messages, use {py:class}`~autogen_core.components.DefaultTopicId`\n", - "and {py:meth}`~autogen_core.components.default_subscription` to decorate your\n", - "agent classes.\n", - "```" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "agnext", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Message and Communication\n", + "\n", + "An agent in AutoGen core can react to, send, and publish messages,\n", + "and messages are the only means through which agents can communicate\n", + "with each other." 
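The Messages section below defines its example message types as dataclasses. Since Pydantic models are the other supported option but no example of them is shown, here is a minimal sketch of equivalent Pydantic-based messages; the class names are hypothetical and chosen to avoid clashing with the dataclasses defined later, and `pydantic` is assumed to be installed:

```python
from pydantic import BaseModel


# Sketch only: Pydantic equivalents of the TextMessage and ImageMessage
# dataclasses defined in the next section.
class TextMessageModel(BaseModel):
    content: str
    source: str


class ImageMessageModel(BaseModel):
    url: str
    source: str
```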
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "Messages are serializable objects, they can be defined using:\n", + "\n", + "- A subclass of Pydantic's {py:class}`pydantic.BaseModel`, or\n", + "- A dataclass\n", + "\n", + "For example:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "\n", + "\n", + "@dataclass\n", + "class TextMessage:\n", + " content: str\n", + " source: str\n", + "\n", + "\n", + "@dataclass\n", + "class ImageMessage:\n", + " url: str\n", + " source: str" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "Messages are purely data, and should not contain any logic.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Message Handlers\n", + "\n", + "When an agent receives a message the runtime will invoke the agent's message handler\n", + "({py:meth}`~autogen_core.Agent.on_message`) which should implement the agents message handling logic.\n", + "If this message cannot be handled by the agent, the agent should raise a\n", + "{py:class}`~autogen_core.exceptions.CantHandleException`.\n", + "\n", + "The base class {py:class}`~autogen_core.BaseAgent` provides no message handling logic\n", + "and implementing the {py:meth}`~autogen_core.Agent.on_message` method directly is not recommended\n", + "unless for the advanced use cases.\n", + "\n", + "Developers should start with implementing the {py:class}`~autogen_core.RoutedAgent` base class\n", + "which provides built-in message routing capability.\n", + "\n", + "### Routing Messages by Type\n", + "\n", + "The {py:class}`~autogen_core.RoutedAgent` base class provides a mechanism\n", + "for associating message types with message handlers \n", + "with the {py:meth}`~autogen_core.components.message_handler` decorator,\n", + "so developers do not need to implement the {py:meth}`~autogen_core.Agent.on_message` method.\n", + "\n", + "For example, the following type-routed agent responds to `TextMessage` and `ImageMessage`\n", + "using different message handlers:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", + "\n", + "\n", + "class MyAgent(RoutedAgent):\n", + " @message_handler\n", + " async def on_text_message(self, message: TextMessage, ctx: MessageContext) -> None:\n", + " print(f\"Hello, {message.source}, you said {message.content}!\")\n", + "\n", + " @message_handler\n", + " async def on_image_message(self, message: ImageMessage, ctx: MessageContext) -> None:\n", + " print(f\"Hello, {message.source}, you sent me {message.url}!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create the agent runtime and register the agent type (see [Agent and Agent Runtime](agent-and-agent-runtime.ipynb)):" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AgentType(type='my_agent')" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await MyAgent.register(runtime, \"my_agent\", lambda: MyAgent(\"My Agent\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test this agent with `TextMessage` and 
`ImageMessage`." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello, User, you said Hello, World!!\n", + "Hello, User, you sent me https://example.com/image.jpg!\n" + ] + } + ], + "source": [ + "runtime.start()\n", + "agent_id = AgentId(\"my_agent\", \"default\")\n", + "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"User\"), agent_id)\n", + "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"User\"), agent_id)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The runtime automatically creates an instance of `MyAgent` with the \n", + "agent ID `AgentId(\"my_agent\", \"default\")` when delivering the first message." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Routing Messages of the Same Type\n", + "\n", + "In some scenarios, it is useful to route messages of the same type to different handlers.\n", + "For examples, messages from different sender agents should be handled differently.\n", + "You can use the `match` parameter of the {py:meth}`~autogen_core.components.message_handler` decorator.\n", + "\n", + "The `match` parameter associates handlers for the same message type\n", + "to a specific message -- it is secondary to the message type routing. \n", + "It accepts a callable that takes the message and \n", + "{py:class}`~autogen_core.MessageContext` as arguments, and\n", + "returns a boolean indicating whether the message should be handled by the decorated handler.\n", + "The callable is checked in the alphabetical order of the handlers.\n", + "\n", + "Here is an example of an agent that routes messages based on the sender agent\n", + "using the `match` parameter:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "class RoutedBySenderAgent(RoutedAgent):\n", + " @message_handler(match=lambda msg, ctx: msg.source.startswith(\"user1\")) # type: ignore\n", + " async def on_user1_message(self, message: TextMessage, ctx: MessageContext) -> None:\n", + " print(f\"Hello from user 1 handler, {message.source}, you said {message.content}!\")\n", + "\n", + " @message_handler(match=lambda msg, ctx: msg.source.startswith(\"user2\")) # type: ignore\n", + " async def on_user2_message(self, message: TextMessage, ctx: MessageContext) -> None:\n", + " print(f\"Hello from user 2 handler, {message.source}, you said {message.content}!\")\n", + "\n", + " @message_handler(match=lambda msg, ctx: msg.source.startswith(\"user2\")) # type: ignore\n", + " async def on_image_message(self, message: ImageMessage, ctx: MessageContext) -> None:\n", + " print(f\"Hello, {message.source}, you sent me {message.url}!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The above agent uses the `source` field of the message to determine the sender agent.\n", + "You can also use the `sender` field of {py:class}`~autogen_core.MessageContext` to determine the sender agent\n", + "using the agent ID if available.\n", + "\n", + "Let's test this agent with messages with different `source` values:" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello from user 1 handler, user1-test, you said Hello, World!!\n", + "Hello from user 2 handler, user2-test, you said 
Hello, World!!\n", + "Hello, user2-test, you sent me https://example.com/image.jpg!\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await RoutedBySenderAgent.register(runtime, \"my_agent\", lambda: RoutedBySenderAgent(\"Routed by sender agent\"))\n", + "runtime.start()\n", + "agent_id = AgentId(\"my_agent\", \"default\")\n", + "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"user1-test\"), agent_id)\n", + "await runtime.send_message(TextMessage(content=\"Hello, World!\", source=\"user2-test\"), agent_id)\n", + "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"user1-test\"), agent_id)\n", + "await runtime.send_message(ImageMessage(url=\"https://example.com/image.jpg\", source=\"user2-test\"), agent_id)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the above example, the first `ImageMessage` is not handled because the `source` field\n", + "of the message does not match the handler's `match` condition." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Direct Messaging\n", + "\n", + "There are two types of communication in AutoGen core:\n", + "\n", + "- **Direct Messaging**: sends a direct message to another agent.\n", + "- **Broadcast**: publishes a message to a topic.\n", + "\n", + "Let's first look at direct messaging.\n", + "To send a direct message to another agent, within a message handler use\n", + "the {py:meth}`autogen_core.BaseAgent.send_message` method,\n", + "from the runtime use the {py:meth}`autogen_core.AgentRuntime.send_message` method.\n", + "Awaiting calls to these methods will return the return value of the\n", + "receiving agent's message handler.\n", + "When the receiving agent's handler returns `None`, `None` will be returned.\n", + "\n", + "```{note}\n", + "If the invoked agent raises an exception while the sender is awaiting,\n", + "the exception will be propagated back to the sender.\n", + "```\n", + "\n", + "### Request/Response\n", + "\n", + "Direct messaging can be used for request/response scenarios,\n", + "where the sender expects a response from the receiver.\n", + "The receiver can respond to the message by returning a value from its message handler.\n", + "You can think of this as a function call between agents.\n", + "\n", + "For example, consider the following agents:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "\n", + "from autogen_core import MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n", + "\n", + "\n", + "@dataclass\n", + "class Message:\n", + " content: str\n", + "\n", + "\n", + "class InnerAgent(RoutedAgent):\n", + " @message_handler\n", + " async def on_my_message(self, message: Message, ctx: MessageContext) -> Message:\n", + " return Message(content=f\"Hello from inner, {message.content}\")\n", + "\n", + "\n", + "class OuterAgent(RoutedAgent):\n", + " def __init__(self, description: str, inner_agent_type: str):\n", + " super().__init__(description)\n", + " self.inner_agent_id = AgentId(inner_agent_type, self.id.key)\n", + "\n", + " @message_handler\n", + " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", + " print(f\"Received message: {message.content}\")\n", + " # Send a direct message to the inner agent and receves a response.\n", + " response = await self.send_message(Message(f\"Hello 
from outer, {message.content}\"), self.inner_agent_id)\n", + " print(f\"Received inner response: {response.content}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Upone receving a message, the `OuterAgent` sends a direct message to the `InnerAgent` and receives\n", + "a message in response.\n", + "\n", + "We can test these agents by sending a `Message` to the `OuterAgent`." + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Received message: Hello, World!\n", + "Received inner response: Hello from inner, Hello from outer, Hello, World!\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await InnerAgent.register(runtime, \"inner_agent\", lambda: InnerAgent(\"InnerAgent\"))\n", + "await OuterAgent.register(runtime, \"outer_agent\", lambda: OuterAgent(\"OuterAgent\", \"inner_agent\"))\n", + "runtime.start()\n", + "outer_agent_id = AgentId(\"outer_agent\", \"default\")\n", + "await runtime.send_message(Message(content=\"Hello, World!\"), outer_agent_id)\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Both outputs are produced by the `OuterAgent`'s message handler, however the second output is based on the response from the `InnerAgent`.\n", + "\n", + "Generally speaking, direct messaging is appropriate for scenarios when the sender and\n", + "recipient are tightly coupled -- they are created together and the sender\n", + "is linked to a specific instance of the recipient.\n", + "For example, an agent executes tool calls by sending direct messages to\n", + "an instance of {py:class}`~autogen_core.components.tool_agent.ToolAgent`,\n", + "and uses the responses to form an action-observation loop." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Broadcast\n", + "\n", + "Broadcast is effectively the publish/subscribe model with topic and subscription.\n", + "Read [Topic and Subscription](../core-concepts/topic-and-subscription.md)\n", + "to learn the core concepts.\n", + "\n", + "The key difference between direct messaging and broadcast is that broadcast\n", + "cannot be used for request/response scenarios.\n", + "When an agent publishes a message it is one way only, it cannot receive a response\n", + "from any other agent, even if a receiving agent's handler returns a value.\n", + "\n", + "```{note}\n", + "If a response is given to a published message, it will be thrown away.\n", + "```\n", + "\n", + "```{note}\n", + "If an agent publishes a message type for which it is subscribed it will not\n", + "receive the message it published. This is to prevent infinite loops.\n", + "```\n", + "\n", + "### Subscribe and Publish to Topics\n", + "\n", + "[Type-based subscription](../core-concepts/topic-and-subscription.md#type-based-subscription)\n", + "maps messages published to topics of a given topic type to \n", + "agents of a given agent type. \n", + "To make an agent that subsclasses {py:class}`~autogen_core.RoutedAgent`\n", + "subscribe to a topic of a given topic type,\n", + "you can use the {py:meth}`~autogen_core.components.type_subscription` class decorator.\n", + "\n", + "The following example shows a `ReceiverAgent` class that subscribes to topics of `\"default\"` topic type\n", + "using the {py:meth}`~autogen_core.components.type_subscription` decorator.\n", + "and prints the received messages." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core import RoutedAgent, message_handler, type_subscription\n", + "\n", + "\n", + "@type_subscription(topic_type=\"default\")\n", + "class ReceivingAgent(RoutedAgent):\n", + " @message_handler\n", + " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", + " print(f\"Received a message: {message.content}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To publish a message from an agent's handler,\n", + "use the {py:meth}`~autogen_core.BaseAgent.publish_message` method and specify\n", + "a {py:class}`~autogen_core.TopicId`.\n", + "This call must still be awaited to allow the runtime to schedule delivery of \n", + "the message to all subscribers, but it will always return `None`.\n", + "If an agent raises an exception while handling a published message,\n", + "this will be logged but will not be propagated back to the publishing agent.\n", + "\n", + "The following example shows a `BroadcastingAgent` that \n", + "publishes a message to a topic upon receiving a message. " + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core import TopicId\n", + "\n", + "\n", + "class BroadcastingAgent(RoutedAgent):\n", + " @message_handler\n", + " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", + " await self.publish_message(\n", + " Message(\"Publishing a message from broadcasting agent!\"),\n", + " topic_id=TopicId(type=\"default\", source=self.id.key),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`BroadcastingAgent` publishes message to a topic with type `\"default\"`\n", + "and source assigned to the agent instance's agent key." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Subscriptions are registered with the agent runtime, either as part of\n", + "agent type's registration or through a separate API method.\n", + "Here is how we register {py:class}`~autogen_core.components.TypeSubscription`\n", + "for the receiving agent with the {py:meth}`~autogen_core.components.type_subscription` decorator,\n", + "and for the broadcasting agent without the decorator." + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Received a message: Hello, World! From the runtime!\n", + "Received a message: Publishing a message from broadcasting agent!\n" + ] + } + ], + "source": [ + "from autogen_core import TypeSubscription\n", + "\n", + "runtime = SingleThreadedAgentRuntime()\n", + "\n", + "# Option 1: with type_subscription decorator\n", + "# The type_subscription class decorator automatically adds a TypeSubscription to\n", + "# the runtime when the agent is registered.\n", + "await ReceivingAgent.register(runtime, \"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n", + "\n", + "# Option 2: with TypeSubscription\n", + "await BroadcastingAgent.register(runtime, \"broadcasting_agent\", lambda: BroadcastingAgent(\"Broadcasting Agent\"))\n", + "await runtime.add_subscription(TypeSubscription(topic_type=\"default\", agent_type=\"broadcasting_agent\"))\n", + "\n", + "# Start the runtime and publish a message.\n", + "runtime.start()\n", + "await runtime.publish_message(\n", + " Message(\"Hello, World! 
From the runtime!\"), topic_id=TopicId(type=\"default\", source=\"default\")\n", + ")\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As shown in the above example, you can also publish directly to a topic\n", + "through the runtime's {py:meth}`~autogen_core.AgentRuntime.publish_message` method\n", + "without the need to create an agent instance.\n", + "\n", + "From the output, you can see two messages were received by the receiving agent:\n", + "one was published through the runtime, and the other was published by the broadcasting agent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Default Topic and Subscriptions\n", + "\n", + "In the above example, we used\n", + "{py:class}`~autogen_core.TopicId` and {py:class}`~autogen_core.components.TypeSubscription`\n", + "to specify the topic and subscriptions respectively.\n", + "This is the appropriate way for many scenarios.\n", + "However, when there is a single scope of publishing, that is, \n", + "all agents publish and subscribe to all broadcasted messages,\n", + "we can use the convenience classes {py:class}`~autogen_core.components.DefaultTopicId`\n", + "and {py:meth}`~autogen_core.components.default_subscription` to simplify our code.\n", + "\n", + "{py:class}`~autogen_core.components.DefaultTopicId` is\n", + "for creating a topic that uses `\"default\"` as the default value for the topic type\n", + "and the publishing agent's key as the default value for the topic source.\n", + "{py:meth}`~autogen_core.components.default_subscription` is\n", + "for creating a type subscription that subscribes to the default topic.\n", + "We can simplify `BroadcastingAgent` by using\n", + "{py:class}`~autogen_core.components.DefaultTopicId` and {py:meth}`~autogen_core.components.default_subscription`." + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core import DefaultTopicId, default_subscription\n", + "\n", + "\n", + "@default_subscription\n", + "class BroadcastingAgentDefaultTopic(RoutedAgent):\n", + " @message_handler\n", + " async def on_my_message(self, message: Message, ctx: MessageContext) -> None:\n", + " # Publish a message to all agents in the same namespace.\n", + " await self.publish_message(\n", + " Message(\"Publishing a message from broadcasting agent!\"),\n", + " topic_id=DefaultTopicId(),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When the runtime calls {py:meth}`~autogen_core.BaseAgent.register` to register the agent type,\n", + "it creates a {py:class}`~autogen_core.components.TypeSubscription`\n", + "whose topic type uses `\"default\"` as the default value and \n", + "agent type uses the same agent type that is being registered in the same context." + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Received a message: Hello, World! 
From the runtime!\n", + "Received a message: Publishing a message from broadcasting agent!\n" + ] + } + ], + "source": [ + "runtime = SingleThreadedAgentRuntime()\n", + "await BroadcastingAgentDefaultTopic.register(\n", + " runtime, \"broadcasting_agent\", lambda: BroadcastingAgentDefaultTopic(\"Broadcasting Agent\")\n", + ")\n", + "await ReceivingAgent.register(runtime, \"receiving_agent\", lambda: ReceivingAgent(\"Receiving Agent\"))\n", + "runtime.start()\n", + "await runtime.publish_message(Message(\"Hello, World! From the runtime!\"), topic_id=DefaultTopicId())\n", + "await runtime.stop_when_idle()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "If your scenario allows all agents to publish and subscribe to\n", + "all broadcasted messages, use {py:class}`~autogen_core.components.DefaultTopicId`\n", + "and {py:meth}`~autogen_core.components.default_subscription` to decorate your\n", + "agent classes.\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "agnext", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb index 6d0caf98c..ae92a040f 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb @@ -365,12 +365,12 @@ "metadata": {}, "source": [ "The `SimpleAgent` class is a subclass of the\n", - "{py:class}`autogen_core.components.RoutedAgent` class for the convenience of automatically routing messages to the appropriate handlers.\n", + "{py:class}`autogen_core.RoutedAgent` class for the convenience of automatically routing messages to the appropriate handlers.\n", "It has a single handler, `handle_user_message`, which handles message from the user. It uses the `ChatCompletionClient` to generate a response to the message.\n", "It then returns the response to the user, following the direct communication model.\n", "\n", "```{note}\n", - "The `cancellation_token` of the type {py:class}`autogen_core.base.CancellationToken` is used to cancel\n", + "The `cancellation_token` of the type {py:class}`autogen_core.CancellationToken` is used to cancel\n", "asynchronous operations. 
It is linked to async calls inside the message handlers\n", "and can be used by the caller to cancel the handlers.\n", "```" diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb index 4e90cc5f2..1a2e8df84 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb @@ -70,9 +70,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The {py:class}`~autogen_core.components.code_executor.docker_executorCommandLineCodeExecutor`\n", + "The {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor`\n", "class is a built-in code executor that runs Python code snippets in a subprocess\n", - "in the local command line environment.\n", + "in the command line environment of a docker container.\n", "The {py:class}`~autogen_core.components.tools.PythonCodeExecutionTool` class wraps the code executor\n", "and provides a simple interface to execute Python code snippets.\n", "\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb index a0d781b80..88416adab 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/quickstart.ipynb @@ -21,7 +21,7 @@ "the agents.\n", "\n", "```{attention}\n", - "Code generated in this example is run within a [Docker](https://www.docker.com/) container. Please ensure Docker is [installed](https://docs.docker.com/get-started/get-docker/) and running prior to running the example. Local code execution is available ({py:class}`~autogen_core.components.code_executor.LocalCommandLineCodeExecutor`) but is not recommended due to the risk of running LLM generated code in your local environment.\n", + "Code generated in this example is run within a [Docker](https://www.docker.com/) container. Please ensure Docker is [installed](https://docs.docker.com/get-started/get-docker/) and running prior to running the example. 
Local code execution is available ({py:class}`~autogen_ext.code_executors.local.LocalCommandLineCodeExecutor`) but is not recommended due to the risk of running LLM generated code in your local environment.\n", "```" ] }, @@ -36,7 +36,7 @@ "from typing import List\n", "\n", "from autogen_core import DefaultTopicId, MessageContext, RoutedAgent, default_subscription, message_handler\n", - "from autogen_core.components.code_executor import CodeBlock, CodeExecutor\n", + "from autogen_core.code_executor import CodeBlock, CodeExecutor\n", "from autogen_core.components.models import (\n", " AssistantMessage,\n", " ChatCompletionClient,\n", diff --git a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb index 5b06b13ef..7bc7ef4da 100644 --- a/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/extensions-user-guide/azure-container-code-executor.ipynb @@ -24,7 +24,7 @@ "\n", "### Initialization\n", "\n", - "First, you will need to find or create a credentialing object that implements the {py:class}`~autogen_core.components.code_executor.TokenProvider` interface. This is any object that implements the following function\n", + "First, you will need to find or create a credentialing object that implements the {py:class}`~autogen_ext.code_executors.azure.TokenProvider` interface. This is any object that implements the following function\n", "```python\n", "def get_token(\n", " self, *scopes: str, claims: Optional[str] = None, tenant_id: Optional[str] = None, **kwargs: Any\n", @@ -208,7 +208,7 @@ "source": [ "### New Sessions\n", "\n", - "Every instance of the {py:class}`~autogen_core.components.code_executor.AzureContainerCodeExecutor` class will have a unique session ID. Every call to a particular code executor will be executed on the same session until the {py:meth}`~autogen_core.components.code_executor.AzureContainerCodeExecutor.restart` function is called on it. Previous sessions cannot be reused.\n", + "Every instance of the {py:class}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor` class will have a unique session ID. Every call to a particular code executor will be executed on the same session until the {py:meth}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor.restart` function is called on it. Previous sessions cannot be reused.\n", "\n", "Here we'll run some code on the code session, restart it, then verify that a new session has been opened." ] @@ -243,7 +243,7 @@ "source": [ "### Available Packages\n", "\n", - "Each code execution instance is pre-installed with most of the commonly used packages. However, the list of available packages and versions are not available outside of the execution environment. The packages list on the environment can be retrieved by calling the {py:meth}`~autogen_core.components.code_executor.AzureContainerCodeExecutor.get_available_packages` function on the code executor." + "Each code execution instance is pre-installed with most of the commonly used packages. However, the list of available packages and versions are not available outside of the execution environment. The packages list on the environment can be retrieved by calling the {py:meth}`~autogen_ext.code_executors.azure.AzureContainerCodeExecutor.get_available_packages` function on the code executor." 
] }, { diff --git a/python/packages/autogen-core/src/autogen_core/_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/_agent_runtime.py index c4025ce23..8156d3782 100644 --- a/python/packages/autogen-core/src/autogen_core/_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/_agent_runtime.py @@ -91,7 +91,7 @@ class AgentRuntime(Protocol): Args: type (str): The type of agent this factory creates. It is not the same as agent class name. The `type` parameter is used to differentiate between different factory functions rather than agent classes. - agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.base.AgentInstantiationContext` to access variables like the current runtime and agent ID. + agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.AgentInstantiationContext` to access variables like the current runtime and agent ID. subscriptions (Callable[[], list[Subscription]] | list[Subscription] | None, optional): The subscriptions that the agent should be subscribed to. Defaults to None. """ @@ -108,7 +108,7 @@ class AgentRuntime(Protocol): Args: type (str): The type of agent this factory creates. It is not the same as agent class name. The `type` parameter is used to differentiate between different factory functions rather than agent classes. - agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.base.AgentInstantiationContext` to access variables like the current runtime and agent ID. + agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.AgentInstantiationContext` to access variables like the current runtime and agent ID. """ ... diff --git a/python/packages/autogen-core/src/autogen_core/_base_agent.py b/python/packages/autogen-core/src/autogen_core/_base_agent.py index 03f080593..79bffd36d 100644 --- a/python/packages/autogen-core/src/autogen_core/_base_agent.py +++ b/python/packages/autogen-core/src/autogen_core/_base_agent.py @@ -122,7 +122,7 @@ class BaseAgent(ABC, Agent): *, cancellation_token: CancellationToken | None = None, ) -> Any: - """See :py:meth:`autogen_core.base.AgentRuntime.send_message` for more information.""" + """See :py:meth:`autogen_core.AgentRuntime.send_message` for more information.""" if cancellation_token is None: cancellation_token = CancellationToken() diff --git a/python/packages/autogen-core/src/autogen_core/_routed_agent.py b/python/packages/autogen-core/src/autogen_core/_routed_agent.py index ea23dfe63..a5908278c 100644 --- a/python/packages/autogen-core/src/autogen_core/_routed_agent.py +++ b/python/packages/autogen-core/src/autogen_core/_routed_agent.py @@ -104,7 +104,7 @@ def message_handler( - The method must have exactly 3 arguments: 1. `self` 2. `message`: The message to be handled, this must be type-hinted with the message type that it is intended to handle. - 3. `ctx`: A :class:`autogen_core.base.MessageContext` object. + 3. `ctx`: A :class:`autogen_core.MessageContext` object. - The method must be type hinted with what message types it can return as a response, or it can return `None` if it does not return anything. Handlers can handle more than one message type by accepting a Union of the message types. 
It can also return more than one message type by returning a Union of the message types. @@ -224,7 +224,7 @@ def event( - The method must have exactly 3 arguments: 1. `self` 2. `message`: The event message to be handled, this must be type-hinted with the message type that it is intended to handle. - 3. `ctx`: A :class:`autogen_core.base.MessageContext` object. + 3. `ctx`: A :class:`autogen_core.MessageContext` object. - The method must return `None`. Handlers can handle more than one message type by accepting a Union of the message types. @@ -344,7 +344,7 @@ def rpc( - The method must have exactly 3 arguments: 1. `self` 2. `message`: The message to be handled, this must be type-hinted with the message type that it is intended to handle. - 3. `ctx`: A :class:`autogen_core.base.MessageContext` object. + 3. `ctx`: A :class:`autogen_core.MessageContext` object. - The method must be type hinted with what message types it can return as a response, or it can return `None` if it does not return anything. Handlers can handle more than one message type by accepting a Union of the message types. It can also return more than one message type by returning a Union of the message types. diff --git a/python/packages/autogen-magentic-one/interface/magentic_one_helper.py b/python/packages/autogen-magentic-one/interface/magentic_one_helper.py index 1b2640757..3d7c25001 100644 --- a/python/packages/autogen-magentic-one/interface/magentic_one_helper.py +++ b/python/packages/autogen-magentic-one/interface/magentic_one_helper.py @@ -10,9 +10,9 @@ from autogen_core import SingleThreadedAgentRuntime from autogen_core.application.logging import EVENT_LOGGER_NAME from autogen_core import AgentId, AgentProxy from autogen_core import DefaultTopicId -from autogen_core.components.code_executor import LocalCommandLineCodeExecutor +from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor from autogen_ext.code_executor.docker_executor import DockerCommandLineCodeExecutor -from autogen_core.components.code_executor import CodeBlock +from autogen_core.code_executor import CodeBlock from autogen_magentic_one.agents.coder import Coder, Executor from autogen_magentic_one.agents.file_surfer import FileSurfer from autogen_magentic_one.agents.multimodal_web_surfer import MultimodalWebSurfer
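The handler contracts described in the `message_handler`, `event`, and `rpc` docstrings above (exactly three arguments, `event` handlers returning `None`, `rpc` handlers type-hinted with their response type) can be summarized with a short sketch. This sketch is not part of the patch; it assumes that `event` and `rpc` are importable from `autogen_core` alongside `RoutedAgent`, `MessageContext`, and `message_handler`, and the names `StatusUpdate`, `Question`, `Answer`, and `ExampleAgent` are hypothetical, used only for illustration.

```python
from dataclasses import dataclass

# Assumes event and rpc are exported from autogen_core, like message_handler.
from autogen_core import MessageContext, RoutedAgent, event, rpc


@dataclass
class StatusUpdate:
    text: str


@dataclass
class Question:
    text: str


@dataclass
class Answer:
    text: str


class ExampleAgent(RoutedAgent):
    def __init__(self) -> None:
        super().__init__("An agent illustrating event and rpc handlers.")

    @event
    async def on_status_update(self, message: StatusUpdate, ctx: MessageContext) -> None:
        # Event handlers take exactly (self, message, ctx) and must return None.
        print(f"Status: {message.text}")

    @rpc
    async def on_question(self, message: Question, ctx: MessageContext) -> Answer:
        # RPC handlers take the same three arguments and type-hint their response.
        return Answer(text=f"You asked: {message.text}")
```

As in the notebook examples earlier in this patch, messages are routed to the matching handler based on the type hint of the `message` parameter.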