Mirror of https://github.com/microsoft/autogen.git
Update agent documentation (#6394)
* Replace on_messages and on_messages_stream with run and run_stream to unify interface documentation with teams.
* Remove magentic-one-cli from homepage as it has not been maintained or improved for a while.
Commit d96aaebc8d (parent bab0dfd1e7)
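The change is mechanical across the docstring examples: per-call `on_messages(...)`/`on_messages_stream(...)` invocations become task-based `run(...)`/`run_stream(...)` calls. A minimal sketch of the new call pattern the hunks below document, assuming an OpenAI client with credentials available in the environment (the model name here is illustrative):

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    model_client = OpenAIChatCompletionClient(model="gpt-4o")  # illustrative model choice
    agent = AssistantAgent(name="assistant", model_client=model_client)

    # The unified interface: pass the task as a string instead of building
    # TextMessage lists and a CancellationToken by hand.
    result = await agent.run(task="Say hello in one word.")
    print(result.messages[-1].content)


asyncio.run(main())
```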
@@ -90,10 +90,20 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
     the inner messages as they are created, and the :class:`~autogen_agentchat.base.Response`
     object as the last item before closing the generator.
 
+    The :meth:`BaseChatAgent.run` method returns a :class:`~autogen_agentchat.base.TaskResult`
+    containing the messages produced by the agent. In the list of messages,
+    :attr:`~autogen_agentchat.base.TaskResult.messages`,
+    the last message is the final response message.
+
+    The :meth:`BaseChatAgent.run_stream` method creates an async generator that produces
+    the inner messages as they are created, and the :class:`~autogen_agentchat.base.TaskResult`
+    object as the last item before closing the generator.
+
     .. attention::
 
         The caller must only pass the new messages to the agent on each call
-        to the :meth:`on_messages` or :meth:`on_messages_stream` method.
+        to the :meth:`on_messages`, :meth:`on_messages_stream`, :meth:`BaseChatAgent.run`,
+        or :meth:`BaseChatAgent.run_stream` methods.
        The agent maintains its state between calls to these methods.
        Do not pass the entire conversation history to the agent on each call.
 
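The attention note above is the behavioral crux of the change: the agent is stateful, so each `run` call should carry only the new message, not the accumulated history. A minimal sketch of that multi-turn pattern, again assuming an OpenAI client configured via the environment:

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    model_client = OpenAIChatCompletionClient(model="gpt-4o")  # illustrative model choice
    agent = AssistantAgent(name="assistant", model_client=model_client)

    # Turn 1: pass only the new user message as the task.
    await agent.run(task="My favorite color is blue.")

    # Turn 2: the agent keeps its own history between calls,
    # so do NOT resend turn 1; send just the follow-up.
    result = await agent.run(task="What is my favorite color?")
    print(result.messages[-1].content)


asyncio.run(main())
```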
@@ -215,10 +225,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
     .. code-block:: python
 
         import asyncio
-        from autogen_core import CancellationToken
         from autogen_ext.models.openai import OpenAIChatCompletionClient
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
 
 
         async def main() -> None:
@@ -228,10 +236,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
             )
             agent = AssistantAgent(name="assistant", model_client=model_client)
 
-            response = await agent.on_messages(
-                [TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
-            )
-            print(response)
+            result = await agent.run(task="Name two cities in North America.")
+            print(result)
 
 
         asyncio.run(main())
@@ -246,8 +252,6 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
         import asyncio
         from autogen_ext.models.openai import OpenAIChatCompletionClient
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
-        from autogen_core import CancellationToken
 
 
         async def main() -> None:
@@ -261,9 +265,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
                 model_client_stream=True,
             )
 
-            stream = agent.on_messages_stream(
-                [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
-            )
+            stream = agent.run_stream(task="Name two cities in North America.")
             async for message in stream:
                 print(message)
 
@@ -272,27 +274,23 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
 
     .. code-block:: text
 
-        source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content=' TERMIN' type='ModelClientStreamingChunkEvent'
-        source='assistant' models_usage=None content='ATE' type='ModelClientStreamingChunkEvent'
-        Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada. TERMINATE', type='TextMessage'), inner_messages=[])
+        source='user' models_usage=None metadata={} content='Name two cities in North America.' type='TextMessage'
+        source='assistant' models_usage=None metadata={} content='Two' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' cities' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' in' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' North' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' America' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' are' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' New' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' York' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' City' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' and' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' Toronto' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content='.' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content=' TERMIN' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=None metadata={} content='ATE' type='ModelClientStreamingChunkEvent'
+        source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) metadata={} content='Two cities in North America are New York City and Toronto. TERMINATE' type='TextMessage'
+        messages=[TextMessage(source='user', models_usage=None, metadata={}, content='Name two cities in North America.', type='TextMessage'), TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), metadata={}, content='Two cities in North America are New York City and Toronto. TERMINATE', type='TextMessage')] stop_reason=None
 
 
     **Example 3: agent with tools**
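The new output above interleaves `ModelClientStreamingChunkEvent` items with a final `TaskResult`. A sketch of consuming `run_stream` and separating token chunks from the closing result, assuming the message and result types exported by `autogen_agentchat` at the time of this commit (model name illustrative):

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import ModelClientStreamingChunkEvent
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    model_client = OpenAIChatCompletionClient(model="gpt-4o")  # illustrative model choice
    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        model_client_stream=True,  # emit ModelClientStreamingChunkEvent items
    )

    async for item in agent.run_stream(task="Name two cities in North America."):
        if isinstance(item, ModelClientStreamingChunkEvent):
            print(item.content, end="", flush=True)  # token-by-token chunks
        elif isinstance(item, TaskResult):
            # The generator closes with the full TaskResult.
            print("\nfinal:", item.messages[-1].content)


asyncio.run(main())
```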
@@ -312,9 +310,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
         import asyncio
         from autogen_ext.models.openai import OpenAIChatCompletionClient
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
         from autogen_agentchat.ui import Console
-        from autogen_core import CancellationToken
 
 
         async def get_current_time() -> str:
@@ -327,12 +323,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
                 # api_key = "your_openai_api_key"
             )
             agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time])
-
-            await Console(
-                agent.on_messages_stream(
-                    [TextMessage(content="What is the current time?", source="user")], CancellationToken()
-                )
-            )
+            await Console(agent.run_stream(task="What is the current time?"))
 
 
         asyncio.run(main())
@@ -390,9 +381,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
         from typing import Literal
 
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
         from autogen_agentchat.ui import Console
-        from autogen_core import CancellationToken
         from autogen_core.tools import FunctionTool
         from autogen_ext.models.openai import OpenAIChatCompletionClient
         from pydantic import BaseModel
@@ -430,7 +419,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
 
 
         async def main() -> None:
-            stream = agent.on_messages_stream([TextMessage(content="I am happy today!", source="user")], CancellationToken())
+            stream = agent.run_stream(task="I am happy today!")
             await Console(stream)
 
 
@@ -458,8 +447,6 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
         import asyncio
 
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
-        from autogen_core import CancellationToken
         from autogen_core.model_context import BufferedChatCompletionContext
         from autogen_ext.models.openai import OpenAIChatCompletionClient
 
@@ -482,20 +469,14 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
                 system_message="You are a helpful assistant.",
             )
 
-            response = await agent.on_messages(
-                [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
-            )
-            print(response.chat_message.content)  # type: ignore
+            result = await agent.run(task="Name two cities in North America.")
+            print(result.messages[-1].content)  # type: ignore
 
-            response = await agent.on_messages(
-                [TextMessage(content="My favorite color is blue.", source="user")], CancellationToken()
-            )
-            print(response.chat_message.content)  # type: ignore
+            result = await agent.run(task="My favorite color is blue.")
+            print(result.messages[-1].content)  # type: ignore
 
-            response = await agent.on_messages(
-                [TextMessage(content="Did I ask you any question?", source="user")], CancellationToken()
-            )
-            print(response.chat_message.content)  # type: ignore
+            result = await agent.run(task="Did I ask you any question?")
+            print(result.messages[-1].content)  # type: ignore
 
 
         asyncio.run(main())
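The three `run` calls above exercise an agent whose model context is bounded, so earlier turns can fall out of the window the model sees. A minimal sketch of constructing such an agent with `BufferedChatCompletionContext` (the `buffer_size` value here is an illustrative assumption; the docstring example being patched defines its own):

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_core.model_context import BufferedChatCompletionContext
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    model_client = OpenAIChatCompletionClient(model="gpt-4o")  # illustrative model choice
    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        # Only the last 5 messages are sent to the model on each call, so
        # an early turn such as "My favorite color is blue." can be forgotten.
        model_context=BufferedChatCompletionContext(buffer_size=5),
        system_message="You are a helpful assistant.",
    )

    result = await agent.run(task="Name two cities in North America.")
    print(result.messages[-1].content)


asyncio.run(main())
```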
@@ -518,8 +499,6 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
         import asyncio
 
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
-        from autogen_core import CancellationToken
         from autogen_core.memory import ListMemory, MemoryContent
         from autogen_ext.models.openai import OpenAIChatCompletionClient
 
@@ -544,10 +523,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
                 system_message="You are a helpful assistant.",
             )
 
-            response = await agent.on_messages(
-                [TextMessage(content="One idea for a dinner.", source="user")], CancellationToken()
-            )
-            print(response.chat_message.content)  # type: ignore
+            result = await agent.run(task="What is a good dinner idea?")
+            print(result.messages[-1].content)  # type: ignore
 
 
         asyncio.run(main())
@@ -573,10 +550,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
     .. code-block:: python
 
         import asyncio
-        from autogen_core import CancellationToken
         from autogen_ext.models.openai import OpenAIChatCompletionClient
         from autogen_agentchat.agents import AssistantAgent
-        from autogen_agentchat.messages import TextMessage
 
 
         async def main() -> None:
@@ -587,10 +562,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
             # The system message is not supported by the o1 series model.
             agent = AssistantAgent(name="assistant", model_client=model_client, system_message=None)
 
-            response = await agent.on_messages(
-                [TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
-            )
-            print(response)
+            result = await agent.run(task="What is the capital of France?")
+            print(result.messages[-1].content)  # type: ignore
 
 
         asyncio.run(main())
@@ -46,40 +46,12 @@ A framework for building AI agents and applications
 ::::{grid}
 :gutter: 2
 
-:::{grid-item-card}
-:shadow: none
-:margin: 2 0 0 0
-:columns: 12 12 6 6
-
-<div class="sd-card-title sd-font-weight-bold docutils">
-
-{fas}`book;pst-color-primary`
-Magentic-One CLI [](https://pypi.org/project/magentic-one-cli/)
-</div>
-A console-based multi-agent assistant for web and file-based tasks.
-Built on AgentChat.
-
-```bash
-pip install -U magentic-one-cli
-m1 "Find flights from Seattle to Paris and format the result in a table"
-```
-
-+++
-
-```{button-ref} user-guide/agentchat-user-guide/magentic-one
-:color: secondary
-
-Get Started
-```
-
-:::
-
 :::{grid-item-card} {fas}`palette;pst-color-primary` Studio [](https://pypi.org/project/autogenstudio/)
 :shadow: none
 :margin: 2 0 0 0
-:columns: 12 12 6 6
+:columns: 12 12 12 12
 
-An app for prototyping and managing agents without writing code.
+A web-based UI for prototyping with agents without writing code.
 Built on AgentChat.
 
 ```bash
@@ -87,6 +59,8 @@ pip install -U autogenstudio
 autogenstudio ui --port 8080 --appdir ./myapp
 ```
 
+_Start here if you are new to AutoGen and want to prototype with agents without writing code._
+
 +++
 
 ```{button-ref} user-guide/autogenstudio-user-guide/index
@@ -124,7 +98,7 @@ async def main() -> None:
 asyncio.run(main())
 ```
 
-_Start here if you are building conversational agents. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._
+_Start here if you are prototyping with agents using Python. [Migrating from AutoGen 0.2?](./user-guide/agentchat-user-guide/migration-guide.md)._
 
 +++
 
@@ -147,7 +121,7 @@ An event-driven programming framework for building scalable multi-agent AI systems
 * Research on multi-agent collaboration.
 * Distributed agents for multi-language applications.
 
-_Start here if you are building workflows or distributed agent systems._
+_Start here if you are getting serious about building multi-agent systems._
 
 +++
 
@@ -167,7 +141,7 @@ Get Started
 Implementations of Core and AgentChat components that interface with external services or other libraries.
 You can find and use community extensions or create your own. Examples of built-in extensions:
 
-* {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter` for using LangChain tools.
+* {py:class}`~autogen_ext.tools.mcp.McpWorkbench` for using Model-Context Protocol (MCP) servers.
 * {py:class}`~autogen_ext.agents.openai.OpenAIAssistantAgent` for using Assistant API.
 * {py:class}`~autogen_ext.code_executors.docker.DockerCommandLineCodeExecutor` for running model-generated code in a Docker container.
 * {py:class}`~autogen_ext.runtimes.grpc.GrpcWorkerAgentRuntime` for distributed agents.
@@ -16,7 +16,9 @@
 "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n",
 "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.BaseChatMessage` message types the agent can produce in its response.\n",
 "\n",
-"Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
+"Optionally, you can implement the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent.\n",
+"This method is called by {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run_stream` to stream messages.\n",
+"If this method is not implemented, the agent\n",
 "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
 "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
 "yields all messages in the response."
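Since `run_stream` delegates to `on_messages_stream`, and the default `on_messages_stream` calls `on_messages` and yields the response, a custom agent only needs `on_messages` (plus `on_reset` and `produced_message_types`) to support both `run` and `run_stream`. A rough sketch of such an agent; the `EchoAgent` name and echo behavior are invented for illustration, and the base-class signatures follow the abstract methods listed above:

```python
from typing import Sequence

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import BaseChatMessage, TextMessage
from autogen_core import CancellationToken


class EchoAgent(BaseChatAgent):
    """Echoes the last message back; run/run_stream work via the default implementations."""

    async def on_messages(
        self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken
    ) -> Response:
        # Echo the text of the last incoming message (to_text() is assumed
        # to be available on BaseChatMessage as in this version of the library).
        content = messages[-1].to_text() if messages else "(no input)"
        return Response(chat_message=TextMessage(content=content, source=self.name))

    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        pass  # stateless, nothing to reset

    @property
    def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
        return (TextMessage,)
```

Usage would then be, for example, `result = await EchoAgent("echo", "Echoes input.").run(task="hi")`.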
@@ -731,7 +733,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.3"
+"version": "3.12.7"
 }
 },
 "nbformat": 4,
File diff suppressed because one or more lines are too long