Mirror of https://github.com/microsoft/autogen.git, synced 2025-06-26 22:30:10 +00:00
doc: Enrich AssistantAgent API documentation with usage examples. (#5653)
Resolves #5562
This commit is contained in: parent 08c82e4c6f · commit 0360ab9715
@@ -173,6 +173,8 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):

Examples:

**Example 1: basic agent**

The following example demonstrates how to create an assistant agent with
a model client and generate a response to a simple task.
@@ -200,10 +202,76 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):

    asyncio.run(main())
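The hunks above show only the closing line of Example 1's code; the rest is elided as unchanged diff context. A minimal sketch of what such a basic example might look like, assuming the same `on_messages` call pattern as the snippets below (the task text is illustrative):

.. code-block:: python

    import asyncio

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_core import CancellationToken
    from autogen_ext.models.openai import OpenAIChatCompletionClient


    async def main() -> None:
        model_client = OpenAIChatCompletionClient(
            model="gpt-4o",
            # api_key = "your_openai_api_key"
        )
        agent = AssistantAgent(name="assistant", model_client=model_client)

        # Send a single user message and print the agent's response.
        response = await agent.on_messages(
            [TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
        )
        print(response.chat_message.content)  # type: ignore


    asyncio.run(main())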
**Example 2: model client token streaming**

This example demonstrates how to create an assistant agent with
a model client and generate a token stream by setting `model_client_stream=True`.

.. code-block:: python

    import asyncio
    from autogen_ext.models.openai import OpenAIChatCompletionClient
    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_core import CancellationToken


    async def main() -> None:
        model_client = OpenAIChatCompletionClient(
            model="gpt-4o",
            # api_key = "your_openai_api_key"
        )
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
            model_client_stream=True,
        )

        stream = agent.on_messages_stream(
            [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
        )
        async for message in stream:
            print(message)


    asyncio.run(main())
.. code-block:: text

    source='assistant' models_usage=None content='Two' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' cities' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' North' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' America' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' are' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' New' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' York' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' City' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' the' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' United' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' States' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' and' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' Toronto' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' in' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' Canada' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content='.' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content=' TERMIN' type='ModelClientStreamingChunkEvent'
    source='assistant' models_usage=None content='ATE' type='ModelClientStreamingChunkEvent'
    Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content='Two cities in North America are New York City in the United States and Toronto in Canada. TERMINATE', type='TextMessage'), inner_messages=[])
**Example 3: agent with tools**

The following example demonstrates how to create an assistant agent with
a model client and a tool, generate a stream of messages for a task, and
print the messages to the console using :class:`~autogen_agentchat.ui.Console`.

The tool is a simple function that returns the current time.
Under the hood, the function is wrapped in a :class:`~autogen_core.tools.FunctionTool`
and used with the agent's model client. The docstring of the function
is used as the tool description, the function name is used as the tool name,
and the function signature including the type hints is used as the tool arguments.

.. code-block:: python
@@ -235,6 +303,199 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):

    asyncio.run(main())
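The hunk elides the body of Example 3 as unchanged context; only its closing line is shown above. A minimal sketch consistent with the description (the `get_current_time` helper is an illustrative assumption; a plain function passed via `tools=` is wrapped in a :class:`~autogen_core.tools.FunctionTool` automatically, per the text above):

.. code-block:: python

    import asyncio
    from datetime import datetime

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_agentchat.ui import Console
    from autogen_core import CancellationToken
    from autogen_ext.models.openai import OpenAIChatCompletionClient


    def get_current_time() -> str:
        \"\"\"Get the current time as a string.\"\"\"
        return datetime.now().strftime("%H:%M:%S")


    async def main() -> None:
        model_client = OpenAIChatCompletionClient(
            model="gpt-4o",
            # api_key = "your_openai_api_key"
        )
        # The function's docstring becomes the tool description.
        agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time])

        # Stream the messages for the task to the console.
        stream = agent.on_messages_stream(
            [TextMessage(content="What is the current time?", source="user")], CancellationToken()
        )
        await Console(stream)


    asyncio.run(main())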
**Example 4: agent with structured output and tool**

The following example demonstrates how to create an assistant agent with
a model client configured to use structured output and a tool.
Note that you need to use :class:`~autogen_core.tools.FunctionTool` to create the tool,
and `strict=True` is required for structured output mode.
Because the model is configured to use structured output, the
reflection response will be a JSON-formatted string.

.. code-block:: python
    import asyncio
    from typing import Literal

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_agentchat.ui import Console
    from autogen_core import CancellationToken
    from autogen_core.tools import FunctionTool
    from autogen_ext.models.openai import OpenAIChatCompletionClient
    from pydantic import BaseModel


    # Define the structured output format.
    class AgentResponse(BaseModel):
        thoughts: str
        response: Literal["happy", "sad", "neutral"]


    # Define the function to be called as a tool.
    def sentiment_analysis(text: str) -> str:
        \"\"\"Given a text, return the sentiment.\"\"\"
        return "happy" if "happy" in text else "sad" if "sad" in text else "neutral"


    # Create a FunctionTool instance with `strict=True`,
    # which is required for structured output mode.
    tool = FunctionTool(sentiment_analysis, description="Sentiment Analysis", strict=True)

    # Create an OpenAIChatCompletionClient instance that uses the structured output format.
    model_client = OpenAIChatCompletionClient(
        model="gpt-4o-mini",
        response_format=AgentResponse,  # type: ignore
    )

    # Create an AssistantAgent instance that uses the tool and model client.
    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        tools=[tool],
        system_message="Use the tool to analyze sentiment.",
        reflect_on_tool_use=True,  # Use reflection to have the agent generate a formatted response.
    )


    async def main() -> None:
        stream = agent.on_messages_stream([TextMessage(content="I am happy today!", source="user")], CancellationToken())
        await Console(stream)


    asyncio.run(main())
.. code-block:: text

    ---------- assistant ----------
    [FunctionCall(id='call_tIZjAVyKEDuijbBwLY6RHV2p', arguments='{"text":"I am happy today!"}', name='sentiment_analysis')]
    ---------- assistant ----------
    [FunctionExecutionResult(content='happy', call_id='call_tIZjAVyKEDuijbBwLY6RHV2p', is_error=False)]
    ---------- assistant ----------
    {"thoughts":"The user expresses a clear positive emotion by stating they are happy today, suggesting an upbeat mood.","response":"happy"}
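Since the reflection response is a JSON string conforming to `AgentResponse`, a caller can validate it back into the Pydantic model; a small sketch (the `response` variable is assumed to hold the result of an `on_messages` call):

.. code-block:: python

    # Parse the JSON-formatted reflection response into the Pydantic model.
    result = AgentResponse.model_validate_json(response.chat_message.content)  # type: ignore
    print(result.thoughts, result.response)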
**Example 5: agent with bounded model context**

The following example shows how to use a
:class:`~autogen_core.model_context.BufferedChatCompletionContext`
that only keeps the last 2 messages (1 user + 1 assistant).
A bounded model context is useful when the model has a limit on the
number of tokens it can process.

.. code-block:: python
    import asyncio

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_core import CancellationToken
    from autogen_core.model_context import BufferedChatCompletionContext
    from autogen_ext.models.openai import OpenAIChatCompletionClient


    async def main() -> None:
        # Create a model client.
        model_client = OpenAIChatCompletionClient(
            model="gpt-4o-mini",
            # api_key = "your_openai_api_key"
        )

        # Create a model context that only keeps the last 2 messages (1 user + 1 assistant).
        model_context = BufferedChatCompletionContext(buffer_size=2)

        # Create an AssistantAgent instance with the model client and context.
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
            model_context=model_context,
            system_message="You are a helpful assistant.",
        )

        response = await agent.on_messages(
            [TextMessage(content="Name two cities in North America.", source="user")], CancellationToken()
        )
        print(response.chat_message.content)  # type: ignore

        response = await agent.on_messages(
            [TextMessage(content="My favorite color is blue.", source="user")], CancellationToken()
        )
        print(response.chat_message.content)  # type: ignore

        response = await agent.on_messages(
            [TextMessage(content="Did I ask you any question?", source="user")], CancellationToken()
        )
        print(response.chat_message.content)  # type: ignore


    asyncio.run(main())
.. code-block:: text

    Two cities in North America are New York City and Toronto.
    That's great! Blue is often associated with calmness and serenity. Do you have a specific shade of blue that you like, or any particular reason why it's your favorite?
    No, you didn't ask a question. I apologize for any misunderstanding. If you have something specific you'd like to discuss or ask, feel free to let me know!
**Example 6: agent with memory**

The following example shows how to use a list-based memory with the assistant agent.
The memory is preloaded with some initial content.
Under the hood, the memory is used to update the model context
before making an inference, using the :meth:`~autogen_core.memory.Memory.update_context` method.

.. code-block:: python
    import asyncio

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_core import CancellationToken
    from autogen_core.memory import ListMemory, MemoryContent
    from autogen_ext.models.openai import OpenAIChatCompletionClient


    async def main() -> None:
        # Create a model client.
        model_client = OpenAIChatCompletionClient(
            model="gpt-4o-mini",
            # api_key = "your_openai_api_key"
        )

        # Create a list-based memory with some initial content.
        memory = ListMemory()
        await memory.add(MemoryContent(content="User likes pizza.", mime_type="text/plain"))
        await memory.add(MemoryContent(content="User dislikes cheese.", mime_type="text/plain"))

        # Create an AssistantAgent instance with the model client and memory.
        agent = AssistantAgent(
            name="assistant",
            model_client=model_client,
            memory=[memory],
            system_message="You are a helpful assistant.",
        )

        response = await agent.on_messages(
            [TextMessage(content="One idea for a dinner.", source="user")], CancellationToken()
        )
        print(response.chat_message.content)  # type: ignore


    asyncio.run(main())
.. code-block:: text

    How about making a delicious pizza without cheese? You can create a flavorful veggie pizza with a variety of toppings. Here's a quick idea:

    **Veggie Tomato Sauce Pizza**
    - Start with a pizza crust (store-bought or homemade).
    - Spread a layer of marinara or tomato sauce evenly over the crust.
    - Top with your favorite vegetables like bell peppers, mushrooms, onions, olives, and spinach.
    - Add some protein if you’d like, such as grilled chicken or pepperoni (ensure it's cheese-free).
    - Sprinkle with herbs like oregano and basil, and maybe a drizzle of olive oil.
    - Bake according to the crust instructions until the edges are golden and the veggies are cooked.

    Serve it with a side salad or some garlic bread to complete the meal! Enjoy your dinner!
**Example 7: agent with `o1-mini`**

The following example shows how to use the `o1-mini` model with the assistant agent.
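The diff ends before this example's code. A minimal sketch consistent with the other snippets; note that passing `system_message=None` here is an assumption, based on the o1 series not accepting system messages:

.. code-block:: python

    import asyncio

    from autogen_agentchat.agents import AssistantAgent
    from autogen_agentchat.messages import TextMessage
    from autogen_core import CancellationToken
    from autogen_ext.models.openai import OpenAIChatCompletionClient


    async def main() -> None:
        model_client = OpenAIChatCompletionClient(
            model="o1-mini",
            # api_key = "your_openai_api_key"
        )
        # Assumption: the o1 series does not support system messages,
        # so the system message is disabled here.
        agent = AssistantAgent(name="assistant", model_client=model_client, system_message=None)

        response = await agent.on_messages(
            [TextMessage(content="What is the capital of France?", source="user")], CancellationToken()
        )
        print(response.chat_message.content)  # type: ignore


    asyncio.run(main())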
@@ -14,10 +14,10 @@ from autogen_agentchat.messages import (
    ModelClientStreamingChunkEvent,
    MultiModalMessage,
    TextMessage,
    ThoughtEvent,
    ToolCallExecutionEvent,
    ToolCallRequestEvent,
    ToolCallSummaryMessage,
)
from autogen_core import ComponentModel, FunctionCall, Image
from autogen_core.memory import ListMemory, Memory, MemoryContent, MemoryMimeType, MemoryQueryResult