2024-10-22 13:27:06 -07:00
|
|
|
import json
|
2024-10-29 08:04:14 -07:00
|
|
|
import logging
|
2025-03-26 05:38:07 +08:00
|
|
|
from typing import Dict, List
|
2024-10-22 13:27:06 -07:00
|
|
|
|
|
|
|
import pytest
|
2024-10-29 08:04:14 -07:00
|
|
|
from autogen_agentchat import EVENT_LOGGER_NAME
|
2024-12-03 14:34:55 -08:00
|
|
|
from autogen_agentchat.agents import AssistantAgent
|
|
|
|
from autogen_agentchat.base import Handoff, TaskResult
|
2024-11-07 21:38:41 -08:00
|
|
|
from autogen_agentchat.messages import (
|
2025-03-30 09:34:40 -07:00
|
|
|
BaseChatMessage,
|
2024-11-07 21:38:41 -08:00
|
|
|
HandoffMessage,
|
Memory Interface in AgentChat (#4438)
* initial base memroy impl
* update, add example with chromadb
* include mimetype consideration
* add transform method
* update to address feedback, will update after 4681 is merged
* update memory impl,
* remove chroma db, typing fixes
* format, add test
* update uv lock
* update docs
* format updates
* update notebook
* add memoryqueryevent message, yield message for observability.
* minor fixes, make score optional/none
* Update python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
* update tests to improve cov
* refactor, move memory to core.
* format fixxes
* format updates
* format updates
* fix azure notebook import, other fixes
* update notebook, support str query in Memory protocol
* update test
* update cells
* add specific extensible return types to memory query and update_context
---------
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2025-01-13 23:06:13 -08:00
|
|
|
MemoryQueryEvent,
|
2025-01-28 18:49:02 -08:00
|
|
|
ModelClientStreamingChunkEvent,
|
2024-11-07 21:38:41 -08:00
|
|
|
MultiModalMessage,
|
2025-03-26 16:19:52 -07:00
|
|
|
StructuredMessage,
|
2024-11-07 21:38:41 -08:00
|
|
|
TextMessage,
|
2025-02-24 08:57:34 -07:00
|
|
|
ThoughtEvent,
|
2024-12-18 14:09:19 -08:00
|
|
|
ToolCallExecutionEvent,
|
|
|
|
ToolCallRequestEvent,
|
2024-12-20 00:23:18 -05:00
|
|
|
ToolCallSummaryMessage,
|
2024-11-07 21:38:41 -08:00
|
|
|
)
|
2025-02-05 19:07:27 -05:00
|
|
|
from autogen_core import ComponentModel, FunctionCall, Image
|
Memory Interface in AgentChat (#4438)
* initial base memroy impl
* update, add example with chromadb
* include mimetype consideration
* add transform method
* update to address feedback, will update after 4681 is merged
* update memory impl,
* remove chroma db, typing fixes
* format, add test
* update uv lock
* update docs
* format updates
* update notebook
* add memoryqueryevent message, yield message for observability.
* minor fixes, make score optional/none
* Update python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
* update tests to improve cov
* refactor, move memory to core.
* format fixxes
* format updates
* format updates
* fix azure notebook import, other fixes
* update notebook, support str query in Memory protocol
* update test
* update cells
* add specific extensible return types to memory query and update_context
---------
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2025-01-13 23:06:13 -08:00
|
|
|
from autogen_core.memory import ListMemory, Memory, MemoryContent, MemoryMimeType, MemoryQueryResult
|
2024-12-29 07:50:54 +01:00
|
|
|
from autogen_core.model_context import BufferedChatCompletionContext
|
2025-02-04 06:55:04 -08:00
|
|
|
from autogen_core.models import (
|
|
|
|
AssistantMessage,
|
|
|
|
CreateResult,
|
|
|
|
FunctionExecutionResult,
|
2025-03-13 14:29:46 -07:00
|
|
|
FunctionExecutionResultMessage,
|
2025-02-04 06:55:04 -08:00
|
|
|
LLMMessage,
|
|
|
|
RequestUsage,
|
|
|
|
SystemMessage,
|
|
|
|
UserMessage,
|
|
|
|
)
|
2025-05-14 19:02:34 +02:00
|
|
|
from autogen_core.models._model_client import ModelFamily, ModelInfo
|
2025-04-24 16:19:36 -07:00
|
|
|
from autogen_core.tools import BaseTool, FunctionTool, StaticWorkbench
|
2024-12-10 13:18:09 +10:00
|
|
|
from autogen_ext.models.openai import OpenAIChatCompletionClient
|
2025-01-28 18:49:02 -08:00
|
|
|
from autogen_ext.models.replay import ReplayChatCompletionClient
|
2025-04-29 03:37:09 +09:00
|
|
|
from autogen_ext.tools.mcp import (
|
|
|
|
McpWorkbench,
|
|
|
|
SseServerParams,
|
|
|
|
)
|
2025-04-17 05:00:14 +10:00
|
|
|
from pydantic import BaseModel, ValidationError
|
2025-05-23 14:29:24 +09:00
|
|
|
from utils import FileLogHandler, compare_messages, compare_task_results
|
2024-10-22 13:27:06 -07:00
|
|
|
|
2024-10-29 08:04:14 -07:00
|
|
|
# Event logger shared by the AgentChat test suite; DEBUG so every agent event is
# captured, and persisted to a file for post-run inspection of failing tests.
logger = logging.getLogger(EVENT_LOGGER_NAME)
logger.setLevel(logging.DEBUG)
logger.addHandler(FileLogHandler("test_assistant_agent.log"))
|
|
|
|
|
2024-10-22 13:27:06 -07:00
|
|
|
|
|
|
|
def _pass_function(input: str) -> str:
|
|
|
|
return "pass"
|
|
|
|
|
|
|
|
|
|
|
|
async def _fail_function(input: str) -> str:
|
|
|
|
return "fail"
|
|
|
|
|
|
|
|
|
2025-05-14 19:02:34 +02:00
|
|
|
async def _throw_function(input: str) -> str:
|
|
|
|
raise ValueError("Helpful debugging information what went wrong.")
|
|
|
|
|
|
|
|
|
2024-10-22 13:27:06 -07:00
|
|
|
async def _echo_function(input: str) -> str:
|
|
|
|
return input
|
|
|
|
|
|
|
|
|
2025-05-14 19:02:34 +02:00
|
|
|
@pytest.fixture
def model_info_all_capabilities() -> ModelInfo:
    """Fixture: a :class:`ModelInfo` advertising every capability enabled.

    Lets replay-client tests exercise function calling, vision, JSON output,
    and structured output without capability checks getting in the way.
    """
    capabilities: ModelInfo = {
        "function_calling": True,
        "vision": True,
        "json_output": True,
        "family": ModelFamily.GPT_4O,
        "structured_output": True,
    }
    return capabilities
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_run_with_tool_call_summary_format_function(model_info_all_capabilities: ModelInfo) -> None:
    """A ``tool_call_summary_formatter`` can choose a template per call, based on
    whether that individual tool call succeeded or errored."""
    # Replay client produces a single turn requesting two tool calls:
    # one that succeeds (_pass_function) and one that raises (_throw_function).
    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="function_calls",
                content=[
                    FunctionCall(id="1", arguments=json.dumps({"input": "task"}), name="_pass_function"),
                    FunctionCall(id="2", arguments=json.dumps({"input": "task"}), name="_throw_function"),
                ],
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                thought="Calling pass and fail function",
                cached=False,
            ),
        ],
        model_info=model_info_all_capabilities,
    )

    def conditional_string_templates(function_call: FunctionCall, function_call_result: FunctionExecutionResult) -> str:
        # Pick the summary template according to the per-call error flag.
        if function_call_result.is_error:
            return "FAILURE: {result}"
        return "SUCCESS: {tool_name} with {arguments}"

    agent = AssistantAgent(
        "tool_use_agent",
        model_client=model_client,
        tools=[_pass_function, _throw_function],
        tool_call_summary_formatter=conditional_string_templates,
    )

    run_result = await agent.run(task="task")

    # Locate the first summary message produced by the run.
    summary_message = next((m for m in run_result.messages if isinstance(m, ToolCallSummaryMessage)), None)
    if summary_message is None:
        raise AssertionError("Expected a ToolCallSummaryMessage but found none")

    # Both templates were applied: the SUCCESS line for the passing call,
    # then the FAILURE line carrying the raised exception's message.
    expected = 'SUCCESS: _pass_function with {"input": "task"}\nFAILURE: Helpful debugging information what went wrong.'
    assert summary_message.content == expected
|
|
|
|
|
|
|
|
|
2024-10-22 13:27:06 -07:00
|
|
|
@pytest.mark.asyncio
async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None:
    """End-to-end AssistantAgent tool run: verifies the message sequence from a
    replayed function call (thought -> request -> execution -> summary), then
    re-checks it via streaming, and finally round-trips agent state.

    NOTE(review): the ``monkeypatch`` fixture appears unused in this body —
    confirm before removing it from the signature.
    """
    # Replay script: one function-call turn (with a thought), then two plain
    # text completions consumed by any follow-up create calls.
    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="function_calls",
                content=[FunctionCall(id="1", arguments=json.dumps({"input": "task"}), name="_pass_function")],
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                thought="Calling pass function",
                cached=False,
            ),
            "pass",
            "TERMINATE",
        ],
        model_info={
            "function_calling": True,
            "vision": True,
            "json_output": True,
            "family": ModelFamily.GPT_4O,
            "structured_output": True,
        },
    )
    agent = AssistantAgent(
        "tool_use_agent",
        model_client=model_client,
        tools=[
            _pass_function,
            _fail_function,
            FunctionTool(_echo_function, description="Echo"),
        ],
    )
    result = await agent.run(task="task")

    # Make sure the create call was made with the correct parameters.
    assert len(model_client.create_calls) == 1
    llm_messages = model_client.create_calls[0]["messages"]
    assert len(llm_messages) == 2
    # First message is the agent's system prompt, second is the user task.
    assert isinstance(llm_messages[0], SystemMessage)
    assert llm_messages[0].content == agent._system_messages[0].content  # type: ignore
    assert isinstance(llm_messages[1], UserMessage)
    assert llm_messages[1].content == "task"

    # Expected sequence: user text, thought, tool-call request, tool-call
    # execution, then the tool-call summary. Only the request event carries
    # model usage (it came from the CreateResult above).
    assert len(result.messages) == 5
    assert isinstance(result.messages[0], TextMessage)
    assert result.messages[0].models_usage is None
    assert isinstance(result.messages[1], ThoughtEvent)
    assert result.messages[1].content == "Calling pass function"
    assert isinstance(result.messages[2], ToolCallRequestEvent)
    assert result.messages[2].models_usage is not None
    assert result.messages[2].models_usage.completion_tokens == 5
    assert result.messages[2].models_usage.prompt_tokens == 10
    assert isinstance(result.messages[3], ToolCallExecutionEvent)
    assert result.messages[3].models_usage is None
    assert isinstance(result.messages[4], ToolCallSummaryMessage)
    assert result.messages[4].content == "pass"
    assert result.messages[4].models_usage is None

    # Test streaming.
    model_client.reset()
    index = 0
    async for message in agent.run_stream(task="task"):
        if isinstance(message, TaskResult):
            # The final yield is the TaskResult; it must match the batch run.
            assert compare_task_results(message, result)
        else:
            # Each streamed message must match the batch run's, in order.
            assert compare_messages(message, result.messages[index])
            index += 1

    # Test state saving and loading.
    state = await agent.save_state()
    agent2 = AssistantAgent(
        "tool_use_agent",
        model_client=model_client,
        tools=[_pass_function, _fail_function, FunctionTool(_echo_function, description="Echo")],
    )
    await agent2.load_state(state)
    state2 = await agent2.save_state()
    # Loading then saving must be lossless.
    assert state == state2
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
async def test_run_with_tools_and_reflection() -> None:
|
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[FunctionCall(id="1", arguments=json.dumps({"input": "task"}), name="_pass_function")],
|
|
|
|
usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
|
|
|
|
cached=False,
|
|
|
|
),
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="stop",
|
|
|
|
content="Hello",
|
|
|
|
usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
|
|
|
|
cached=False,
|
|
|
|
),
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="stop",
|
|
|
|
content="TERMINATE",
|
|
|
|
usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
|
|
|
|
cached=False,
|
|
|
|
),
|
|
|
|
],
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
)
|
2024-12-09 19:03:31 -08:00
|
|
|
agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2024-12-09 19:03:31 -08:00
|
|
|
tools=[_pass_function, _fail_function, FunctionTool(_echo_function, description="Echo")],
|
|
|
|
reflect_on_tool_use=True,
|
|
|
|
)
|
|
|
|
result = await agent.run(task="task")
|
|
|
|
|
2025-03-13 14:29:46 -07:00
|
|
|
# Make sure the create call was made with the correct parameters.
|
|
|
|
assert len(model_client.create_calls) == 2
|
|
|
|
llm_messages = model_client.create_calls[0]["messages"]
|
|
|
|
assert len(llm_messages) == 2
|
|
|
|
assert isinstance(llm_messages[0], SystemMessage)
|
|
|
|
assert llm_messages[0].content == agent._system_messages[0].content # type: ignore
|
|
|
|
assert isinstance(llm_messages[1], UserMessage)
|
|
|
|
assert llm_messages[1].content == "task"
|
|
|
|
llm_messages = model_client.create_calls[1]["messages"]
|
|
|
|
assert len(llm_messages) == 4
|
|
|
|
assert isinstance(llm_messages[0], SystemMessage)
|
|
|
|
assert llm_messages[0].content == agent._system_messages[0].content # type: ignore
|
|
|
|
assert isinstance(llm_messages[1], UserMessage)
|
|
|
|
assert llm_messages[1].content == "task"
|
|
|
|
assert isinstance(llm_messages[2], AssistantMessage)
|
|
|
|
assert isinstance(llm_messages[3], FunctionExecutionResultMessage)
|
|
|
|
|
2024-10-30 10:27:57 -07:00
|
|
|
assert len(result.messages) == 4
|
2024-10-25 10:57:04 -07:00
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
2024-11-04 09:25:53 -08:00
|
|
|
assert result.messages[0].models_usage is None
|
2024-12-18 14:09:19 -08:00
|
|
|
assert isinstance(result.messages[1], ToolCallRequestEvent)
|
2024-11-04 09:25:53 -08:00
|
|
|
assert result.messages[1].models_usage is not None
|
|
|
|
assert result.messages[1].models_usage.completion_tokens == 5
|
|
|
|
assert result.messages[1].models_usage.prompt_tokens == 10
|
2024-12-18 14:09:19 -08:00
|
|
|
assert isinstance(result.messages[2], ToolCallExecutionEvent)
|
2024-11-04 09:25:53 -08:00
|
|
|
assert result.messages[2].models_usage is None
|
2024-10-30 10:27:57 -07:00
|
|
|
assert isinstance(result.messages[3], TextMessage)
|
2024-12-09 19:03:31 -08:00
|
|
|
assert result.messages[3].content == "Hello"
|
2024-11-04 09:25:53 -08:00
|
|
|
assert result.messages[3].models_usage is not None
|
|
|
|
assert result.messages[3].models_usage.completion_tokens == 5
|
|
|
|
assert result.messages[3].models_usage.prompt_tokens == 10
|
2024-10-29 08:04:14 -07:00
|
|
|
|
2024-11-01 04:12:43 -07:00
|
|
|
# Test streaming.
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client.reset()
|
2024-11-01 04:12:43 -07:00
|
|
|
index = 0
|
2024-12-04 16:14:41 -08:00
|
|
|
async for message in agent.run_stream(task="task"):
|
2024-11-01 04:12:43 -07:00
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2024-11-01 04:12:43 -07:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2024-11-01 04:12:43 -07:00
|
|
|
index += 1
|
|
|
|
|
2024-12-04 16:14:41 -08:00
|
|
|
# Test state saving and loading.
|
|
|
|
state = await agent.save_state()
|
|
|
|
agent2 = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2024-12-15 11:18:17 +05:30
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
2024-12-04 16:14:41 -08:00
|
|
|
)
|
|
|
|
await agent2.load_state(state)
|
|
|
|
state2 = await agent2.save_state()
|
|
|
|
assert state == state2
|
|
|
|
|
2024-10-29 08:04:14 -07:00
|
|
|
|
2025-01-17 15:39:57 -08:00
|
|
|
@pytest.mark.asyncio
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
async def test_run_with_parallel_tools() -> None:
|
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[
|
|
|
|
FunctionCall(id="1", arguments=json.dumps({"input": "task1"}), name="_pass_function"),
|
|
|
|
FunctionCall(id="2", arguments=json.dumps({"input": "task2"}), name="_pass_function"),
|
|
|
|
FunctionCall(id="3", arguments=json.dumps({"input": "task3"}), name="_echo_function"),
|
|
|
|
],
|
|
|
|
usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
|
|
|
|
thought="Calling pass and echo functions",
|
|
|
|
cached=False,
|
|
|
|
),
|
|
|
|
"pass",
|
|
|
|
"TERMINATE",
|
|
|
|
],
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
)
|
2025-01-17 15:39:57 -08:00
|
|
|
agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2025-01-17 15:39:57 -08:00
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
|
|
|
)
|
|
|
|
result = await agent.run(task="task")
|
|
|
|
|
feat: Add thought process handling in tool calls and expose ThoughtEvent through stream in AgentChat (#5500)
Resolves #5192
Test
```python
import asyncio
import os
from random import randint
from typing import List
from autogen_core.tools import BaseTool, FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
async def get_current_time(city: str) -> str:
return f"The current time in {city} is {randint(0, 23)}:{randint(0, 59)}."
tools: List[BaseTool] = [
FunctionTool(
get_current_time,
name="get_current_time",
description="Get current time for a city.",
),
]
model_client = OpenAIChatCompletionClient(
model="anthropic/claude-3.5-haiku-20241022",
base_url="https://openrouter.ai/api/v1",
api_key=os.environ["OPENROUTER_API_KEY"],
model_info={
"family": "claude-3.5-haiku",
"function_calling": True,
"vision": False,
"json_output": False,
}
)
agent = AssistantAgent(
name="Agent",
model_client=model_client,
tools=tools,
system_message= "You are an assistant with some tools that can be used to answer some questions",
)
async def main() -> None:
await Console(agent.run_stream(task="What is current time of Paris and Toronto?"))
asyncio.run(main())
```
```
---------- user ----------
What is current time of Paris and Toronto?
---------- Agent ----------
I'll help you find the current time for Paris and Toronto by using the get_current_time function for each city.
---------- Agent ----------
[FunctionCall(id='toolu_01NwP3fNAwcYKn1x656Dq9xW', arguments='{"city": "Paris"}', name='get_current_time'), FunctionCall(id='toolu_018d4cWSy3TxXhjgmLYFrfRt', arguments='{"city": "Toronto"}', name='get_current_time')]
---------- Agent ----------
[FunctionExecutionResult(content='The current time in Paris is 1:10.', call_id='toolu_01NwP3fNAwcYKn1x656Dq9xW', is_error=False), FunctionExecutionResult(content='The current time in Toronto is 7:28.', call_id='toolu_018d4cWSy3TxXhjgmLYFrfRt', is_error=False)]
---------- Agent ----------
The current time in Paris is 1:10.
The current time in Toronto is 7:28.
```
---------
Co-authored-by: Jack Gerrits <jackgerrits@users.noreply.github.com>
2025-02-21 14:58:32 -07:00
|
|
|
assert len(result.messages) == 5
|
2025-01-17 15:39:57 -08:00
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
|
|
|
assert result.messages[0].models_usage is None
|
feat: Add thought process handling in tool calls and expose ThoughtEvent through stream in AgentChat (#5500)
Resolves #5192
Test
```python
import asyncio
import os
from random import randint
from typing import List
from autogen_core.tools import BaseTool, FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
async def get_current_time(city: str) -> str:
return f"The current time in {city} is {randint(0, 23)}:{randint(0, 59)}."
tools: List[BaseTool] = [
FunctionTool(
get_current_time,
name="get_current_time",
description="Get current time for a city.",
),
]
model_client = OpenAIChatCompletionClient(
model="anthropic/claude-3.5-haiku-20241022",
base_url="https://openrouter.ai/api/v1",
api_key=os.environ["OPENROUTER_API_KEY"],
model_info={
"family": "claude-3.5-haiku",
"function_calling": True,
"vision": False,
"json_output": False,
}
)
agent = AssistantAgent(
name="Agent",
model_client=model_client,
tools=tools,
system_message= "You are an assistant with some tools that can be used to answer some questions",
)
async def main() -> None:
await Console(agent.run_stream(task="What is current time of Paris and Toronto?"))
asyncio.run(main())
```
```
---------- user ----------
What is current time of Paris and Toronto?
---------- Agent ----------
I'll help you find the current time for Paris and Toronto by using the get_current_time function for each city.
---------- Agent ----------
[FunctionCall(id='toolu_01NwP3fNAwcYKn1x656Dq9xW', arguments='{"city": "Paris"}', name='get_current_time'), FunctionCall(id='toolu_018d4cWSy3TxXhjgmLYFrfRt', arguments='{"city": "Toronto"}', name='get_current_time')]
---------- Agent ----------
[FunctionExecutionResult(content='The current time in Paris is 1:10.', call_id='toolu_01NwP3fNAwcYKn1x656Dq9xW', is_error=False), FunctionExecutionResult(content='The current time in Toronto is 7:28.', call_id='toolu_018d4cWSy3TxXhjgmLYFrfRt', is_error=False)]
---------- Agent ----------
The current time in Paris is 1:10.
The current time in Toronto is 7:28.
```
---------
Co-authored-by: Jack Gerrits <jackgerrits@users.noreply.github.com>
2025-02-21 14:58:32 -07:00
|
|
|
assert isinstance(result.messages[1], ThoughtEvent)
|
|
|
|
assert result.messages[1].content == "Calling pass and echo functions"
|
|
|
|
assert isinstance(result.messages[2], ToolCallRequestEvent)
|
|
|
|
assert result.messages[2].content == [
|
2025-01-17 15:39:57 -08:00
|
|
|
FunctionCall(id="1", arguments=r'{"input": "task1"}', name="_pass_function"),
|
|
|
|
FunctionCall(id="2", arguments=r'{"input": "task2"}', name="_pass_function"),
|
|
|
|
FunctionCall(id="3", arguments=r'{"input": "task3"}', name="_echo_function"),
|
|
|
|
]
|
feat: Add thought process handling in tool calls and expose ThoughtEvent through stream in AgentChat (#5500)
Resolves #5192
Test
```python
import asyncio
import os
from random import randint
from typing import List
from autogen_core.tools import BaseTool, FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
async def get_current_time(city: str) -> str:
return f"The current time in {city} is {randint(0, 23)}:{randint(0, 59)}."
tools: List[BaseTool] = [
FunctionTool(
get_current_time,
name="get_current_time",
description="Get current time for a city.",
),
]
model_client = OpenAIChatCompletionClient(
model="anthropic/claude-3.5-haiku-20241022",
base_url="https://openrouter.ai/api/v1",
api_key=os.environ["OPENROUTER_API_KEY"],
model_info={
"family": "claude-3.5-haiku",
"function_calling": True,
"vision": False,
"json_output": False,
}
)
agent = AssistantAgent(
name="Agent",
model_client=model_client,
tools=tools,
system_message= "You are an assistant with some tools that can be used to answer some questions",
)
async def main() -> None:
await Console(agent.run_stream(task="What is current time of Paris and Toronto?"))
asyncio.run(main())
```
```
---------- user ----------
What is current time of Paris and Toronto?
---------- Agent ----------
I'll help you find the current time for Paris and Toronto by using the get_current_time function for each city.
---------- Agent ----------
[FunctionCall(id='toolu_01NwP3fNAwcYKn1x656Dq9xW', arguments='{"city": "Paris"}', name='get_current_time'), FunctionCall(id='toolu_018d4cWSy3TxXhjgmLYFrfRt', arguments='{"city": "Toronto"}', name='get_current_time')]
---------- Agent ----------
[FunctionExecutionResult(content='The current time in Paris is 1:10.', call_id='toolu_01NwP3fNAwcYKn1x656Dq9xW', is_error=False), FunctionExecutionResult(content='The current time in Toronto is 7:28.', call_id='toolu_018d4cWSy3TxXhjgmLYFrfRt', is_error=False)]
---------- Agent ----------
The current time in Paris is 1:10.
The current time in Toronto is 7:28.
```
---------
Co-authored-by: Jack Gerrits <jackgerrits@users.noreply.github.com>
2025-02-21 14:58:32 -07:00
|
|
|
assert result.messages[2].models_usage is not None
|
|
|
|
assert result.messages[2].models_usage.completion_tokens == 5
|
|
|
|
assert result.messages[2].models_usage.prompt_tokens == 10
|
|
|
|
assert isinstance(result.messages[3], ToolCallExecutionEvent)
|
2025-01-17 15:39:57 -08:00
|
|
|
expected_content = [
|
2025-03-04 09:05:54 +10:00
|
|
|
FunctionExecutionResult(call_id="1", content="pass", is_error=False, name="_pass_function"),
|
|
|
|
FunctionExecutionResult(call_id="2", content="pass", is_error=False, name="_pass_function"),
|
|
|
|
FunctionExecutionResult(call_id="3", content="task3", is_error=False, name="_echo_function"),
|
2025-01-17 15:39:57 -08:00
|
|
|
]
|
|
|
|
for expected in expected_content:
|
feat: Add thought process handling in tool calls and expose ThoughtEvent through stream in AgentChat (#5500)
Resolves #5192
Test
```python
import asyncio
import os
from random import randint
from typing import List
from autogen_core.tools import BaseTool, FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
async def get_current_time(city: str) -> str:
return f"The current time in {city} is {randint(0, 23)}:{randint(0, 59)}."
tools: List[BaseTool] = [
FunctionTool(
get_current_time,
name="get_current_time",
description="Get current time for a city.",
),
]
model_client = OpenAIChatCompletionClient(
model="anthropic/claude-3.5-haiku-20241022",
base_url="https://openrouter.ai/api/v1",
api_key=os.environ["OPENROUTER_API_KEY"],
model_info={
"family": "claude-3.5-haiku",
"function_calling": True,
"vision": False,
"json_output": False,
}
)
agent = AssistantAgent(
name="Agent",
model_client=model_client,
tools=tools,
system_message= "You are an assistant with some tools that can be used to answer some questions",
)
async def main() -> None:
await Console(agent.run_stream(task="What is current time of Paris and Toronto?"))
asyncio.run(main())
```
```
---------- user ----------
What is current time of Paris and Toronto?
---------- Agent ----------
I'll help you find the current time for Paris and Toronto by using the get_current_time function for each city.
---------- Agent ----------
[FunctionCall(id='toolu_01NwP3fNAwcYKn1x656Dq9xW', arguments='{"city": "Paris"}', name='get_current_time'), FunctionCall(id='toolu_018d4cWSy3TxXhjgmLYFrfRt', arguments='{"city": "Toronto"}', name='get_current_time')]
---------- Agent ----------
[FunctionExecutionResult(content='The current time in Paris is 1:10.', call_id='toolu_01NwP3fNAwcYKn1x656Dq9xW', is_error=False), FunctionExecutionResult(content='The current time in Toronto is 7:28.', call_id='toolu_018d4cWSy3TxXhjgmLYFrfRt', is_error=False)]
---------- Agent ----------
The current time in Paris is 1:10.
The current time in Toronto is 7:28.
```
---------
Co-authored-by: Jack Gerrits <jackgerrits@users.noreply.github.com>
2025-02-21 14:58:32 -07:00
|
|
|
assert expected in result.messages[3].content
|
2025-02-13 23:11:44 -08:00
|
|
|
assert result.messages[3].models_usage is None
|
feat: Add thought process handling in tool calls and expose ThoughtEvent through stream in AgentChat (#5500)
Resolves #5192
Test
```python
import asyncio
import os
from random import randint
from typing import List
from autogen_core.tools import BaseTool, FunctionTool
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
async def get_current_time(city: str) -> str:
return f"The current time in {city} is {randint(0, 23)}:{randint(0, 59)}."
tools: List[BaseTool] = [
FunctionTool(
get_current_time,
name="get_current_time",
description="Get current time for a city.",
),
]
model_client = OpenAIChatCompletionClient(
model="anthropic/claude-3.5-haiku-20241022",
base_url="https://openrouter.ai/api/v1",
api_key=os.environ["OPENROUTER_API_KEY"],
model_info={
"family": "claude-3.5-haiku",
"function_calling": True,
"vision": False,
"json_output": False,
}
)
agent = AssistantAgent(
name="Agent",
model_client=model_client,
tools=tools,
system_message= "You are an assistant with some tools that can be used to answer some questions",
)
async def main() -> None:
await Console(agent.run_stream(task="What is current time of Paris and Toronto?"))
asyncio.run(main())
```
```
---------- user ----------
What is current time of Paris and Toronto?
---------- Agent ----------
I'll help you find the current time for Paris and Toronto by using the get_current_time function for each city.
---------- Agent ----------
[FunctionCall(id='toolu_01NwP3fNAwcYKn1x656Dq9xW', arguments='{"city": "Paris"}', name='get_current_time'), FunctionCall(id='toolu_018d4cWSy3TxXhjgmLYFrfRt', arguments='{"city": "Toronto"}', name='get_current_time')]
---------- Agent ----------
[FunctionExecutionResult(content='The current time in Paris is 1:10.', call_id='toolu_01NwP3fNAwcYKn1x656Dq9xW', is_error=False), FunctionExecutionResult(content='The current time in Toronto is 7:28.', call_id='toolu_018d4cWSy3TxXhjgmLYFrfRt', is_error=False)]
---------- Agent ----------
The current time in Paris is 1:10.
The current time in Toronto is 7:28.
```
---------
Co-authored-by: Jack Gerrits <jackgerrits@users.noreply.github.com>
2025-02-21 14:58:32 -07:00
|
|
|
assert isinstance(result.messages[4], ToolCallSummaryMessage)
|
|
|
|
assert result.messages[4].content == "pass\npass\ntask3"
|
|
|
|
assert result.messages[4].models_usage is None
|
2025-02-13 23:11:44 -08:00
|
|
|
|
|
|
|
# Test streaming.
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client.reset()
|
2025-02-13 23:11:44 -08:00
|
|
|
index = 0
|
|
|
|
async for message in agent.run_stream(task="task"):
|
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2025-02-13 23:11:44 -08:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2025-02-13 23:11:44 -08:00
|
|
|
index += 1
|
|
|
|
|
|
|
|
# Test state saving and loading.
|
|
|
|
state = await agent.save_state()
|
|
|
|
agent2 = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2025-02-13 23:11:44 -08:00
|
|
|
tools=[_pass_function, _fail_function, FunctionTool(_echo_function, description="Echo")],
|
|
|
|
)
|
|
|
|
await agent2.load_state(state)
|
|
|
|
state2 = await agent2.save_state()
|
|
|
|
assert state == state2
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
async def test_run_with_parallel_tools_with_empty_call_ids() -> None:
|
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[
|
|
|
|
FunctionCall(id="", arguments=json.dumps({"input": "task1"}), name="_pass_function"),
|
|
|
|
FunctionCall(id="", arguments=json.dumps({"input": "task2"}), name="_pass_function"),
|
|
|
|
FunctionCall(id="", arguments=json.dumps({"input": "task3"}), name="_echo_function"),
|
|
|
|
],
|
|
|
|
usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
|
|
|
|
cached=False,
|
|
|
|
),
|
|
|
|
"pass",
|
|
|
|
"TERMINATE",
|
|
|
|
],
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
)
|
2025-02-13 23:11:44 -08:00
|
|
|
agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2025-02-13 23:11:44 -08:00
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
|
|
|
)
|
|
|
|
result = await agent.run(task="task")
|
|
|
|
|
|
|
|
assert len(result.messages) == 4
|
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
|
|
|
assert result.messages[0].models_usage is None
|
|
|
|
assert isinstance(result.messages[1], ToolCallRequestEvent)
|
|
|
|
assert result.messages[1].content == [
|
|
|
|
FunctionCall(id="", arguments=r'{"input": "task1"}', name="_pass_function"),
|
|
|
|
FunctionCall(id="", arguments=r'{"input": "task2"}', name="_pass_function"),
|
|
|
|
FunctionCall(id="", arguments=r'{"input": "task3"}', name="_echo_function"),
|
|
|
|
]
|
|
|
|
assert result.messages[1].models_usage is not None
|
|
|
|
assert result.messages[1].models_usage.completion_tokens == 5
|
|
|
|
assert result.messages[1].models_usage.prompt_tokens == 10
|
|
|
|
assert isinstance(result.messages[2], ToolCallExecutionEvent)
|
|
|
|
expected_content = [
|
2025-03-04 09:05:54 +10:00
|
|
|
FunctionExecutionResult(call_id="", content="pass", is_error=False, name="_pass_function"),
|
|
|
|
FunctionExecutionResult(call_id="", content="pass", is_error=False, name="_pass_function"),
|
|
|
|
FunctionExecutionResult(call_id="", content="task3", is_error=False, name="_echo_function"),
|
2025-02-13 23:11:44 -08:00
|
|
|
]
|
|
|
|
for expected in expected_content:
|
|
|
|
assert expected in result.messages[2].content
|
2025-01-17 15:39:57 -08:00
|
|
|
assert result.messages[2].models_usage is None
|
|
|
|
assert isinstance(result.messages[3], ToolCallSummaryMessage)
|
|
|
|
assert result.messages[3].content == "pass\npass\ntask3"
|
|
|
|
assert result.messages[3].models_usage is None
|
|
|
|
|
|
|
|
# Test streaming.
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client.reset()
|
2025-01-17 15:39:57 -08:00
|
|
|
index = 0
|
|
|
|
async for message in agent.run_stream(task="task"):
|
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2025-01-17 15:39:57 -08:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2025-01-17 15:39:57 -08:00
|
|
|
index += 1
|
|
|
|
|
|
|
|
# Test state saving and loading.
|
|
|
|
state = await agent.save_state()
|
|
|
|
agent2 = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2025-01-17 15:39:57 -08:00
|
|
|
tools=[_pass_function, _fail_function, FunctionTool(_echo_function, description="Echo")],
|
|
|
|
)
|
|
|
|
await agent2.load_state(state)
|
|
|
|
state2 = await agent2.save_state()
|
|
|
|
assert state == state2
|
|
|
|
|
|
|
|
|
2025-04-24 16:19:36 -07:00
|
|
|
@pytest.mark.asyncio
async def test_run_with_workbench() -> None:
    """Exercise AssistantAgent configured with a StaticWorkbench of tools.

    Covers: mutual exclusion of ``tools`` and ``workbench`` arguments, a
    tool-call round trip with ``reflect_on_tool_use=True`` (replayed model
    responses), the parameters passed to the model client, streaming parity
    with the non-streaming run, and state save/load round-tripping.
    """
    # Replayed script: one tool call, then a reflection answer, then a
    # terminate message (the third result is unused by the first run).
    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="function_calls",
                content=[FunctionCall(id="1", arguments=json.dumps({"input": "task"}), name="_pass_function")],
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
            CreateResult(
                finish_reason="stop",
                content="Hello",
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
            CreateResult(
                finish_reason="stop",
                content="TERMINATE",
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
        ],
        model_info={
            "function_calling": True,
            "vision": True,
            "json_output": True,
            "family": ModelFamily.GPT_4O,
            "structured_output": True,
        },
    )
    workbench = StaticWorkbench(
        [
            FunctionTool(_pass_function, description="Pass"),
            FunctionTool(_fail_function, description="Fail"),
            FunctionTool(_echo_function, description="Echo"),
        ]
    )

    # Test raise error when both workbench and tools are provided.
    with pytest.raises(ValueError):
        AssistantAgent(
            "tool_use_agent",
            model_client=model_client,
            tools=[
                _pass_function,
                _fail_function,
                FunctionTool(_echo_function, description="Echo"),
            ],
            workbench=workbench,
        )

    agent = AssistantAgent(
        "tool_use_agent",
        model_client=model_client,
        workbench=workbench,
        reflect_on_tool_use=True,
    )
    result = await agent.run(task="task")

    # Make sure the create call was made with the correct parameters.
    # First call: system message + user task only.
    assert len(model_client.create_calls) == 2
    llm_messages = model_client.create_calls[0]["messages"]
    assert len(llm_messages) == 2
    assert isinstance(llm_messages[0], SystemMessage)
    assert llm_messages[0].content == agent._system_messages[0].content  # type: ignore
    assert isinstance(llm_messages[1], UserMessage)
    assert llm_messages[1].content == "task"
    # Second (reflection) call additionally carries the assistant's tool
    # call and the tool execution result.
    llm_messages = model_client.create_calls[1]["messages"]
    assert len(llm_messages) == 4
    assert isinstance(llm_messages[0], SystemMessage)
    assert llm_messages[0].content == agent._system_messages[0].content  # type: ignore
    assert isinstance(llm_messages[1], UserMessage)
    assert llm_messages[1].content == "task"
    assert isinstance(llm_messages[2], AssistantMessage)
    assert isinstance(llm_messages[3], FunctionExecutionResultMessage)

    # Expected message sequence: task, tool request, tool result, reflection.
    assert len(result.messages) == 4
    assert isinstance(result.messages[0], TextMessage)
    assert result.messages[0].models_usage is None
    assert isinstance(result.messages[1], ToolCallRequestEvent)
    assert result.messages[1].models_usage is not None
    assert result.messages[1].models_usage.completion_tokens == 5
    assert result.messages[1].models_usage.prompt_tokens == 10
    assert isinstance(result.messages[2], ToolCallExecutionEvent)
    assert result.messages[2].models_usage is None
    assert isinstance(result.messages[3], TextMessage)
    assert result.messages[3].content == "Hello"
    assert result.messages[3].models_usage is not None
    assert result.messages[3].models_usage.completion_tokens == 5
    assert result.messages[3].models_usage.prompt_tokens == 10

    # Test streaming: replay the same script and verify the streamed
    # messages match the non-streaming result one-to-one.
    model_client.reset()
    index = 0
    async for message in agent.run_stream(task="task"):
        if isinstance(message, TaskResult):
            assert compare_task_results(message, result)
        else:
            assert compare_messages(message, result.messages[index])
            index += 1

    # Test state saving and loading: a freshly constructed agent loaded
    # with the saved state must serialize back to an identical state.
    state = await agent.save_state()
    agent2 = AssistantAgent(
        "tool_use_agent",
        model_client=model_client,
        tools=[
            _pass_function,
            _fail_function,
            FunctionTool(_echo_function, description="Echo"),
        ],
    )
    await agent2.load_state(state)
    state2 = await agent2.save_state()
    assert state == state2
|
|
|
|
|
|
|
|
|
Add output_format to AssistantAgent for structured output (#6071)
Resolves #5934
This PR adds ability for `AssistantAgent` to generate a
`StructuredMessage[T]` where `T` is the content type in base model.
How to use?
```python
from typing import Literal
from pydantic import BaseModel
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_agentchat.ui import Console
# The response format for the agent as a Pydantic base model.
class AgentResponse(BaseModel):
thoughts: str
response: Literal["happy", "sad", "neutral"]
# Create an agent that uses the OpenAI GPT-4o model which supports structured output.
model_client = OpenAIChatCompletionClient(model="gpt-4o")
agent = AssistantAgent(
"assistant",
model_client=model_client,
system_message="Categorize the input as happy, sad, or neutral following the JSON format.",
# Setting the output format to AgentResponse to force the agent to produce a JSON string as response.
output_content_type=AgentResponse,
)
result = await Console(agent.run_stream(task="I am happy."))
# Check the last message in the result, validate its type, and print the thoughts and response.
assert isinstance(result.messages[-1], StructuredMessage)
assert isinstance(result.messages[-1].content, AgentResponse)
print("Thought: ", result.messages[-1].content.thoughts)
print("Response: ", result.messages[-1].content.response)
await model_client.close()
```
```
---------- user ----------
I am happy.
---------- assistant ----------
{
"thoughts": "The user explicitly states they are happy.",
"response": "happy"
}
Thought: The user explicitly states they are happy.
Response: happy
```
---------
Co-authored-by: Victor Dibia <victordibia@microsoft.com>
2025-04-01 13:11:01 -07:00
|
|
|
@pytest.mark.asyncio
async def test_output_format() -> None:
    """Verify structured output via ``output_content_type``.

    The agent must advertise ``StructuredMessage[AgentResponse]`` (and not
    ``TextMessage``) as a produced type, and both ``run`` and ``run_stream``
    must yield a ``StructuredMessage`` whose content is a validated
    ``AgentResponse`` instance parsed from the replayed JSON string.
    """

    class AgentResponse(BaseModel):
        response: str
        status: str

    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="stop",
                content=AgentResponse(response="Hello", status="success").model_dump_json(),
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
        ]
    )
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        output_content_type=AgentResponse,
    )
    # Setting output_content_type swaps the produced message type from
    # TextMessage to StructuredMessage[AgentResponse].
    assert StructuredMessage[AgentResponse] in agent.produced_message_types
    assert TextMessage not in agent.produced_message_types

    result = await agent.run()
    assert len(result.messages) == 1
    assert isinstance(result.messages[0], StructuredMessage)
    assert isinstance(result.messages[0].content, AgentResponse)  # type: ignore[reportUnknownMemberType]
    assert result.messages[0].content.response == "Hello"
    assert result.messages[0].content.status == "success"

    # Test streaming: same replayed response, consumed chunk-by-chunk.
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        model_client_stream=True,
        output_content_type=AgentResponse,
    )
    model_client.reset()
    stream = agent.run_stream()
    stream_result: TaskResult | None = None
    async for message in stream:
        if isinstance(message, TaskResult):
            stream_result = message
    assert stream_result is not None
    assert len(stream_result.messages) == 1
    assert isinstance(stream_result.messages[0], StructuredMessage)
    assert isinstance(stream_result.messages[0].content, AgentResponse)  # type: ignore[reportUnknownMemberType]
    assert stream_result.messages[0].content.response == "Hello"
    assert stream_result.messages[0].content.status == "success"
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_reflection_output_format() -> None:
    """Verify structured output combined with tool use.

    With ``output_content_type`` set, the agent reflects after the tool
    call and emits a ``StructuredMessage`` (reflection is implied; no
    explicit ``reflect_on_tool_use=True`` is needed). When
    ``reflect_on_tool_use=False`` is passed explicitly, the final message
    is a plain ``ToolCallSummaryMessage`` instead.
    """

    class AgentResponse(BaseModel):
        response: str
        status: str

    # Replayed script: one tool call, then a structured JSON reflection.
    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="function_calls",
                content=[FunctionCall(id="1", arguments=json.dumps({"input": "task"}), name="_pass_function")],
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
            AgentResponse(response="Hello", status="success").model_dump_json(),
        ],
        model_info={
            "function_calling": True,
            "vision": True,
            "json_output": True,
            "family": ModelFamily.GPT_4O,
            "structured_output": True,
        },
    )
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        output_content_type=AgentResponse,
        # NOTE: reflect_on_tool_use defaults to reflection here because
        # output_content_type is set.
        tools=[
            _pass_function,
            _fail_function,
        ],
    )
    result = await agent.run()
    assert len(result.messages) == 3
    assert isinstance(result.messages[0], ToolCallRequestEvent)
    assert isinstance(result.messages[1], ToolCallExecutionEvent)
    assert isinstance(result.messages[2], StructuredMessage)
    assert isinstance(result.messages[2].content, AgentResponse)  # type: ignore[reportUnknownMemberType]
    assert result.messages[2].content.response == "Hello"
    assert result.messages[2].content.status == "success"

    # Test streaming: same replayed script consumed via run_stream.
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        model_client_stream=True,
        output_content_type=AgentResponse,
        # NOTE: reflection implied by output_content_type, as above.
        tools=[
            _pass_function,
            _fail_function,
        ],
    )
    model_client.reset()
    stream = agent.run_stream()
    stream_result: TaskResult | None = None
    async for message in stream:
        if isinstance(message, TaskResult):
            stream_result = message
    assert stream_result is not None
    assert len(stream_result.messages) == 3
    assert isinstance(stream_result.messages[0], ToolCallRequestEvent)
    assert isinstance(stream_result.messages[1], ToolCallExecutionEvent)
    assert isinstance(stream_result.messages[2], StructuredMessage)
    assert isinstance(stream_result.messages[2].content, AgentResponse)  # type: ignore[reportUnknownMemberType]
    assert stream_result.messages[2].content.response == "Hello"
    assert stream_result.messages[2].content.status == "success"

    # Test when reflect_on_tool_use is False: no reflection call, so the
    # run ends with a tool call summary rather than a structured message.
    model_client.reset()
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        output_content_type=AgentResponse,
        reflect_on_tool_use=False,
        tools=[
            _pass_function,
            _fail_function,
        ],
    )
    result = await agent.run()
    assert len(result.messages) == 3
    assert isinstance(result.messages[0], ToolCallRequestEvent)
    assert isinstance(result.messages[1], ToolCallExecutionEvent)
    assert isinstance(result.messages[2], ToolCallSummaryMessage)
|
|
|
|
|
|
|
|
|
2024-10-29 08:04:14 -07:00
|
|
|
@pytest.mark.asyncio
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
async def test_handoffs() -> None:
|
2024-10-29 08:04:14 -07:00
|
|
|
handoff = Handoff(target="agent2")
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[
|
|
|
|
FunctionCall(id="1", arguments=json.dumps({}), name=handoff.name),
|
|
|
|
],
|
|
|
|
usage=RequestUsage(prompt_tokens=42, completion_tokens=43),
|
|
|
|
cached=False,
|
2025-04-16 20:22:49 -07:00
|
|
|
thought="Calling handoff function",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
)
|
|
|
|
],
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
)
|
2024-10-29 08:04:14 -07:00
|
|
|
tool_use_agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2024-12-15 11:18:17 +05:30
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
2024-10-29 08:04:14 -07:00
|
|
|
handoffs=[handoff],
|
|
|
|
)
|
2024-10-30 05:32:11 -07:00
|
|
|
assert HandoffMessage in tool_use_agent.produced_message_types
|
2024-11-07 16:00:35 -08:00
|
|
|
result = await tool_use_agent.run(task="task")
|
2025-04-16 20:22:49 -07:00
|
|
|
assert len(result.messages) == 5
|
2024-11-01 04:12:43 -07:00
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
2024-11-04 09:25:53 -08:00
|
|
|
assert result.messages[0].models_usage is None
|
2025-04-16 20:22:49 -07:00
|
|
|
assert isinstance(result.messages[1], ThoughtEvent)
|
|
|
|
assert result.messages[1].content == "Calling handoff function"
|
|
|
|
assert isinstance(result.messages[2], ToolCallRequestEvent)
|
|
|
|
assert result.messages[2].models_usage is not None
|
|
|
|
assert result.messages[2].models_usage.completion_tokens == 43
|
|
|
|
assert result.messages[2].models_usage.prompt_tokens == 42
|
|
|
|
assert isinstance(result.messages[3], ToolCallExecutionEvent)
|
|
|
|
assert result.messages[3].models_usage is None
|
|
|
|
assert isinstance(result.messages[4], HandoffMessage)
|
|
|
|
assert result.messages[4].content == handoff.message
|
|
|
|
assert result.messages[4].target == handoff.target
|
|
|
|
assert result.messages[4].models_usage is None
|
|
|
|
assert result.messages[4].context == [AssistantMessage(content="Calling handoff function", source="tool_use_agent")]
|
|
|
|
|
|
|
|
# Test streaming.
|
|
|
|
model_client.reset()
|
|
|
|
index = 0
|
|
|
|
async for message in tool_use_agent.run_stream(task="task"):
|
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2025-04-16 20:22:49 -07:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2025-04-16 20:22:49 -07:00
|
|
|
index += 1
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_handoff_with_tool_call_context() -> None:
|
|
|
|
handoff = Handoff(target="agent2")
|
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[
|
|
|
|
FunctionCall(id="1", arguments=json.dumps({}), name=handoff.name),
|
|
|
|
FunctionCall(id="2", arguments=json.dumps({"input": "task"}), name="_pass_function"),
|
|
|
|
],
|
|
|
|
usage=RequestUsage(prompt_tokens=42, completion_tokens=43),
|
|
|
|
cached=False,
|
|
|
|
thought="Calling handoff function",
|
|
|
|
)
|
|
|
|
],
|
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
tool_use_agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
|
|
|
model_client=model_client,
|
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
|
|
|
handoffs=[handoff],
|
|
|
|
)
|
|
|
|
assert HandoffMessage in tool_use_agent.produced_message_types
|
|
|
|
result = await tool_use_agent.run(task="task")
|
|
|
|
assert len(result.messages) == 5
|
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
|
|
|
assert result.messages[0].models_usage is None
|
|
|
|
assert isinstance(result.messages[1], ThoughtEvent)
|
|
|
|
assert result.messages[1].content == "Calling handoff function"
|
|
|
|
assert isinstance(result.messages[2], ToolCallRequestEvent)
|
|
|
|
assert result.messages[2].models_usage is not None
|
|
|
|
assert result.messages[2].models_usage.completion_tokens == 43
|
|
|
|
assert result.messages[2].models_usage.prompt_tokens == 42
|
|
|
|
assert isinstance(result.messages[3], ToolCallExecutionEvent)
|
2024-11-04 09:25:53 -08:00
|
|
|
assert result.messages[3].models_usage is None
|
2025-04-16 20:22:49 -07:00
|
|
|
assert isinstance(result.messages[4], HandoffMessage)
|
|
|
|
assert result.messages[4].content == handoff.message
|
|
|
|
assert result.messages[4].target == handoff.target
|
|
|
|
assert result.messages[4].models_usage is None
|
|
|
|
assert result.messages[4].context == [
|
|
|
|
AssistantMessage(
|
|
|
|
content=[FunctionCall(id="2", arguments=r'{"input": "task"}', name="_pass_function")],
|
|
|
|
source="tool_use_agent",
|
|
|
|
thought="Calling handoff function",
|
|
|
|
),
|
|
|
|
FunctionExecutionResultMessage(
|
|
|
|
content=[FunctionExecutionResult(call_id="2", content="pass", is_error=False, name="_pass_function")]
|
|
|
|
),
|
|
|
|
]
|
2024-11-01 04:12:43 -07:00
|
|
|
|
|
|
|
# Test streaming.
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client.reset()
|
2024-11-01 04:12:43 -07:00
|
|
|
index = 0
|
2024-11-07 16:00:35 -08:00
|
|
|
async for message in tool_use_agent.run_stream(task="task"):
|
2024-11-01 04:12:43 -07:00
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2024-11-01 04:12:43 -07:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2024-11-01 04:12:43 -07:00
|
|
|
index += 1
|
2024-11-07 21:38:41 -08:00
|
|
|
|
|
|
|
|
2025-03-26 05:38:07 +08:00
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_custom_handoffs() -> None:
|
|
|
|
name = "transfer_to_agent2"
|
|
|
|
description = "Handoff to agent2."
|
|
|
|
next_action = "next_action"
|
|
|
|
|
|
|
|
class TextCommandHandOff(Handoff):
|
|
|
|
@property
|
|
|
|
def handoff_tool(self) -> BaseTool[BaseModel, BaseModel]:
|
|
|
|
"""Create a handoff tool from this handoff configuration."""
|
|
|
|
|
|
|
|
def _next_action(action: str) -> str:
|
|
|
|
"""Returns the action you want the user to perform"""
|
|
|
|
return action
|
|
|
|
|
|
|
|
return FunctionTool(_next_action, name=self.name, description=self.description, strict=True)
|
|
|
|
|
|
|
|
handoff = TextCommandHandOff(name=name, description=description, target="agent2")
|
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[
|
|
|
|
FunctionCall(id="1", arguments=json.dumps({"action": next_action}), name=handoff.name),
|
|
|
|
],
|
|
|
|
usage=RequestUsage(prompt_tokens=42, completion_tokens=43),
|
|
|
|
cached=False,
|
|
|
|
)
|
|
|
|
],
|
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
tool_use_agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
|
|
|
model_client=model_client,
|
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
|
|
|
handoffs=[handoff],
|
|
|
|
)
|
|
|
|
assert HandoffMessage in tool_use_agent.produced_message_types
|
|
|
|
result = await tool_use_agent.run(task="task")
|
|
|
|
assert len(result.messages) == 4
|
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
|
|
|
assert result.messages[0].models_usage is None
|
|
|
|
assert isinstance(result.messages[1], ToolCallRequestEvent)
|
|
|
|
assert result.messages[1].models_usage is not None
|
|
|
|
assert result.messages[1].models_usage.completion_tokens == 43
|
|
|
|
assert result.messages[1].models_usage.prompt_tokens == 42
|
|
|
|
assert isinstance(result.messages[2], ToolCallExecutionEvent)
|
|
|
|
assert result.messages[2].models_usage is None
|
|
|
|
assert isinstance(result.messages[3], HandoffMessage)
|
|
|
|
assert result.messages[3].content == next_action
|
|
|
|
assert result.messages[3].target == handoff.target
|
|
|
|
|
|
|
|
assert result.messages[3].models_usage is None
|
|
|
|
|
|
|
|
# Test streaming.
|
|
|
|
model_client.reset()
|
|
|
|
index = 0
|
|
|
|
async for message in tool_use_agent.run_stream(task="task"):
|
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2025-03-26 05:38:07 +08:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2025-03-26 05:38:07 +08:00
|
|
|
index += 1
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_custom_object_handoffs() -> None:
|
|
|
|
"""test handoff tool return a object"""
|
|
|
|
name = "transfer_to_agent2"
|
|
|
|
description = "Handoff to agent2."
|
|
|
|
next_action = {"action": "next_action"} # using a map, not a str
|
|
|
|
|
|
|
|
class DictCommandHandOff(Handoff):
|
|
|
|
@property
|
|
|
|
def handoff_tool(self) -> BaseTool[BaseModel, BaseModel]:
|
|
|
|
"""Create a handoff tool from this handoff configuration."""
|
|
|
|
|
|
|
|
def _next_action(action: str) -> Dict[str, str]:
|
|
|
|
"""Returns the action you want the user to perform"""
|
|
|
|
return {"action": action}
|
|
|
|
|
|
|
|
return FunctionTool(_next_action, name=self.name, description=self.description, strict=True)
|
|
|
|
|
|
|
|
handoff = DictCommandHandOff(name=name, description=description, target="agent2")
|
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="function_calls",
|
|
|
|
content=[
|
|
|
|
FunctionCall(id="1", arguments=json.dumps({"action": "next_action"}), name=handoff.name),
|
|
|
|
],
|
|
|
|
usage=RequestUsage(prompt_tokens=42, completion_tokens=43),
|
|
|
|
cached=False,
|
|
|
|
)
|
|
|
|
],
|
|
|
|
model_info={
|
|
|
|
"function_calling": True,
|
|
|
|
"vision": True,
|
|
|
|
"json_output": True,
|
|
|
|
"family": ModelFamily.GPT_4O,
|
|
|
|
"structured_output": True,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
tool_use_agent = AssistantAgent(
|
|
|
|
"tool_use_agent",
|
|
|
|
model_client=model_client,
|
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
|
|
|
handoffs=[handoff],
|
|
|
|
)
|
|
|
|
assert HandoffMessage in tool_use_agent.produced_message_types
|
|
|
|
result = await tool_use_agent.run(task="task")
|
|
|
|
assert len(result.messages) == 4
|
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
|
|
|
assert result.messages[0].models_usage is None
|
|
|
|
assert isinstance(result.messages[1], ToolCallRequestEvent)
|
|
|
|
assert result.messages[1].models_usage is not None
|
|
|
|
assert result.messages[1].models_usage.completion_tokens == 43
|
|
|
|
assert result.messages[1].models_usage.prompt_tokens == 42
|
|
|
|
assert isinstance(result.messages[2], ToolCallExecutionEvent)
|
|
|
|
assert result.messages[2].models_usage is None
|
|
|
|
assert isinstance(result.messages[3], HandoffMessage)
|
|
|
|
# the content will return as a string, because the function call will convert to string
|
|
|
|
assert result.messages[3].content == str(next_action)
|
|
|
|
assert result.messages[3].target == handoff.target
|
|
|
|
|
|
|
|
assert result.messages[3].models_usage is None
|
|
|
|
|
|
|
|
# Test streaming.
|
|
|
|
model_client.reset()
|
|
|
|
index = 0
|
|
|
|
async for message in tool_use_agent.run_stream(task="task"):
|
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2025-03-26 05:38:07 +08:00
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2025-03-26 05:38:07 +08:00
|
|
|
index += 1
|
|
|
|
|
|
|
|
|
2024-11-07 21:38:41 -08:00
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_multi_modal_task(monkeypatch: pytest.MonkeyPatch) -> None:
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client = ReplayChatCompletionClient(["Hello"])
|
2024-12-15 11:18:17 +05:30
|
|
|
agent = AssistantAgent(
|
|
|
|
name="assistant",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2024-12-15 11:18:17 +05:30
|
|
|
)
|
2024-11-07 21:38:41 -08:00
|
|
|
# Generate a random base64 image.
|
|
|
|
img_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC"
|
|
|
|
result = await agent.run(task=MultiModalMessage(source="user", content=["Test", Image.from_base64(img_base64)]))
|
|
|
|
assert len(result.messages) == 2
|
2024-11-27 10:45:51 -08:00
|
|
|
|
|
|
|
|
2025-03-26 16:19:52 -07:00
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_run_with_structured_task() -> None:
|
|
|
|
class InputTask(BaseModel):
|
|
|
|
input: str
|
|
|
|
data: List[str]
|
|
|
|
|
|
|
|
model_client = ReplayChatCompletionClient(["Hello"])
|
|
|
|
agent = AssistantAgent(
|
|
|
|
name="assistant",
|
|
|
|
model_client=model_client,
|
|
|
|
)
|
|
|
|
|
|
|
|
task = StructuredMessage[InputTask](content=InputTask(input="Test", data=["Test1", "Test2"]), source="user")
|
|
|
|
result = await agent.run(task=task)
|
|
|
|
assert len(result.messages) == 2
|
|
|
|
|
|
|
|
|
2024-11-27 10:45:51 -08:00
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_invalid_model_capabilities() -> None:
|
|
|
|
model = "random-model"
|
|
|
|
model_client = OpenAIChatCompletionClient(
|
2024-12-15 11:18:17 +05:30
|
|
|
model=model,
|
|
|
|
api_key="",
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"vision": False,
|
|
|
|
"function_calling": False,
|
|
|
|
"json_output": False,
|
|
|
|
"family": ModelFamily.UNKNOWN,
|
|
|
|
"structured_output": False,
|
|
|
|
},
|
2024-11-27 10:45:51 -08:00
|
|
|
)
|
|
|
|
|
|
|
|
with pytest.raises(ValueError):
|
|
|
|
agent = AssistantAgent(
|
|
|
|
name="assistant",
|
|
|
|
model_client=model_client,
|
2024-12-15 11:18:17 +05:30
|
|
|
tools=[
|
|
|
|
_pass_function,
|
|
|
|
_fail_function,
|
|
|
|
FunctionTool(_echo_function, description="Echo"),
|
|
|
|
],
|
2024-11-27 10:45:51 -08:00
|
|
|
)
|
2025-02-04 06:55:04 -08:00
|
|
|
await agent.run(task=TextMessage(source="user", content="Test"))
|
2024-11-27 10:45:51 -08:00
|
|
|
|
|
|
|
with pytest.raises(ValueError):
|
|
|
|
agent = AssistantAgent(name="assistant", model_client=model_client, handoffs=["agent2"])
|
2025-02-04 06:55:04 -08:00
|
|
|
await agent.run(task=TextMessage(source="user", content="Test"))
|
2024-11-27 10:45:51 -08:00
|
|
|
|
2025-02-04 06:55:04 -08:00
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
async def test_remove_images() -> None:
|
2025-02-04 06:55:04 -08:00
|
|
|
model = "random-model"
|
|
|
|
model_client_1 = OpenAIChatCompletionClient(
|
|
|
|
model=model,
|
|
|
|
api_key="",
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"vision": False,
|
|
|
|
"function_calling": False,
|
|
|
|
"json_output": False,
|
|
|
|
"family": ModelFamily.UNKNOWN,
|
|
|
|
"structured_output": False,
|
|
|
|
},
|
2025-02-04 06:55:04 -08:00
|
|
|
)
|
|
|
|
model_client_2 = OpenAIChatCompletionClient(
|
|
|
|
model=model,
|
|
|
|
api_key="",
|
2025-03-15 07:58:13 -07:00
|
|
|
model_info={
|
|
|
|
"vision": True,
|
|
|
|
"function_calling": False,
|
|
|
|
"json_output": False,
|
|
|
|
"family": ModelFamily.UNKNOWN,
|
|
|
|
"structured_output": False,
|
|
|
|
},
|
2025-02-04 06:55:04 -08:00
|
|
|
)
|
|
|
|
|
|
|
|
img_base64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC"
|
|
|
|
messages: List[LLMMessage] = [
|
|
|
|
SystemMessage(content="System.1"),
|
|
|
|
UserMessage(content=["User.1", Image.from_base64(img_base64)], source="user.1"),
|
|
|
|
AssistantMessage(content="Assistant.1", source="assistant.1"),
|
|
|
|
UserMessage(content="User.2", source="assistant.2"),
|
|
|
|
]
|
|
|
|
|
|
|
|
agent_1 = AssistantAgent(name="assistant_1", model_client=model_client_1)
|
2025-02-26 05:11:35 +10:00
|
|
|
result = agent_1._get_compatible_context(model_client_1, messages) # type: ignore
|
2025-02-04 06:55:04 -08:00
|
|
|
assert len(result) == 4
|
|
|
|
assert isinstance(result[1].content, str)
|
|
|
|
|
|
|
|
agent_2 = AssistantAgent(name="assistant_2", model_client=model_client_2)
|
2025-02-26 05:11:35 +10:00
|
|
|
result = agent_2._get_compatible_context(model_client_2, messages) # type: ignore
|
2025-02-04 06:55:04 -08:00
|
|
|
assert len(result) == 4
|
|
|
|
assert isinstance(result[1].content, list)
|
2024-12-15 11:18:17 +05:30
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
async def test_list_chat_messages(monkeypatch: pytest.MonkeyPatch) -> None:
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client = ReplayChatCompletionClient(
|
|
|
|
[
|
|
|
|
CreateResult(
|
|
|
|
finish_reason="stop",
|
|
|
|
content="Response to message 1",
|
|
|
|
usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
|
|
|
|
cached=False,
|
|
|
|
)
|
|
|
|
]
|
|
|
|
)
|
2024-12-15 11:18:17 +05:30
|
|
|
agent = AssistantAgent(
|
|
|
|
"test_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
2024-12-15 11:18:17 +05:30
|
|
|
)
|
|
|
|
|
|
|
|
# Create a list of chat messages
|
2025-03-30 09:34:40 -07:00
|
|
|
messages: List[BaseChatMessage] = [
|
2024-12-15 11:18:17 +05:30
|
|
|
TextMessage(content="Message 1", source="user"),
|
|
|
|
TextMessage(content="Message 2", source="user"),
|
|
|
|
]
|
|
|
|
|
|
|
|
# Test run method with list of messages
|
|
|
|
result = await agent.run(task=messages)
|
|
|
|
assert len(result.messages) == 3 # 2 input messages + 1 response message
|
|
|
|
assert isinstance(result.messages[0], TextMessage)
|
|
|
|
assert result.messages[0].content == "Message 1"
|
|
|
|
assert result.messages[0].source == "user"
|
|
|
|
assert isinstance(result.messages[1], TextMessage)
|
|
|
|
assert result.messages[1].content == "Message 2"
|
|
|
|
assert result.messages[1].source == "user"
|
|
|
|
assert isinstance(result.messages[2], TextMessage)
|
|
|
|
assert result.messages[2].content == "Response to message 1"
|
|
|
|
assert result.messages[2].source == "test_agent"
|
|
|
|
assert result.messages[2].models_usage is not None
|
|
|
|
assert result.messages[2].models_usage.completion_tokens == 5
|
|
|
|
assert result.messages[2].models_usage.prompt_tokens == 10
|
|
|
|
|
|
|
|
# Test run_stream method with list of messages
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client.reset() # Reset the mock client
|
2024-12-15 11:18:17 +05:30
|
|
|
index = 0
|
|
|
|
async for message in agent.run_stream(task=messages):
|
|
|
|
if isinstance(message, TaskResult):
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_task_results(message, result)
|
2024-12-15 11:18:17 +05:30
|
|
|
else:
|
2025-05-23 14:29:24 +09:00
|
|
|
assert compare_messages(message, result.messages[index])
|
2024-12-15 11:18:17 +05:30
|
|
|
index += 1
|
2024-12-29 07:50:54 +01:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_model_context(monkeypatch: pytest.MonkeyPatch) -> None:
    """Verify that AssistantAgent uses a caller-supplied model context.

    A ``BufferedChatCompletionContext`` with ``buffer_size=2`` should cause the
    agent to send only the two most recent messages (plus the system message)
    to the model client, and the ``model_context`` property should expose the
    exact context instance that was passed in.
    """
    model_client = ReplayChatCompletionClient(["Response to message 3"])
    model_context = BufferedChatCompletionContext(buffer_size=2)
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        model_context=model_context,
    )

    # Three user messages; the buffered context (size 2) should retain only the
    # last two when the model is invoked.
    messages: List[BaseChatMessage] = [
        TextMessage(content="Message 1", source="user"),
        TextMessage(content="Message 2", source="user"),
        TextMessage(content="Message 3", source="user"),
    ]
    await agent.run(task=messages)

    # Check that the model_context property returns the correct internal context.
    assert agent.model_context == model_context

    # Check that the mock client was called exactly once, with only the last
    # two messages from the context plus the system message (2 + 1 = 3).
    assert len(model_client.create_calls) == 1
    assert len(model_client.create_calls[0]["messages"]) == 3
|
@pytest.mark.asyncio
|
|
|
|
async def test_run_with_memory(monkeypatch: pytest.MonkeyPatch) -> None:
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client = ReplayChatCompletionClient(["Hello"])
|
Memory Interface in AgentChat (#4438)
* initial base memory impl
* update, add example with chromadb
* include mimetype consideration
* add transform method
* update to address feedback, will update after 4681 is merged
* update memory impl,
* remove chroma db, typing fixes
* format, add test
* update uv lock
* update docs
* format updates
* update notebook
* add memoryqueryevent message, yield message for observability.
* minor fixes, make score optional/none
* Update python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
* update tests to improve cov
* refactor, move memory to core.
* format fixes
* format updates
* format updates
* fix azure notebook import, other fixes
* update notebook, support str query in Memory protocol
* update test
* update cells
* add specific extensible return types to memory query and update_context
---------
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2025-01-13 23:06:13 -08:00
|
|
|
b64_image_str = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC"
|
|
|
|
|
|
|
|
# Test basic memory properties and empty context
|
|
|
|
memory = ListMemory(name="test_memory")
|
|
|
|
assert memory.name == "test_memory"
|
|
|
|
|
|
|
|
empty_context = BufferedChatCompletionContext(buffer_size=2)
|
|
|
|
empty_results = await memory.update_context(empty_context)
|
|
|
|
assert len(empty_results.memories.results) == 0
|
|
|
|
|
|
|
|
# Test various content types
|
|
|
|
memory = ListMemory()
|
|
|
|
await memory.add(MemoryContent(content="text content", mime_type=MemoryMimeType.TEXT))
|
|
|
|
await memory.add(MemoryContent(content={"key": "value"}, mime_type=MemoryMimeType.JSON))
|
|
|
|
await memory.add(MemoryContent(content=Image.from_base64(b64_image_str), mime_type=MemoryMimeType.IMAGE))
|
|
|
|
|
|
|
|
# Test query functionality
|
|
|
|
query_result = await memory.query(MemoryContent(content="", mime_type=MemoryMimeType.TEXT))
|
|
|
|
assert isinstance(query_result, MemoryQueryResult)
|
|
|
|
# Should have all three memories we added
|
|
|
|
assert len(query_result.results) == 3
|
|
|
|
|
|
|
|
# Test clear and cleanup
|
|
|
|
await memory.clear()
|
|
|
|
empty_query = await memory.query(MemoryContent(content="", mime_type=MemoryMimeType.TEXT))
|
|
|
|
assert len(empty_query.results) == 0
|
|
|
|
await memory.close() # Should not raise
|
|
|
|
|
|
|
|
# Test invalid memory type
|
|
|
|
with pytest.raises(TypeError):
|
|
|
|
AssistantAgent(
|
|
|
|
"test_agent",
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
model_client=model_client,
|
Memory Interface in AgentChat (#4438)
* initial base memory impl
* update, add example with chromadb
* include mimetype consideration
* add transform method
* update to address feedback, will update after 4681 is merged
* update memory impl,
* remove chroma db, typing fixes
* format, add test
* update uv lock
* update docs
* format updates
* update notebook
* add memoryqueryevent message, yield message for observability.
* minor fixes, make score optional/none
* Update python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
* update tests to improve cov
* refactor, move memory to core.
* format fixes
* format updates
* format updates
* fix azure notebook import, other fixes
* update notebook, support str query in Memory protocol
* update test
* update cells
* add specific extensible return types to memory query and update_context
---------
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2025-01-13 23:06:13 -08:00
|
|
|
memory="invalid", # type: ignore
|
|
|
|
)
|
|
|
|
|
|
|
|
# Test with agent
|
|
|
|
memory2 = ListMemory()
|
|
|
|
await memory2.add(MemoryContent(content="test instruction", mime_type=MemoryMimeType.TEXT))
|
|
|
|
|
Support for external agent runtime in AgentChat (#5843)
Resolves #4075
1. Introduce custom runtime parameter for all AgentChat teams
(RoundRobinGroupChat, SelectorGroupChat, etc.). This is done by making
sure each team's topics are isolated from other teams, and decoupling
state from agent identities. Also, I removed the closure agent from the
BaseGroupChat and use the group chat manager agent to relay messages to
the output message queue.
2. Added unit tests to test scenarios with custom runtimes by using
pytest fixture
3. Refactored existing unit tests to use ReplayChatCompletionClient with
a few improvements to the client.
4. Fix a one-liner bug in AssistantAgent that caused deserialized agent
to have handoffs.
How to use it?
```python
import asyncio
from autogen_core import SingleThreadedAgentRuntime
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.conditions import TextMentionTermination
from autogen_ext.models.replay import ReplayChatCompletionClient
async def main() -> None:
# Create a runtime
runtime = SingleThreadedAgentRuntime()
runtime.start()
# Create a model client.
model_client = ReplayChatCompletionClient(
["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
)
# Create agents
agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
# Create a termination condition
termination_condition = TextMentionTermination("10", sources=["assistant1", "assistant2"])
# Create a team
team = RoundRobinGroupChat([agent1, agent2], runtime=runtime, termination_condition=termination_condition)
# Run the team
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Save the state.
state = await team.save_state()
# Load the state to an existing team.
await team.load_state(state)
# Run the team again
model_client.reset()
stream = team.run_stream(task="Count to 10.")
async for message in stream:
print(message)
# Create a new team, with the same agent names.
agent3 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
agent4 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
new_team = RoundRobinGroupChat([agent3, agent4], runtime=runtime, termination_condition=termination_condition)
# Load the state to the new team.
await new_team.load_state(state)
# Run the new team
model_client.reset()
new_stream = new_team.run_stream(task="Count to 10.")
async for message in new_stream:
print(message)
# Stop the runtime
await runtime.stop()
asyncio.run(main())
```
TODOs as future PRs:
1. Documentation.
2. How to handle errors in custom runtime when the agent has exception?
---------
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
2025-03-06 10:32:52 -08:00
|
|
|
agent = AssistantAgent("test_agent", model_client=model_client, memory=[memory2])
|
Memory Interface in AgentChat (#4438)
* initial base memroy impl
* update, add example with chromadb
* include mimetype consideration
* add transform method
* update to address feedback, will update after 4681 is merged
* update memory impl,
* remove chroma db, typing fixes
* format, add test
* update uv lock
* update docs
* format updates
* update notebook
* add memoryqueryevent message, yield message for observability.
* minor fixes, make score optional/none
* Update python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
* update tests to improve cov
* refactor, move memory to core.
* format fixxes
* format updates
* format updates
* fix azure notebook import, other fixes
* update notebook, support str query in Memory protocol
* update test
* update cells
* add specific extensible return types to memory query and update_context
---------
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2025-01-13 23:06:13 -08:00
|
|
|
|
2025-02-05 19:07:27 -05:00
|
|
|
# Test dump and load component with memory
|
|
|
|
agent_config: ComponentModel = agent.dump_component()
|
|
|
|
assert agent_config.provider == "autogen_agentchat.agents.AssistantAgent"
|
|
|
|
agent2 = AssistantAgent.load_component(agent_config)
|
|
|
|
|
|
|
|
result = await agent2.run(task="test task")
|
Memory Interface in AgentChat (#4438)
* initial base memroy impl
* update, add example with chromadb
* include mimetype consideration
* add transform method
* update to address feedback, will update after 4681 is merged
* update memory impl,
* remove chroma db, typing fixes
* format, add test
* update uv lock
* update docs
* format updates
* update notebook
* add memoryqueryevent message, yield message for observability.
* minor fixes, make score optional/none
* Update python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
* update tests to improve cov
* refactor, move memory to core.
* format fixxes
* format updates
* format updates
* fix azure notebook import, other fixes
* update notebook, support str query in Memory protocol
* update test
* update cells
* add specific extensible return types to memory query and update_context
---------
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
2025-01-13 23:06:13 -08:00
|
|
|
assert len(result.messages) > 0
|
|
|
|
memory_event = next((msg for msg in result.messages if isinstance(msg, MemoryQueryEvent)), None)
|
|
|
|
assert memory_event is not None
|
|
|
|
assert len(memory_event.content) > 0
|
|
|
|
assert isinstance(memory_event.content[0], MemoryContent)
|
|
|
|
|
|
|
|
# Test memory protocol
|
|
|
|
class BadMemory:
|
|
|
|
pass
|
|
|
|
|
|
|
|
assert not isinstance(BadMemory(), Memory)
|
|
|
|
assert isinstance(ListMemory(), Memory)
|
2025-01-16 22:29:40 -08:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_assistant_agent_declarative() -> None:
    """Round-trip an AssistantAgent through dump_component/load_component.

    Checks that an agent configured with a custom model context, memory, and
    tools serializes to a ComponentModel with the expected provider and can be
    restored from that config.
    """
    model_client = ReplayChatCompletionClient(
        ["Response to message 3"],
        model_info={
            "function_calling": True,
            "vision": True,
            "json_output": True,
            "family": ModelFamily.GPT_4O,
            "structured_output": True,
        },
    )
    model_context = BufferedChatCompletionContext(buffer_size=2)
    agent = AssistantAgent(
        "test_agent",
        model_client=model_client,
        model_context=model_context,
        memory=[ListMemory(name="test_memory")],
    )

    # Dump and reload: the provider string identifies the component class.
    agent_config: ComponentModel = agent.dump_component()
    assert agent_config.provider == "autogen_agentchat.agents.AssistantAgent"

    agent2 = AssistantAgent.load_component(agent_config)
    assert agent2.name == agent.name

    # An agent with tools (plain functions and a FunctionTool) must also dump.
    agent3 = AssistantAgent(
        "test_agent",
        model_client=model_client,
        model_context=model_context,
        tools=[
            _pass_function,
            _fail_function,
            FunctionTool(_echo_function, description="Echo"),
        ],
    )
    agent3_config = agent3.dump_component()
    assert agent3_config.provider == "autogen_agentchat.agents.AssistantAgent"
@pytest.mark.asyncio
async def test_model_client_stream() -> None:
    """Verify streaming chunks reassemble into the final TextMessage.

    With model_client_stream=True the agent emits
    ModelClientStreamingChunkEvent messages whose concatenation must equal the
    content of the final message in the TaskResult.
    """
    mock_client = ReplayChatCompletionClient(
        [
            "Response to message 3",
        ]
    )
    agent = AssistantAgent(
        "test_agent",
        model_client=mock_client,
        model_client_stream=True,
    )
    chunks: List[str] = []
    async for message in agent.run_stream(task="task"):
        if isinstance(message, TaskResult):
            assert isinstance(message.messages[-1], TextMessage)
            assert message.messages[-1].content == "Response to message 3"
        elif isinstance(message, ModelClientStreamingChunkEvent):
            chunks.append(message.content)
    # The streamed chunks must reconstruct the full response exactly.
    assert "".join(chunks) == "Response to message 3"
@pytest.mark.asyncio
async def test_model_client_stream_with_tool_calls() -> None:
    """Verify streaming combined with tool calls and reflection.

    The replayed model first requests two tool calls, then streams a final
    response. The run must surface a ToolCallRequestEvent, a
    ToolCallExecutionEvent with the tool results, and a final TextMessage whose
    content matches the concatenation of the streamed chunks.
    """
    mock_client = ReplayChatCompletionClient(
        [
            CreateResult(
                content=[
                    FunctionCall(id="1", name="_pass_function", arguments=r'{"input": "task"}'),
                    FunctionCall(id="3", name="_echo_function", arguments=r'{"input": "task"}'),
                ],
                finish_reason="function_calls",
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
            "Example response 2 to task",
        ]
    )
    # Replay clients default to no function calling; enable it for this test.
    mock_client._model_info["function_calling"] = True  # pyright: ignore
    agent = AssistantAgent(
        "test_agent",
        model_client=mock_client,
        model_client_stream=True,
        reflect_on_tool_use=True,
        tools=[_pass_function, _echo_function],
    )
    chunks: List[str] = []
    async for message in agent.run_stream(task="task"):
        if isinstance(message, TaskResult):
            assert isinstance(message.messages[-1], TextMessage)
            assert isinstance(message.messages[1], ToolCallRequestEvent)
            assert message.messages[-1].content == "Example response 2 to task"
            assert message.messages[1].content == [
                FunctionCall(id="1", name="_pass_function", arguments=r'{"input": "task"}'),
                FunctionCall(id="3", name="_echo_function", arguments=r'{"input": "task"}'),
            ]
            assert isinstance(message.messages[2], ToolCallExecutionEvent)
            assert message.messages[2].content == [
                FunctionExecutionResult(call_id="1", content="pass", is_error=False, name="_pass_function"),
                FunctionExecutionResult(call_id="3", content="task", is_error=False, name="_echo_function"),
            ]
        elif isinstance(message, ModelClientStreamingChunkEvent):
            chunks.append(message.content)
    assert "".join(chunks) == "Example response 2 to task"
@pytest.mark.asyncio
async def test_invalid_structured_output_format() -> None:
    """Structured output that is missing a required field must raise.

    The model replies with JSON lacking the required ``status`` field, so
    pydantic validation of the output_content_type must fail with a
    ValidationError.
    """

    class AgentResponse(BaseModel):
        response: str
        status: str

    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="stop",
                # Deliberately omits "status" to trigger validation failure.
                content='{"response": "Hello"}',
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            ),
        ]
    )

    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        output_content_type=AgentResponse,
    )

    with pytest.raises(ValidationError):
        await agent.run()
@pytest.mark.asyncio
async def test_structured_message_factory_serialization() -> None:
    """Round-trip an agent with structured output through serialization.

    An agent configured with output_content_type and a format string is dumped,
    reloaded, and run; the restored agent must still produce a
    StructuredMessage with the expected typed content.
    """

    class AgentResponse(BaseModel):
        result: str
        status: str

    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="stop",
                content=AgentResponse(result="All good", status="ok").model_dump_json(),
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            )
        ]
    )

    agent = AssistantAgent(
        name="structured_agent",
        model_client=model_client,
        output_content_type=AgentResponse,
        output_content_type_format="{result} - {status}",
    )

    dumped = agent.dump_component()
    restored_agent = AssistantAgent.load_component(dumped)
    result = await restored_agent.run()

    assert isinstance(result.messages[0], StructuredMessage)
    assert result.messages[0].content.result == "All good"  # type: ignore[reportUnknownMemberType]
    assert result.messages[0].content.status == "ok"  # type: ignore[reportUnknownMemberType]
@pytest.mark.asyncio
async def test_structured_message_format_string() -> None:
    """Check the format string applied to a StructuredMessage.

    The agent must emit a StructuredMessage whose content equals the replayed
    model output and whose to_model_text() renders via the configured
    output_content_type_format template.
    """

    class AgentResponse(BaseModel):
        field1: str
        field2: str

    expected = AgentResponse(field1="foo", field2="bar")

    model_client = ReplayChatCompletionClient(
        [
            CreateResult(
                finish_reason="stop",
                content=expected.model_dump_json(),
                usage=RequestUsage(prompt_tokens=10, completion_tokens=5),
                cached=False,
            )
        ]
    )

    agent = AssistantAgent(
        name="formatted_agent",
        model_client=model_client,
        output_content_type=AgentResponse,
        output_content_type_format="{field1} - {field2}",
    )

    result = await agent.run()

    assert len(result.messages) == 1
    message = result.messages[0]

    # Check that it's a StructuredMessage with the correct content model
    assert isinstance(message, StructuredMessage)
    assert isinstance(message.content, AgentResponse)  # type: ignore[reportUnknownMemberType]
    assert message.content == expected

    # Check that the format_string was applied correctly
    assert message.to_model_text() == "foo - bar"
@pytest.mark.asyncio
async def test_tools_serialize_and_deserialize() -> None:
    """Round-trip an agent with a plain-function tool.

    After dump_component/load_component, the restored agent must expose the
    same name, the same tools in its workbench, and the same component version.
    """

    def test() -> str:
        return "hello world"

    client = OpenAIChatCompletionClient(
        model="gpt-4o",
        api_key="API_KEY",
    )

    agent = AssistantAgent(
        name="test",
        model_client=client,
        tools=[test],
    )

    serialize = agent.dump_component()
    deserialize = AssistantAgent.load_component(serialize)

    assert deserialize.name == agent.name
    # Compare tool listings pairwise; strict zip also checks equal lengths.
    for original, restored in zip(agent._workbench, deserialize._workbench, strict=True):  # type: ignore
        assert await original.list_tools() == await restored.list_tools()  # type: ignore
    assert agent.component_version == deserialize.component_version
@pytest.mark.asyncio
async def test_workbenchs_serialize_and_deserialize() -> None:
    """Round-trip an agent configured with a single McpWorkbench.

    The restored agent's workbench must be an McpWorkbench with a config equal
    to the original's.
    """
    workbench = McpWorkbench(server_params=SseServerParams(url="http://test-url"))

    client = OpenAIChatCompletionClient(
        model="gpt-4o",
        api_key="API_KEY",
    )

    agent = AssistantAgent(
        name="test",
        model_client=client,
        workbench=workbench,
    )

    serialize = agent.dump_component()
    deserialize = AssistantAgent.load_component(serialize)

    assert deserialize.name == agent.name
    for original, restored in zip(agent._workbench, deserialize._workbench, strict=True):  # type: ignore
        assert isinstance(original, McpWorkbench)
        assert isinstance(restored, McpWorkbench)
        # Configs (including server params) must survive the round trip.
        assert original._to_config() == restored._to_config()  # type: ignore
@pytest.mark.asyncio
async def test_multiple_workbenchs_serialize_and_deserialize() -> None:
    """Round-trip an agent configured with a list of workbenches.

    The restored agent must carry a list of the same length, and each restored
    McpWorkbench must have a config equal to its original counterpart.
    """
    workbenches: List[McpWorkbench] = [
        McpWorkbench(server_params=SseServerParams(url="http://test-url-1")),
        McpWorkbench(server_params=SseServerParams(url="http://test-url-2")),
    ]

    client = OpenAIChatCompletionClient(
        model="gpt-4o",
        api_key="API_KEY",
    )

    agent = AssistantAgent(
        name="test_multi",
        model_client=client,
        workbench=workbenches,
    )

    serialize = agent.dump_component()
    deserialized_agent: AssistantAgent = AssistantAgent.load_component(serialize)

    assert deserialized_agent.name == agent.name
    assert isinstance(deserialized_agent._workbench, list)  # type: ignore
    assert len(deserialized_agent._workbench) == len(workbenches)  # type: ignore

    for original, restored in zip(agent._workbench, deserialized_agent._workbench, strict=True):  # type: ignore
        assert isinstance(original, McpWorkbench)
        assert isinstance(restored, McpWorkbench)
        assert original._to_config() == restored._to_config()  # type: ignore
@pytest.mark.asyncio
async def test_tools_deserialize_aware() -> None:
    """Load an AssistantAgent from a raw JSON component dump and run it.

    The dump embeds a ReplayChatCompletionClient that requests a single call
    to a serialized FunctionTool (``hello``). Running the loaded agent must
    execute the tool and finish with a ToolCallSummaryMessage containing the
    tool's return value.

    NOTE(review): the JSON below is parsed with json.loads, so its whitespace
    layout is not behavior-relevant; the key/value content is what matters.
    """
    dump = """
    {
      "provider": "autogen_agentchat.agents.AssistantAgent",
      "component_type": "agent",
      "version": 1,
      "component_version": 2,
      "description": "An agent that provides assistance with tool use.",
      "label": "AssistantAgent",
      "config": {
        "name": "TestAgent",
        "model_client": {
          "provider": "autogen_ext.models.replay.ReplayChatCompletionClient",
          "component_type": "replay_chat_completion_client",
          "version": 1,
          "component_version": 1,
          "description": "A mock chat completion client that replays predefined responses using an index-based approach.",
          "label": "ReplayChatCompletionClient",
          "config": {
            "chat_completions": [
              {
                "finish_reason": "function_calls",
                "content": [
                  {
                    "id": "hello",
                    "arguments": "{}",
                    "name": "hello"
                  }
                ],
                "usage": {
                  "prompt_tokens": 0,
                  "completion_tokens": 0
                },
                "cached": false
              }
            ],
            "model_info": {
              "vision": false,
              "function_calling": true,
              "json_output": false,
              "family": "unknown",
              "structured_output": false
            }
          }
        },
        "tools": [
          {
            "provider": "autogen_core.tools.FunctionTool",
            "component_type": "tool",
            "version": 1,
            "component_version": 1,
            "description": "Create custom tools by wrapping standard Python functions.",
            "label": "FunctionTool",
            "config": {
              "source_code": "def hello():\\n return 'Hello, World!'\\n",
              "name": "hello",
              "description": "",
              "global_imports": [],
              "has_cancellation_support": false
            }
          }
        ],
        "model_context": {
          "provider": "autogen_core.model_context.UnboundedChatCompletionContext",
          "component_type": "chat_completion_context",
          "version": 1,
          "component_version": 1,
          "description": "An unbounded chat completion context that keeps a view of the all the messages.",
          "label": "UnboundedChatCompletionContext",
          "config": {}
        },
        "description": "An agent that provides assistance with ability to use tools.",
        "system_message": "You are a helpful assistant.",
        "model_client_stream": false,
        "reflect_on_tool_use": false,
        "tool_call_summary_format": "{result}",
        "metadata": {}
      }
    }
    """
    agent = AssistantAgent.load_component(json.loads(dump))
    result = await agent.run(task="hello")

    # task message + tool call request + tool call execution + summary
    assert len(result.messages) == 4
    assert result.messages[-1].content == "Hello, World!"  # type: ignore
    assert result.messages[-1].type == "ToolCallSummaryMessage"  # type: ignore
    assert isinstance(result.messages[-1], ToolCallSummaryMessage)  # type: ignore