"""
This module defines various message types used for agent-to-agent communication.
Each message type inherits either from the BaseChatMessage class or BaseAgentEvent
class and includes specific fields relevant to the type of message being sent.
"""
from abc import ABC
from typing import List, Literal

from autogen_core import FunctionCall, Image
from autogen_core.memory import MemoryContent
from autogen_core.models import FunctionExecutionResult, LLMMessage, RequestUsage
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import Annotated
class BaseMessage(BaseModel, ABC):
    """Base class for all message types."""

    source: str
    """The name of the agent that sent this message."""

    models_usage: RequestUsage | None = None
    """The model client usage incurred when producing this message."""

    # Subclass fields may carry non-pydantic types (e.g. Image in
    # MultiModalMessage), so arbitrary types must be allowed here.
    model_config = ConfigDict(arbitrary_types_allowed=True)


class BaseChatMessage(BaseMessage, ABC):
    """Base class for chat messages.

    Chat messages are used for agent-to-agent communication; concrete
    subclasses (e.g. :class:`TextMessage`) add a ``content`` field and a
    ``type`` discriminator.
    """

    # No extra fields beyond BaseMessage; the docstring is a sufficient
    # class body, so the redundant `pass` was removed.


class BaseAgentEvent(BaseMessage, ABC):
    """Base class for agent events.

    Events are emitted for observability while agents and teams work; they
    are not used for agent-to-agent communication (see ``AgentEvent``).
    """

    # No extra fields beyond BaseMessage; the docstring is a sufficient
    # class body, so the redundant `pass` was removed.


class TextMessage(BaseChatMessage):
    """A text message."""

    content: str
    """The content of the message."""

    # Literal discriminator used by the ChatMessage annotated union.
    type: Literal["TextMessage"] = "TextMessage"


class MultiModalMessage(BaseChatMessage):
    """A multimodal message."""

    content: List[str | Image]
    """The content of the message; a list of text strings and/or images."""

    # Literal discriminator used by the ChatMessage annotated union.
    type: Literal["MultiModalMessage"] = "MultiModalMessage"


class StopMessage(BaseChatMessage):
    """A message requesting stop of a conversation."""

    content: str
    """The content for the stop message."""

    # Literal discriminator used by the ChatMessage annotated union.
    type: Literal["StopMessage"] = "StopMessage"


class HandoffMessage(BaseChatMessage):
    """A message requesting handoff of a conversation to another agent."""

    target: str
    """The name of the target agent to handoff to."""

    content: str
    """The handoff message to the target agent."""

    # default_factory is the idiomatic pydantic way to declare a mutable
    # default, rather than a shared `[]` literal.
    context: List[LLMMessage] = Field(default_factory=list)
    """The model context to be passed to the target agent."""

    # Literal discriminator used by the ChatMessage annotated union.
    type: Literal["HandoffMessage"] = "HandoffMessage"


class ToolCallRequestEvent(BaseAgentEvent):
    """An event signaling a request to use tools."""

    content: List[FunctionCall]
    """The tool calls."""

    # Literal discriminator used by the AgentEvent annotated union.
    type: Literal["ToolCallRequestEvent"] = "ToolCallRequestEvent"


class ToolCallExecutionEvent(BaseAgentEvent):
    """An event signaling the execution of tool calls."""

    content: List[FunctionExecutionResult]
    """The tool call results."""

    # Literal discriminator used by the AgentEvent annotated union.
    type: Literal["ToolCallExecutionEvent"] = "ToolCallExecutionEvent"


class ToolCallSummaryMessage(BaseChatMessage):
    """A message signaling the summary of tool call results."""

    content: str
    """Summary of the tool call results."""

    # Literal discriminator used by the ChatMessage annotated union.
    type: Literal["ToolCallSummaryMessage"] = "ToolCallSummaryMessage"


class UserInputRequestedEvent(BaseAgentEvent):
    """An event signaling that the user proxy has requested user input.

    Published prior to invoking the input callback.
    """

    request_id: str
    """Identifier for the user input request."""

    content: Literal[""] = ""
    """Empty content for compat with consumers expecting a content field."""

    # Literal discriminator used by the AgentEvent annotated union.
    type: Literal["UserInputRequestedEvent"] = "UserInputRequestedEvent"


class MemoryQueryEvent(BaseAgentEvent):
    """An event signaling the results of memory queries."""

    content: List[MemoryContent]
    """The memory query results."""

    # Literal discriminator used by the AgentEvent annotated union.
    type: Literal["MemoryQueryEvent"] = "MemoryQueryEvent"


class ModelClientStreamingChunkEvent(BaseAgentEvent):
    """An event signaling a text output chunk from a model client in streaming mode."""

    content: str
    """The partial text chunk."""

    # Literal discriminator used by the AgentEvent annotated union.
    type: Literal["ModelClientStreamingChunkEvent"] = "ModelClientStreamingChunkEvent"


class ThoughtEvent(BaseAgentEvent):
    """An event signaling the thought process of an agent.

    It is used to communicate the reasoning tokens generated by a reasoning model,
    or the extra text content generated by a function call."""

    content: str
    """The thought process."""

    # Literal discriminator used by the AgentEvent annotated union.
    type: Literal["ThoughtEvent"] = "ThoughtEvent"


# Discriminated union of chat message types; pydantic dispatches on each
# member's Literal `type` field when validating/deserializing.
ChatMessage = Annotated[
    TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, Field(discriminator="type")
]
"""Messages for agent-to-agent communication only."""


# Discriminated union of agent event types; pydantic dispatches on each
# member's Literal `type` field when validating/deserializing.
AgentEvent = Annotated[
    ToolCallRequestEvent
    | ToolCallExecutionEvent
    | MemoryQueryEvent
    | UserInputRequestedEvent
    | ModelClientStreamingChunkEvent
    | ThoughtEvent,
    Field(discriminator="type"),
]
"""Events emitted by agents and teams when they work, not used for agent-to-agent communication."""


# Public API of this module. BaseChatMessage and BaseAgentEvent are added:
# they are named in the module docstring as the intended extension points
# but were previously missing from the export list.
__all__ = [
    "AgentEvent",
    "BaseMessage",
    "BaseChatMessage",
    "BaseAgentEvent",
    "ChatMessage",
    "HandoffMessage",
    "MultiModalMessage",
    "StopMessage",
    "TextMessage",
    "ToolCallExecutionEvent",
    "ToolCallRequestEvent",
    "ToolCallSummaryMessage",
    "MemoryQueryEvent",
    "UserInputRequestedEvent",
    "ModelClientStreamingChunkEvent",
    "ThoughtEvent",
]