from types import MethodType
from typing import Any, AsyncGenerator, Sequence

import pytest
import pytest_asyncio
from autogen_agentchat.agents import AssistantAgent, SocietyOfMindAgent
from autogen_agentchat.conditions import MaxMessageTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_core import AgentRuntime, SingleThreadedAgentRuntime
from autogen_core.models import CreateResult, LLMMessage, SystemMessage
from autogen_ext.models.replay import ReplayChatCompletionClient
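

# Fixture that runs each test twice: once with an external SingleThreadedAgentRuntime
# (started here and stopped after the test), and once with None so the team uses its
# own embedded runtime.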
@pytest_asyncio.fixture(params=["single_threaded", "embedded"]) # type: ignore
async def runtime(request: pytest.FixtureRequest) -> AsyncGenerator[AgentRuntime | None, None]:
    if request.param == "single_threaded":
        runtime = SingleThreadedAgentRuntime()
        runtime.start()
        yield runtime
        await runtime.stop()
    elif request.param == "embedded":
        yield None
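

# Wraps a two-agent RoundRobinGroupChat in a SocietyOfMindAgent and checks that the
# response contains only the user task and the agent's own summary message, then
# exercises state save/load and component serialization round trips.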
@pytest.mark.asyncio
async def test_society_of_mind_agent(runtime: AgentRuntime | None) -> None:
    model_client = ReplayChatCompletionClient(
        ["1", "2", "3"],
    )
    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(3)
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client)
    response = await society_of_mind_agent.run(task="Count to 10.")
    assert len(response.messages) == 2
    assert response.messages[0].source == "user"
    assert response.messages[1].source == "society_of_mind"

    # Test save and load state.
    state = await society_of_mind_agent.save_state()
    assert state is not None
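    # A fresh inner team and SocietyOfMindAgent constructed with the same names should
    # accept the saved state, and re-saving it should yield an identical state.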
    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(3)
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent2 = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client)
    await society_of_mind_agent2.load_state(state)
    state2 = await society_of_mind_agent2.save_state()
    assert state == state2

    # Test serialization.
    soc_agent_config = society_of_mind_agent.dump_component()
    assert soc_agent_config.provider == "autogen_agentchat.agents.SocietyOfMindAgent"

    # Test deserialization.
    loaded_soc_agent = SocietyOfMindAgent.load_component(soc_agent_config)
    assert isinstance(loaded_soc_agent, SocietyOfMindAgent)
    assert loaded_soc_agent.name == "society_of_mind"
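

# Running without a task: the response should contain a single message from the
# society_of_mind agent and no user message.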
@pytest.mark.asyncio
async def test_society_of_mind_agent_empty_messages(runtime: AgentRuntime | None) -> None:
    model_client = ReplayChatCompletionClient(
        ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
    )
    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(3)
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client)
    response = await society_of_mind_agent.run()
    assert len(response.messages) == 1
    assert response.messages[0].source == "society_of_mind"
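

# MaxMessageTermination(1) stops the inner team right after the task message, so no
# inner agent replies and the society_of_mind agent returns the default "No response."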
@pytest.mark.asyncio
async def test_society_of_mind_agent_no_response(runtime: AgentRuntime | None) -> None:
    model_client = ReplayChatCompletionClient(
        ["1", "2", "3"],
    )
    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(1)  # Set to 1 to force no response.
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client)
    response = await society_of_mind_agent.run(task="Count to 10.")
    assert len(response.messages) == 2
    assert response.messages[0].source == "user"
    assert response.messages[1].source == "society_of_mind"
    assert response.messages[1].to_text() == "No response."
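

# Calling run() again with no new task continues from the previous state; each
# continuation produces a single society_of_mind message.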
@pytest.mark.asyncio
async def test_society_of_mind_agent_multiple_rounds(runtime: AgentRuntime | None) -> None:
    model_client = ReplayChatCompletionClient(
        ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
    )
    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(3)
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client)
    response = await society_of_mind_agent.run(task="Count to 10.")
    assert len(response.messages) == 2
    assert response.messages[0].source == "user"
    assert response.messages[1].source == "society_of_mind"

    # Continue.
    response = await society_of_mind_agent.run()
    assert len(response.messages) == 1
    assert response.messages[0].source == "society_of_mind"

    # Continue.
    response = await society_of_mind_agent.run()
    assert len(response.messages) == 1
    assert response.messages[0].source == "society_of_mind"
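

# The summarizing client advertises multiple_system_messages=False, so the patched
# create() verifies that no SystemMessage is ever passed to the model.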
@pytest.mark.asyncio
async def test_society_of_mind_agent_no_multiple_system_messages(
    monkeypatch: pytest.MonkeyPatch, runtime: AgentRuntime | None
) -> None:
    model_client = ReplayChatCompletionClient(["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"])

    model_client_soma = ReplayChatCompletionClient(
        ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
        model_info={
            "vision": False,
            "function_calling": False,
            "json_output": False,
            "family": "unknown",
            "structured_output": False,
            "multiple_system_messages": False,
        },
    )

    original_create = model_client_soma.create

    # mock method with bound self
    async def _mock_create(
        self: ReplayChatCompletionClient, messages: Sequence[LLMMessage], *args: Any, **kwargs: Any
    ) -> CreateResult:
        for message in messages:
            assert not isinstance(message, SystemMessage)
        kwargs["messages"] = messages
        return await original_create(*args, **kwargs)

    # bind it
    monkeypatch.setattr(model_client_soma, "create", MethodType(_mock_create, model_client_soma))

    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(3)
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client_soma)
    await society_of_mind_agent.run(task="Count to 10.")
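

# With multiple_system_messages=True, the patched create() verifies that both the
# first and the last message passed to the model are SystemMessage instances.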
@pytest.mark.asyncio
async def test_society_of_mind_agent_yes_multiple_system_messages(
    monkeypatch: pytest.MonkeyPatch, runtime: AgentRuntime | None
) -> None:
    model_client = ReplayChatCompletionClient(["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"])

    model_client_soma = ReplayChatCompletionClient(
        ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
        model_info={
            "vision": False,
            "function_calling": False,
            "json_output": False,
            "family": "unknown",
            "structured_output": False,
            "multiple_system_messages": True,
        },
    )

    original_create = model_client_soma.create

    # mock method with bound self
    async def _mock_create(
        self: ReplayChatCompletionClient, messages: Sequence[LLMMessage], *args: Any, **kwargs: Any
    ) -> CreateResult:
        assert isinstance(messages[0], SystemMessage)
        assert isinstance(messages[-1], SystemMessage)
        kwargs["messages"] = messages
        return await original_create(*args, **kwargs)

    # bind it
    monkeypatch.setattr(model_client_soma, "create", MethodType(_mock_create, model_client_soma))

    agent1 = AssistantAgent("assistant1", model_client=model_client, system_message="You are a helpful assistant.")
    agent2 = AssistantAgent("assistant2", model_client=model_client, system_message="You are a helpful assistant.")
    inner_termination = MaxMessageTermination(3)
    inner_team = RoundRobinGroupChat([agent1, agent2], termination_condition=inner_termination, runtime=runtime)
    society_of_mind_agent = SocietyOfMindAgent("society_of_mind", team=inner_team, model_client=model_client_soma)
    await society_of_mind_agent.run(task="Count to 10.")