Mirror of https://github.com/microsoft/autogen.git, synced 2025-06-26 22:30:10 +00:00

This PR fixes a bug where `model_context` was either ignored or explicitly set to `None` during agent deserialization (`_from_config`) in:

- `AssistantAgent`: `model_context` was serialized but not restored.
- `SocietyOfMindAgent`: `model_context` was neither serialized nor restored.
- `CodeExecutorAgent`: `model_context` was serialized but not restored.

As a result, restoring an agent from its config silently dropped runtime context settings, potentially affecting agent behavior.

This patch:

- Adds proper serialization/deserialization of `model_context` using `.dump_component()` and `load_component(...)`.
- Ensures round-trip consistency when using declarative agent configs.

## Related issue number

Closes #6336

## Checks

- [ ] I've included any doc changes needed for <https://microsoft.github.io/autogen/>. See <https://github.com/microsoft/autogen/blob/main/CONTRIBUTING.md> to build and test documentation locally.
- [x] I've added tests (if relevant) corresponding to the changes introduced in this PR.
- [x] I've made sure all auto checks have passed.

---------

Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
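For reference, the restored behavior boils down to the round trip sketched below. This is a minimal sketch under stated assumptions, not the actual diff: the two helper functions are hypothetical stand-ins for the `_to_config`/`_from_config` changes, `ComponentModel` is assumed to be importable from `autogen_core`, and only `.dump_component()` / `load_component(...)` come from this PR.

```python
# Minimal sketch of the model_context round trip; helper names are hypothetical.
from autogen_core import ComponentModel  # assumed import location
from autogen_core.model_context import BufferedChatCompletionContext, ChatCompletionContext


def serialize_context(context: ChatCompletionContext) -> ComponentModel:
    # What _to_config should do: keep the runtime context in the declarative config.
    return context.dump_component()


def restore_context(serialized: ComponentModel) -> ChatCompletionContext:
    # What _from_config should do: rebuild the context instead of dropping it.
    return ChatCompletionContext.load_component(serialized)


context = BufferedChatCompletionContext(buffer_size=5)
restored = restore_context(serialize_context(context))
assert isinstance(restored, BufferedChatCompletionContext)  # settings survive the round trip
```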
127 lines · 5.0 KiB · Python
import pytest

from autogen_agentchat.agents import (
    AssistantAgent,
    CodeExecutorAgent,
    SocietyOfMindAgent,
)
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_core.model_context import (
    BufferedChatCompletionContext,
    ChatCompletionContext,
    HeadAndTailChatCompletionContext,
    TokenLimitedChatCompletionContext,
    UnboundedChatCompletionContext,
)
from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
from autogen_ext.models.replay import ReplayChatCompletionClient

@pytest.mark.parametrize(
    "model_context_class",
    [
        UnboundedChatCompletionContext(),
        BufferedChatCompletionContext(buffer_size=5),
        TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
        HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
    ],
)
def test_serialize_and_deserialize_model_context_on_assistant_agent(model_context_class: ChatCompletionContext) -> None:
    """Test the serialization and deserialization of the model context on the AssistantAgent."""
    agent = AssistantAgent(
        name="assistant",
        model_client=ReplayChatCompletionClient([]),
        description="An assistant agent.",
        model_context=model_context_class,
    )

    # Serialize the agent
    serialized_agent = agent.dump_component()
    # Deserialize the agent
    deserialized_agent = AssistantAgent.load_component(serialized_agent)

    # Check that the deserialized agent has the same model context as the original agent
    original_model_context = agent.model_context
    deserialized_model_context = deserialized_agent.model_context

    assert isinstance(original_model_context, type(deserialized_model_context))
    assert isinstance(deserialized_model_context, type(original_model_context))
    assert original_model_context.dump_component() == deserialized_model_context.dump_component()

@pytest.mark.parametrize(
    "model_context_class",
    [
        UnboundedChatCompletionContext(),
        BufferedChatCompletionContext(buffer_size=5),
        TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
        HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
    ],
)
def test_serialize_and_deserialize_model_context_on_society_of_mind_agent(
    model_context_class: ChatCompletionContext,
) -> None:
    """Test the serialization and deserialization of the model context on the SocietyOfMindAgent."""
    agent1 = AssistantAgent(
        name="assistant1", model_client=ReplayChatCompletionClient([]), description="An assistant agent."
    )
    agent2 = AssistantAgent(
        name="assistant2", model_client=ReplayChatCompletionClient([]), description="An assistant agent."
    )
    team = RoundRobinGroupChat(
        participants=[agent1, agent2],
    )
    agent = SocietyOfMindAgent(
        name="assistant",
        model_client=ReplayChatCompletionClient([]),
        description="An assistant agent.",
        team=team,
        model_context=model_context_class,
    )

    # Serialize the agent
    serialized_agent = agent.dump_component()
    # Deserialize the agent
    deserialized_agent = SocietyOfMindAgent.load_component(serialized_agent)

    # Check that the deserialized agent has the same model context as the original agent
    original_model_context = agent.model_context
    deserialized_model_context = deserialized_agent.model_context

    assert isinstance(original_model_context, type(deserialized_model_context))
    assert isinstance(deserialized_model_context, type(original_model_context))
    assert original_model_context.dump_component() == deserialized_model_context.dump_component()

@pytest.mark.parametrize(
    "model_context_class",
    [
        UnboundedChatCompletionContext(),
        BufferedChatCompletionContext(buffer_size=5),
        TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
        HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
    ],
)
def test_serialize_and_deserialize_model_context_on_code_executor_agent(
    model_context_class: ChatCompletionContext,
) -> None:
    """Test the serialization and deserialization of the model context on the CodeExecutorAgent."""
    agent = CodeExecutorAgent(
        name="assistant",
        code_executor=LocalCommandLineCodeExecutor(),
        description="An assistant agent.",
        model_context=model_context_class,
    )

    # Serialize the agent
    serialized_agent = agent.dump_component()
    # Deserialize the agent
    deserialized_agent = CodeExecutorAgent.load_component(serialized_agent)

    # Check that the deserialized agent has the same model context as the original agent
    original_model_context = agent.model_context
    deserialized_model_context = deserialized_agent.model_context

    assert isinstance(original_model_context, type(deserialized_model_context))
    assert isinstance(deserialized_model_context, type(original_model_context))
    assert original_model_context.dump_component() == deserialized_model_context.dump_component()
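As a usage note (not part of the test file), the same round trip lets a configured agent be persisted as JSON and rebuilt later. The snippet below is illustrative and assumes `dump_component()` returns a pydantic `ComponentModel` importable from `autogen_core`, consistent with the equality comparison used in the tests above.

```python
# Illustrative only: persist a declarative agent config as JSON and reload it.
# Assumes dump_component() returns a pydantic ComponentModel (from autogen_core).
from autogen_agentchat.agents import AssistantAgent
from autogen_core import ComponentModel
from autogen_core.model_context import BufferedChatCompletionContext
from autogen_ext.models.replay import ReplayChatCompletionClient

agent = AssistantAgent(
    name="assistant",
    model_client=ReplayChatCompletionClient([]),
    model_context=BufferedChatCompletionContext(buffer_size=5),
)

config_json = agent.dump_component().model_dump_json()  # e.g. written to disk

# With the fix, the buffered model context survives the JSON round trip.
restored = AssistantAgent.load_component(ComponentModel.model_validate_json(config_json))
assert isinstance(restored.model_context, BufferedChatCompletionContext)
```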