autogen/python/packages/autogen-ext/tests/test_openai_agent.py


from typing import Any, AsyncGenerator, List, Union, cast
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from autogen_agentchat.base import Response
from autogen_agentchat.messages import BaseChatMessage, MultiModalMessage, TextMessage
from autogen_core import CancellationToken, Image
from autogen_core.models import UserMessage
from autogen_ext.agents.openai import OpenAIAgent
from openai import AsyncOpenAI


def create_mock_openai_client() -> AsyncOpenAI:
    """Create a mock OpenAI client for the Responses API."""
    client = AsyncMock(spec=AsyncOpenAI)

    async def mock_responses_create(**kwargs: Any) -> Any:
        class MockResponse:
            def __init__(self, output_text: str, id: str) -> None:
                self.output_text = output_text
                self.id = id

        if "tools" in kwargs and kwargs["tools"]:
            return MockResponse(output_text='{"temperature": 72.5, "conditions": "sunny"}', id="resp-123")
        return MockResponse(output_text="Hello world!", id="resp-abc")

    responses = MagicMock()
    responses.create = AsyncMock(side_effect=mock_responses_create)
    client.responses = responses
    return client
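
# Note: create_mock_openai_client() stubs only client.responses.create. When the call
# includes a non-empty "tools" list, the stub answers with a tool-style JSON payload
# (id "resp-123"); otherwise it returns plain text (id "resp-abc").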


@pytest.fixture
def mock_openai_client() -> AsyncOpenAI:
    return create_mock_openai_client()


@pytest.fixture
def mock_error_client() -> AsyncOpenAI:
    client = AsyncMock(spec=AsyncOpenAI)
    beta = MagicMock()
    client.beta = beta
    beta.chat = MagicMock()
    beta.chat.completions = MagicMock()

    async def mock_create_error(**kwargs: Any) -> None:
        raise Exception("API Error")

    responses = MagicMock()
    responses.create = AsyncMock(side_effect=mock_create_error)
    client.responses = responses
    return client
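
# Unlike the happy-path mock, this client's responses.create always raises, which lets
# the tests below verify that the agent surfaces API failures as error text.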


@pytest.fixture
def cancellation_token() -> CancellationToken:
    return CancellationToken()


@pytest.fixture
def agent(mock_openai_client: AsyncOpenAI) -> OpenAIAgent:
    return OpenAIAgent(
        name="assistant",
        description="Test assistant using the Response API",
        client=mock_openai_client,
        model="gpt-4o",
        instructions="You are a helpful AI assistant.",
        tools=["web_search_preview"],
        temperature=0.7,
        max_output_tokens=1000,
        store=True,
        truncation="auto",
    )
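
# The shared test agent enables the built-in "web_search_preview" tool, so the mocked
# responses.create sees a non-empty "tools" argument and replies with the tool-style
# JSON payload (see test_tool_calling below).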


@pytest.fixture
def json_mode_agent(mock_openai_client: AsyncOpenAI) -> OpenAIAgent:
    return OpenAIAgent(
        name="json_assistant",
        description="JSON assistant",
        client=mock_openai_client,
        model="gpt-4o",
        instructions="Return JSON responses",
        json_mode=True,
    )


@pytest.fixture
def error_agent(mock_error_client: AsyncOpenAI) -> OpenAIAgent:
    return OpenAIAgent(
        name="error_assistant",
        description="Assistant that generates errors",
        client=mock_error_client,
        model="gpt-4o",
        instructions="You are a helpful AI assistant.",
    )


@pytest.mark.asyncio
async def test_basic_response(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    """Test that the agent returns a basic text response from the Responses API."""
    message = TextMessage(source="user", content="Hello, how are you?")
    response = await agent.on_messages([message], cancellation_token)
    assert response.chat_message is not None
    assert isinstance(response.chat_message, TextMessage)
    assert response.chat_message.content in ("Hello world!", '{"temperature": 72.5, "conditions": "sunny"}')
    assert response.chat_message.source == "assistant"
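
# The content assertion above accepts either payload: the shared `agent` fixture has a
# tool configured, so the mock may answer with the tool-style JSON instead of plain text.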


@pytest.mark.asyncio
async def test_tool_calling(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    """Test that enabling a built-in tool yields a tool-style JSON response via the Responses API."""
    message = TextMessage(source="user", content="What's the weather in New York?")
    all_messages: List[Any] = []
    async for msg in agent.on_messages_stream([message], cancellation_token):
        all_messages.append(msg)
    final_response = next((msg for msg in all_messages if hasattr(msg, "chat_message")), None)
    assert final_response is not None
    assert hasattr(final_response, "chat_message")
    response_msg = cast(Response, final_response)
    assert isinstance(response_msg.chat_message, TextMessage)
    assert response_msg.chat_message.content == '{"temperature": 72.5, "conditions": "sunny"}'
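
# on_messages_stream is expected to yield a final Response object; the next() scan above
# picks it out by looking for a chat_message attribute among the streamed items.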


@pytest.mark.asyncio
async def test_error_handling(error_agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    """Test that the agent returns an error message if the Responses API fails."""
    message = TextMessage(source="user", content="This will cause an error")
    all_messages: List[Any] = []
    async for msg in error_agent.on_messages_stream([message], cancellation_token):
        all_messages.append(msg)
    final_response = next((msg for msg in all_messages if hasattr(msg, "chat_message")), None)
    assert final_response is not None
    assert isinstance(final_response.chat_message, TextMessage)
    assert "Error generating response:" in final_response.chat_message.content


@pytest.mark.asyncio
async def test_state_management(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    agent._last_response_id = "resp-123"  # type: ignore
    agent._message_history = [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there"}]  # type: ignore
    state = await agent.save_state()

    new_agent = OpenAIAgent(
        name="assistant2",
        description="Test assistant 2",
        client=agent._client,  # type: ignore
        model="gpt-4o",
        instructions="You are a helpful AI assistant.",
    )
    await new_agent.load_state(state)
    assert new_agent._last_response_id == "resp-123"  # type: ignore
    assert len(new_agent._message_history) == 2  # type: ignore
    assert new_agent._message_history[0]["role"] == "user"  # type: ignore
    assert new_agent._message_history[0]["content"] == "Hello"  # type: ignore

    await new_agent.on_reset(cancellation_token)
    assert new_agent._last_response_id is None  # type: ignore
    assert len(new_agent._message_history) == 0  # type: ignore
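
# This test reaches into private attributes (hence the type: ignore markers) to check that
# save_state()/load_state() round-trip the last response id and message history, and that
# on_reset() clears both.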


@pytest.mark.asyncio
async def test_convert_message_functions(agent: OpenAIAgent) -> None:
    from autogen_ext.agents.openai._openai_agent import _convert_message_to_openai_message  # type: ignore

    user_msg = TextMessage(content="Hello", source="user")
    openai_user_msg = _convert_message_to_openai_message(user_msg)  # type: ignore
    assert openai_user_msg["role"] == "user"
    assert openai_user_msg["content"] == "Hello"

    sys_msg = TextMessage(content="System prompt", source="system")
    openai_sys_msg = _convert_message_to_openai_message(sys_msg)  # type: ignore
    assert openai_sys_msg["role"] == "system"
    assert openai_sys_msg["content"] == "System prompt"

    assistant_msg = TextMessage(content="Assistant reply", source="assistant")
    openai_assistant_msg = _convert_message_to_openai_message(assistant_msg)  # type: ignore
    assert openai_assistant_msg["role"] == "assistant"
    assert openai_assistant_msg["content"] == "Assistant reply"

    text_msg = TextMessage(content="Plain text", source="other")
    openai_text_msg = _convert_message_to_openai_message(text_msg)  # type: ignore
    assert openai_text_msg["role"] == "user"
    assert openai_text_msg["content"] == "Plain text"
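
# _convert_message_to_openai_message maps the message source onto the OpenAI role and,
# as the last case above shows, falls back to the "user" role for unrecognized sources.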


@pytest.mark.asyncio
async def test_on_messages_inner_messages(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    class DummyMsg(BaseChatMessage):
        type: str = "DummyMsg"
        content: str = "dummy content"

        def __init__(self) -> None:
            super().__init__(source="dummy")

        def to_model_message(self) -> UserMessage:
            return UserMessage(content=self.content, source=self.source)

        def to_model_text(self) -> str:
            return self.content

        def to_text(self) -> str:
            return self.content

    dummy_inner = DummyMsg()
    dummy_response = Response(chat_message=TextMessage(source="agent", content="hi"), inner_messages=None)

    async def fake_stream(*args: Any, **kwargs: Any) -> AsyncGenerator[Union[BaseChatMessage, Response], None]:
        yield dummy_inner
        yield dummy_response

    with patch.object(agent, "on_messages_stream", fake_stream):
        response = await agent.on_messages([TextMessage(source="user", content="test")], cancellation_token)
        assert response.chat_message is not None
        assert isinstance(response.chat_message, TextMessage)
        assert response.chat_message.content == "hi"
        assert response.inner_messages is not None
        assert dummy_inner in response.inner_messages
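
# Here on_messages_stream is replaced wholesale, so the test only checks the aggregation
# contract of on_messages: streamed items other than the final Response must be exposed
# through Response.inner_messages.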


@pytest.mark.asyncio
async def test_build_api_params(agent: OpenAIAgent) -> None:
    agent._last_response_id = None  # type: ignore
    params = agent._build_api_parameters([{"role": "user", "content": "hi"}])  # type: ignore
    assert "previous_response_id" not in params

    agent._last_response_id = "resp-456"  # type: ignore
    params = agent._build_api_parameters([{"role": "user", "content": "hi"}])  # type: ignore
    assert params.get("previous_response_id") == "resp-456"
    assert "max_tokens" not in params
    assert params.get("max_output_tokens") == 1000
    assert params.get("store") is True
    assert params.get("truncation") == "auto"

    agent._json_mode = True  # type: ignore
    params = agent._build_api_parameters([{"role": "user", "content": "hi"}])  # type: ignore
    assert "text.format" not in params
    assert params.get("text") == {"type": "json_object"}
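
# The parameter names checked above follow the Responses API rather than Chat Completions:
# max_output_tokens instead of max_tokens, previous_response_id to thread consecutive turns,
# and a JSON text format when json_mode is enabled.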


@pytest.mark.asyncio
async def test_on_messages_previous_response_id(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    message = TextMessage(source="user", content="hi")
    response = await agent.on_messages([message], cancellation_token)
    assert response.chat_message is not None
    assert isinstance(response.chat_message, TextMessage)

    message = TextMessage(source="user", content="hi")
    response = await agent.on_messages([message], cancellation_token)
    assert response.chat_message is not None
    assert isinstance(response.chat_message, TextMessage)


@pytest.mark.asyncio
async def test_on_messages_stream(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    dummy_response = Response(chat_message=TextMessage(source="agent", content="hi"), inner_messages=None)

    async def fake_stream(*args: Any, **kwargs: Any) -> AsyncGenerator[Response, None]:
        yield dummy_response

    with patch.object(agent, "on_messages_stream", fake_stream):
        resp = await agent.on_messages([TextMessage(source="user", content="hi")], cancellation_token)
        assert isinstance(resp.chat_message, TextMessage)
        assert resp.chat_message.content == "hi"


@pytest.mark.asyncio
async def test_component_serialization(agent: OpenAIAgent) -> None:
    config = agent.dump_component()
    config_dict = config.config
    assert config_dict["name"] == "assistant"
    assert config_dict["description"] == "Test assistant using the Response API"
    assert config_dict["model"] == "gpt-4o"
    assert config_dict["instructions"] == "You are a helpful AI assistant."
    assert config_dict["temperature"] == 0.7
    assert config_dict["max_output_tokens"] == 1000
    assert config_dict["store"] is True
    assert config_dict["truncation"] == "auto"


@pytest.mark.asyncio
async def test_from_config(agent: OpenAIAgent) -> None:
    config = agent.dump_component()
    with patch("openai.AsyncOpenAI"):
        loaded_agent = OpenAIAgent.load_component(config)
        assert loaded_agent.name == "assistant"
        assert loaded_agent.description == "Test assistant using the Response API"
        assert loaded_agent._model == "gpt-4o"  # type: ignore
        assert loaded_agent._instructions == "You are a helpful AI assistant."  # type: ignore
        assert loaded_agent._temperature == 0.7  # type: ignore
        assert loaded_agent._max_output_tokens == 1000  # type: ignore
        assert loaded_agent._store is True  # type: ignore
        assert loaded_agent._truncation == "auto"  # type: ignore
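
# openai.AsyncOpenAI is patched here because load_component presumably constructs a fresh
# client from the config; the assertions only cover constructor parameters, not the client.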


@pytest.mark.asyncio
async def test_multimodal_message_response(agent: OpenAIAgent, cancellation_token: CancellationToken) -> None:
    # Test that the multimodal message is converted to the correct format
    img = Image.from_base64(
        "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4z8AAAAMBAQDJ/pLvAAAAAElFTkSuQmCC"
    )
    multimodal_message = MultiModalMessage(content=["Can you describe the content of this image?", img], source="user")

    # Patch client.responses.create to simulate image-capable output
    async def mock_responses_create(**kwargs: Any) -> Any:
        class MockResponse:
            def __init__(self) -> None:
                self.output_text = "I see a cat in the image."
                self.id = "resp-image-001"

        return MockResponse()

    agent._client.responses.create = AsyncMock(side_effect=mock_responses_create)  # type: ignore
    response = await agent.on_messages([multimodal_message], cancellation_token)
    assert response.chat_message is not None
    assert isinstance(response.chat_message, TextMessage)
    assert "cat" in response.chat_message.content.lower()