Fix OpenAI UnprocessableEntityError when AssistantAgent makes multiple tool calls (#6799)

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: ekzhu <320302+ekzhu@users.noreply.github.com>
Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
Copilot 2025-07-20 00:30:42 -07:00 committed by GitHub
parent ae024e262d
commit 7c536a8c95
3 changed files with 127 additions and 4 deletions
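
The root cause, per the new test below: when an AssistantMessage carries tool calls but no thought, the serialized Chat Completions message had no content field at all, and the API rejected the request with UnprocessableEntityError (422). A rough before/after sketch of the payload shape, inferred from the assertions in the new test (the ids and arguments are illustrative):

# Before the fix: no "content" key when the assistant turn is tool calls only
assistant_msg = {
    "role": "assistant",
    "tool_calls": [
        {"id": "call_1", "type": "function",
         "function": {"name": "increment_number", "arguments": '{"number": 5}'}},
        {"id": "call_2", "type": "function",
         "function": {"name": "increment_number", "arguments": '{"number": 6}'}},
    ],
}

# After the fix: content is always present, explicitly null when there is no thought,
# or the thought text when the AssistantMessage has one.
assistant_msg["content"] = None                                     # no thought
# assistant_msg["content"] = "I need to increment these numbers."   # with thought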


@@ -267,6 +267,12 @@ def _set_pass_message_when_whitespace(message: LLMMessage, context: Dict[str, An
    return {}

+def _set_null_content_for_tool_calls(message: LLMMessage, context: Dict[str, Any]) -> Dict[str, None]:
+    """Set content to null for tool calls without thought. Required by OpenAI API."""
+    assert isinstance(message, AssistantMessage)
+    return {"content": None}

# === Base Transformers list ===
base_system_message_transformers: List[Callable[[LLMMessage, Dict[str, Any]], Dict[str, Any]]] = [
    _set_content_direct,
@@ -316,19 +322,22 @@ tools_assistant_transformer_funcs: List[Callable[[LLMMessage, Dict[str, Any]], D
    base_assistant_transformer_funcs
    + [
        _set_tool_calls,
+        _set_null_content_for_tool_calls,
    ]
)

thought_assistant_transformer_funcs: List[Callable[[LLMMessage, Dict[str, Any]], Dict[str, Any]]] = (
-    tools_assistant_transformer_funcs
+    base_assistant_transformer_funcs
    + [
+        _set_tool_calls,
        _set_thought_as_content,
    ]
)

thought_assistant_transformer_funcs_gemini: List[Callable[[LLMMessage, Dict[str, Any]], Dict[str, Any]]] = (
-    tools_assistant_transformer_funcs
+    base_assistant_transformer_funcs
    + [
+        _set_tool_calls,
        _set_thought_as_content_gemini,
    ]
)
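
Each transformer in these lists takes an (LLMMessage, context) pair and returns a fragment of the outgoing Chat Completions message, and the fragments are merged in order. Appending _set_null_content_for_tool_calls after _set_tool_calls therefore makes the plain tool-call message carry an explicit "content": None, while the thought variants now build on base_assistant_transformer_funcs (instead of inheriting the null-content setter from tools_assistant_transformer_funcs) so that _set_thought_as_content fills content with the thought text. A minimal sketch of that composition pattern, assuming a simple dict-merge driver; the real dispatch code lives elsewhere in the OpenAI client and is not part of this diff:

from typing import Any, Callable, Dict, List

def apply_transformers(
    message: Any,
    funcs: List[Callable[[Any, Dict[str, Any]], Dict[str, Any]]],
    context: Dict[str, Any],
) -> Dict[str, Any]:
    # Illustrative only: merge the partial dicts returned by each transformer,
    # letting later entries overwrite earlier ones.
    result: Dict[str, Any] = {}
    for func in funcs:
        result.update(func(message, context))
    return result

# tools_assistant_transformer_funcs   -> {"role": ..., "tool_calls": [...], "content": None}
# thought_assistant_transformer_funcs -> {"role": ..., "tool_calls": [...], "content": "<thought>"}

The second changed file adds a regression test that pins down both cases.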


@@ -1661,6 +1661,120 @@ async def test_tool_calling_with_stream(monkeypatch: pytest.MonkeyPatch) -> None
    assert chunks[-1].thought == "Hello Another Hello Yet Another Hello"

@pytest.mark.asyncio
async def test_tool_calls_assistant_message_content_field(monkeypatch: pytest.MonkeyPatch) -> None:
    """Test that AssistantMessage with tool calls includes required content field.

    This test addresses the issue where AssistantMessage with tool calls but no thought
    was missing the required 'content' field, causing OpenAI API UnprocessableEntityError (422).
    """
    # Create tool calls for testing
    tool_calls = [
        FunctionCall(id="call_1", name="increment_number", arguments='{"number": 5}'),
        FunctionCall(id="call_2", name="increment_number", arguments='{"number": 6}'),
    ]

    # Mock response for tool calls
    chat_completion = ChatCompletion(
        id="id1",
        choices=[
            Choice(
                finish_reason="stop",
                index=0,
                message=ChatCompletionMessage(
                    role="assistant",
                    content="Done",
                ),
            )
        ],
        created=1234567890,
        model="gpt-4o",
        object="chat.completion",
        usage=CompletionUsage(completion_tokens=10, prompt_tokens=5, total_tokens=15),
    )

    client = OpenAIChatCompletionClient(model="gpt-4o", api_key="test")
    mock_create = AsyncMock(return_value=chat_completion)

    # Test AssistantMessage with tool calls but no thought
    assistant_message_no_thought = AssistantMessage(
        content=tool_calls,
        source="assistant",
        thought=None,  # No thought - this was causing the issue
    )

    with monkeypatch.context() as mp:
        mp.setattr(client._client.chat.completions, "create", mock_create)  # type: ignore[reportPrivateUsage]
        await client.create(
            messages=[
                UserMessage(content="Please increment these numbers", source="user"),
                assistant_message_no_thought,
            ]
        )

    # Verify the API was called and check the messages sent
    mock_create.assert_called_once()
    call_args = mock_create.call_args

    # Extract the messages from the API call
    messages = call_args.kwargs["messages"]

    # Find the assistant message in the API call
    assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]
    assert len(assistant_messages) == 1
    assistant_msg = assistant_messages[0]

    # Verify all required fields are present
    assert "role" in assistant_msg
    assert "tool_calls" in assistant_msg
    assert "content" in assistant_msg  # This was missing before the fix

    # Verify field values
    assert assistant_msg["role"] == "assistant"
    assert assistant_msg["content"] is None  # Should be null for tool calls without thought
    assert len(assistant_msg["tool_calls"]) == 2

    # Test AssistantMessage with tool calls AND thought
    assistant_message_with_thought = AssistantMessage(
        content=tool_calls, source="assistant", thought="I need to increment these numbers."
    )

    mock_create.reset_mock()  # Reset for second test

    with monkeypatch.context() as mp:
        mp.setattr(client._client.chat.completions, "create", mock_create)  # type: ignore[reportPrivateUsage]
        await client.create(
            messages=[
                UserMessage(content="Please increment these numbers", source="user"),
                assistant_message_with_thought,
            ]
        )

    # Verify the API was called for the second test
    mock_create.assert_called_once()
    call_args = mock_create.call_args

    # Extract the messages from the API call
    messages = call_args.kwargs["messages"]

    # Find the assistant message in the API call
    assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]
    assert len(assistant_messages) == 1
    assistant_msg_with_thought = assistant_messages[0]

    # Should have both tool_calls and content with the thought text
    assert "role" in assistant_msg_with_thought
    assert "tool_calls" in assistant_msg_with_thought
    assert "content" in assistant_msg_with_thought
    assert assistant_msg_with_thought["role"] == "assistant"
    assert assistant_msg_with_thought["content"] == "I need to increment these numbers."
    assert len(assistant_msg_with_thought["tool_calls"]) == 2


@pytest.fixture()
def openai_client(request: pytest.FixtureRequest) -> OpenAIChatCompletionClient:
    model = request.node.callspec.params["model"]  # type: ignore
@@ -1711,7 +1825,7 @@ async def test_model_client_with_function_calling(model: str, openai_client: Ope
    pass_tool = FunctionTool(_pass_function, name="pass_tool", description="pass session.")
    fail_tool = FunctionTool(_fail_function, name="fail_tool", description="fail session.")
    messages: List[LLMMessage] = [
-        UserMessage(content="Call the pass tool with input 'task' and talk result", source="user")
+        UserMessage(content="Call the pass tool with input 'task' summarize the result.", source="user")
    ]
    create_result = await openai_client.create(messages=messages, tools=[pass_tool, fail_tool])
    assert isinstance(create_result.content, list)
@@ -1743,7 +1857,7 @@ async def test_model_client_with_function_calling(model: str, openai_client: Ope
    # Test parallel tool calling
    messages = [
        UserMessage(
-            content="Call both the pass tool with input 'task' and the fail tool also with input 'task' and talk result",
+            content="Call both the pass tool with input 'task' and the fail tool also with input 'task' and summarize the result",
            source="user",
        )
    ]
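
The two hunks above only reword prompts in the live-API function-calling test. The scenario named in the commit title, an AssistantAgent whose model returns several tool calls in one turn, can be reproduced roughly as below; this is a sketch using the standard autogen-agentchat / autogen-ext imports, and the tool and task are illustrative rather than taken from this commit:

import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient


def increment_number(number: int) -> int:
    """Increment a number by one (illustrative tool)."""
    return number + 1


async def main() -> None:
    client = OpenAIChatCompletionClient(model="gpt-4o")  # reads OPENAI_API_KEY from the environment
    agent = AssistantAgent("assistant", model_client=client, tools=[increment_number])
    # Before this fix, a turn in which the model issued multiple tool calls without a thought
    # could fail with UnprocessableEntityError (422) when that assistant message was sent back.
    result = await agent.run(task="Increment 5 and 6 with the tool, then report both results.")
    print(result.messages[-1].content)


asyncio.run(main())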