Add parallel_tool_call to openai model client config (#6888)

Eric Zhu authored 2025-08-02 19:28:33 -05:00; committed by GitHub
parent 3e30f9e05d
commit 9cb067e6ab
4 changed files with 9 additions and 5 deletions


@@ -50,7 +50,7 @@ class AgentTool(TaskRunnerTool, Component[AgentToolConfig]):
 
 
 async def main() -> None:
-    model_client = OpenAIChatCompletionClient(model="gpt-4")
+    model_client = OpenAIChatCompletionClient(model="gpt-4.1")
     writer = AssistantAgent(
         name="writer",
         description="A writer agent for generating text.",
@@ -60,7 +60,7 @@ class AgentTool(TaskRunnerTool, Component[AgentToolConfig]):
     writer_tool = AgentTool(agent=writer)
 
     # Create model client with parallel tool calls disabled for the main agent
-    main_model_client = OpenAIChatCompletionClient(model="gpt-4", parallel_tool_calls=False)
+    main_model_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)
     assistant = AssistantAgent(
         name="assistant",
         model_client=main_model_client,
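For orientation, the updated AgentTool docstring example assembles into a runnable script along these lines. The imports, system messages, task string, and the closing cleanup calls are assumptions filled in around the fragments shown in this hunk, not part of the change itself.

import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.tools import AgentTool
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    model_client = OpenAIChatCompletionClient(model="gpt-4.1")
    writer = AssistantAgent(
        name="writer",
        description="A writer agent for generating text.",
        model_client=model_client,
        system_message="Write a well-structured draft for the given topic.",  # assumed
    )
    writer_tool = AgentTool(agent=writer)

    # Create model client with parallel tool calls disabled for the main agent
    main_model_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)
    assistant = AssistantAgent(
        name="assistant",
        model_client=main_model_client,
        tools=[writer_tool],
        system_message="You are a helpful assistant.",  # assumed
    )
    await Console(assistant.run_stream(task="Write a short poem about the sea."))  # assumed task

    # Close both clients when finished (assumed cleanup).
    await model_client.close()
    await main_model_client.close()


asyncio.run(main())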


@@ -51,12 +51,12 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
 from autogen_agentchat.teams import RoundRobinGroupChat
 from autogen_agentchat.tools import TeamTool
 from autogen_agentchat.ui import Console
-from autogen_ext.models.ollama import OllamaChatCompletionClient
+from autogen_ext.models.openai import OpenAIChatCompletionClient
 
 
 async def main() -> None:
     # Disable parallel tool calls when using TeamTool
-    model_client = OllamaChatCompletionClient(model="llama3.2")
+    model_client = OpenAIChatCompletionClient(model="gpt-4.1")
 
     writer = AssistantAgent(name="writer", model_client=model_client, system_message="You are a helpful assistant.")
     reviewer = AssistantAgent(
@@ -80,7 +80,7 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
     )
 
     # Create model client with parallel tool calls disabled for the main agent
-    main_model_client = OllamaChatCompletionClient(model="llama3.2", parallel_tool_calls=False)
+    main_model_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)
     main_agent = AssistantAgent(
         name="main_agent",
         model_client=main_model_client,
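As the inline comments note, the wrapping agent's client disables parallel tool calls because the wrapped agent or team can only service one task at a time. A minimal sketch of the two settings on the OpenAI client, using nothing beyond the constructor shown above:

from autogen_ext.models.openai import OpenAIChatCompletionClient

# Leaving parallel_tool_calls unset falls back to the server default, which
# may let the model request several tool calls in a single turn.
default_client = OpenAIChatCompletionClient(model="gpt-4.1")

# Setting it to False asks the model for at most one tool call per turn,
# which is what the AgentTool/TeamTool examples rely on.
serial_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)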


@@ -1236,6 +1236,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient, Component[OpenA
         stop (optional, str | List[str]):
         temperature (optional, float):
         top_p (optional, float):
+        parallel_tool_calls (optional, bool): Whether to allow parallel tool calls. When not set, defaults to server behavior.
         user (optional, str):
         default_headers (optional, dict[str, str]): Custom headers; useful for authentication or other custom requirements.
         add_name_prefixes (optional, bool): Whether to prepend the `source` value
@@ -1576,6 +1577,7 @@ class AzureOpenAIChatCompletionClient(
         stop (optional, str | List[str]):
         temperature (optional, float):
         top_p (optional, float):
+        parallel_tool_calls (optional, bool): Whether to allow parallel tool calls. When not set, defaults to server behavior.
         user (optional, str):
         default_headers (optional, dict[str, str]): Custom headers; useful for authentication or other custom requirements.
         add_name_prefixes (optional, bool): Whether to prepend the `source` value
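The same parameter description is added to the Azure client docstring, so parallel_tool_calls should be accepted there as well. A sketch with the usual Azure client arguments; the endpoint, deployment, API version, and key values are placeholders:

from autogen_ext.models.openai import AzureOpenAIChatCompletionClient

az_client = AzureOpenAIChatCompletionClient(
    azure_deployment="{your-deployment}",
    model="gpt-4.1",
    api_version="2024-06-01",
    azure_endpoint="https://{your-resource}.openai.azure.com/",
    api_key="{your-api-key}",  # or pass azure_ad_token_provider instead
    parallel_tool_calls=False,  # same semantics as on OpenAIChatCompletionClient
)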


@@ -49,6 +49,7 @@ class CreateArguments(TypedDict, total=False):
     top_p: Optional[float]
     user: str
     stream_options: Optional[StreamOptions]
+    parallel_tool_calls: Optional[bool]
 
 
 AsyncAzureADTokenProvider = Callable[[], Union[str, Awaitable[str]]]
@@ -97,6 +98,7 @@ class CreateArgumentsConfigModel(BaseModel):
     top_p: float | None = None
     user: str | None = None
     stream_options: StreamOptions | None = None
+    parallel_tool_calls: bool | None = None
 
 
 class BaseOpenAIClientConfigurationConfigModel(CreateArgumentsConfigModel):
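Because the field now appears in both CreateArguments and CreateArgumentsConfigModel, it should round-trip through the declarative component config. A minimal sketch, assuming the standard dump_component/load_component API of the component framework:

from autogen_ext.models.openai import OpenAIChatCompletionClient

client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)

# Serialize to a component config; the config payload should now carry
# parallel_tool_calls alongside the other create arguments.
component_config = client.dump_component()

# Rebuild an equivalent client from that config.
restored_client = OpenAIChatCompletionClient.load_component(component_config)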