diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py
index d7ef31418..ba83bea6b 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py
@@ -50,7 +50,7 @@ class AgentTool(TaskRunnerTool, Component[AgentToolConfig]):
 
 
         async def main() -> None:
-            model_client = OpenAIChatCompletionClient(model="gpt-4")
+            model_client = OpenAIChatCompletionClient(model="gpt-4.1")
             writer = AssistantAgent(
                 name="writer",
                 description="A writer agent for generating text.",
@@ -60,7 +60,7 @@ class AgentTool(TaskRunnerTool, Component[AgentToolConfig]):
             writer_tool = AgentTool(agent=writer)
 
             # Create model client with parallel tool calls disabled for the main agent
-            main_model_client = OpenAIChatCompletionClient(model="gpt-4", parallel_tool_calls=False)
+            main_model_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)
             assistant = AssistantAgent(
                 name="assistant",
                 model_client=main_model_client,
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py
index 4a7167eb6..9c8ecf1b0 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py
@@ -51,12 +51,12 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
         from autogen_agentchat.teams import RoundRobinGroupChat
         from autogen_agentchat.tools import TeamTool
         from autogen_agentchat.ui import Console
-        from autogen_ext.models.ollama import OllamaChatCompletionClient
+        from autogen_ext.models.openai import OpenAIChatCompletionClient
 
 
         async def main() -> None:
             # Disable parallel tool calls when using TeamTool
-            model_client = OllamaChatCompletionClient(model="llama3.2")
+            model_client = OpenAIChatCompletionClient(model="gpt-4.1")
 
             writer = AssistantAgent(name="writer", model_client=model_client, system_message="You are a helpful assistant.")
             reviewer = AssistantAgent(
@@ -80,7 +80,7 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
             )
 
             # Create model client with parallel tool calls disabled for the main agent
-            main_model_client = OllamaChatCompletionClient(model="llama3.2", parallel_tool_calls=False)
+            main_model_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)
             main_agent = AssistantAgent(
                 name="main_agent",
                 model_client=main_model_client,
diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
index 18ea278e7..02b8d911a 100644
--- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
+++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
@@ -1236,6 +1236,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient, Component[OpenA
         stop (optional, str | List[str]):
         temperature (optional, float):
         top_p (optional, float):
+        parallel_tool_calls (optional, bool): Whether to allow parallel tool calls. When not set, defaults to server behavior.
         user (optional, str):
         default_headers (optional, dict[str, str]): Custom headers; useful for authentication or other custom requirements.
         add_name_prefixes (optional, bool): Whether to prepend the `source` value
@@ -1576,6 +1577,7 @@ class AzureOpenAIChatCompletionClient(
         stop (optional, str | List[str]):
         temperature (optional, float):
         top_p (optional, float):
+        parallel_tool_calls (optional, bool): Whether to allow parallel tool calls. When not set, defaults to server behavior.
         user (optional, str):
         default_headers (optional, dict[str, str]): Custom headers; useful for authentication or other custom requirements.
         add_name_prefixes (optional, bool): Whether to prepend the `source` value
diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py
index f9c942a7d..02b1e3a80 100644
--- a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py
+++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py
@@ -49,6 +49,7 @@ class CreateArguments(TypedDict, total=False):
     top_p: Optional[float]
     user: str
     stream_options: Optional[StreamOptions]
+    parallel_tool_calls: Optional[bool]
 
 
 AsyncAzureADTokenProvider = Callable[[], Union[str, Awaitable[str]]]
@@ -97,6 +98,7 @@ class CreateArgumentsConfigModel(BaseModel):
     top_p: float | None = None
     user: str | None = None
     stream_options: StreamOptions | None = None
+    parallel_tool_calls: bool | None = None
 
 
 class BaseOpenAIClientConfigurationConfigModel(CreateArgumentsConfigModel):
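
A minimal usage sketch of the `parallel_tool_calls` option documented above, mirroring the updated `AgentTool` docstring example; it assumes an `OPENAI_API_KEY` environment variable is set and is not part of the patch itself.

import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.tools import AgentTool
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    # Model client for the inner writer agent; no tool calls needed here.
    model_client = OpenAIChatCompletionClient(model="gpt-4.1")
    writer = AssistantAgent(
        name="writer",
        description="A writer agent for generating text.",
        model_client=model_client,
        system_message="Write a short summary of the user's request.",
    )
    writer_tool = AgentTool(agent=writer)

    # Disable parallel tool calls for the outer agent so it issues at most
    # one tool call per model turn; leaving it unset keeps the server default.
    main_model_client = OpenAIChatCompletionClient(model="gpt-4.1", parallel_tool_calls=False)
    assistant = AssistantAgent(
        name="assistant",
        model_client=main_model_client,
        tools=[writer_tool],
        system_message="You are a helpful assistant.",
    )
    await Console(assistant.run_stream(task="Write a poem about the sea."))


asyncio.run(main())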