diff --git a/python/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
index cb081f552..efd530e1b 100644
--- a/python/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
+++ b/python/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
@@ -433,6 +433,13 @@
     "when agent behavior needs to be consistent across different models.\n",
     "This should be done at the model client level.\n",
     "\n",
+    "```{important}\n",
+    "When using {py:class}`~autogen_agentchat.tools.AgentTool` or {py:class}`~autogen_agentchat.tools.TeamTool`,\n",
+    "you **must** disable parallel tool calls to avoid concurrency issues.\n",
+    "These tools cannot run concurrently as agents and teams maintain internal state\n",
+    "that would conflict with parallel execution.\n",
+    "```\n",
+    "\n",
     "For {py:class}`~autogen_ext.models.openai.OpenAIChatCompletionClient` and {py:class}`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`,\n",
     "set `parallel_tool_calls=False` to disable parallel tool calls."
    ]
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py
index e4d51195d..d7ef31418 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_agent.py
@@ -22,6 +22,13 @@ class AgentTool(TaskRunnerTool, Component[AgentToolConfig]):
     The tool returns the result of the task execution as a
     :class:`~autogen_agentchat.base.TaskResult` object.
 
+    .. important::
+        When using AgentTool, you **must** disable parallel tool calls in the model client configuration
+        to avoid concurrency issues. Agents cannot run concurrently as they maintain internal state
+        that would conflict with parallel execution. For example, set ``parallel_tool_calls=False``
+        for :class:`~autogen_ext.models.openai.OpenAIChatCompletionClient` and
+        :class:`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`.
+
     Args:
         agent (BaseChatAgent): The agent to be used for running the task.
         return_value_as_last_message (bool): Whether to use the last message content of the task result
@@ -51,9 +58,12 @@ class AgentTool(TaskRunnerTool, Component[AgentToolConfig]):
                     system_message="Write well.",
                 )
                 writer_tool = AgentTool(agent=writer)
+
+                # Create model client with parallel tool calls disabled for the main agent
+                main_model_client = OpenAIChatCompletionClient(model="gpt-4", parallel_tool_calls=False)
                 assistant = AssistantAgent(
                     name="assistant",
-                    model_client=model_client,
+                    model_client=main_model_client,
                     tools=[writer_tool],
                     system_message="You are a helpful assistant.",
                 )
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py
index 28bbb0daa..4a7167eb6 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/tools/_team.py
@@ -25,6 +25,13 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
     The tool returns the result of the task execution as a
     :class:`~autogen_agentchat.base.TaskResult` object.
 
+    .. important::
+        When using TeamTool, you **must** disable parallel tool calls in the model client configuration
+        to avoid concurrency issues. Teams cannot run concurrently as they maintain internal state
+        that would conflict with parallel execution. For example, set ``parallel_tool_calls=False``
+        for :class:`~autogen_ext.models.openai.OpenAIChatCompletionClient` and
+        :class:`~autogen_ext.models.openai.AzureOpenAIChatCompletionClient`.
+
     Args:
         team (BaseGroupChat): The team to be used for running the task.
         name (str): The name of the tool.
@@ -48,6 +55,7 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
 
 
             async def main() -> None:
+                # Note: parallel tool calls must be disabled for the agent that uses the TeamTool (see main_model_client below).
                 model_client = OllamaChatCompletionClient(model="llama3.2")
 
                 writer = AssistantAgent(name="writer", model_client=model_client, system_message="You are a helpful assistant.")
@@ -65,12 +73,17 @@ class TeamTool(TaskRunnerTool, Component[TeamToolConfig]):
 
                 # Create a TeamTool that uses the team to run tasks, returning the last message as the result.
                 tool = TeamTool(
-                    team=team, name="writing_team", description="A tool for writing tasks.", return_value_as_last_message=True
+                    team=team,
+                    name="writing_team",
+                    description="A tool for writing tasks.",
+                    return_value_as_last_message=True,
                 )
 
+                # Create model client with parallel tool calls disabled for the main agent
+                main_model_client = OllamaChatCompletionClient(model="llama3.2", parallel_tool_calls=False)
                 main_agent = AssistantAgent(
                     name="main_agent",
-                    model_client=model_client,
+                    model_client=main_model_client,
                     system_message="You are a helpful assistant that can use the writing tool.",
                     tools=[tool],
                 )
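
For reference, here is a minimal, self-contained sketch of the pattern these docstrings describe: the wrapped agent can use any model client, while the agent that *calls* the `AgentTool` gets a client created with `parallel_tool_calls=False`. It mirrors the AgentTool docstring example above; the model name `gpt-4o`, the use of `assistant.run(...)`, and printing the last message are illustrative assumptions, not part of this diff.

```python
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.tools import AgentTool
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    # Model client for the wrapped agent; no special settings needed here.
    writer_model_client = OpenAIChatCompletionClient(model="gpt-4o")  # model name is a placeholder
    writer = AssistantAgent(
        name="writer",
        model_client=writer_model_client,
        system_message="Write well.",
    )
    writer_tool = AgentTool(agent=writer)

    # The agent that calls the tool must not issue parallel tool calls,
    # so disable them on its model client.
    main_model_client = OpenAIChatCompletionClient(model="gpt-4o", parallel_tool_calls=False)
    assistant = AssistantAgent(
        name="assistant",
        model_client=main_model_client,
        tools=[writer_tool],
        system_message="You are a helpful assistant.",
    )

    result = await assistant.run(task="Write a short poem about the sea.")
    print(result.messages[-1].content)

    await writer_model_client.close()
    await main_model_client.close()


asyncio.run(main())
```

The same pattern applies to `TeamTool`: only the client of the agent that invokes the tool needs `parallel_tool_calls=False`; clients used by the wrapped agents or team members are unaffected.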