mirror of
https://github.com/microsoft/autogen.git
synced 2025-12-27 15:09:41 +00:00
OTel GenAI Traces for Agent and Tool (#6653)
Add OTel GenAI traces:

- `create_agent`
- `invoke_agent`
- `execute_tool`

Introduces context manager helpers to create these traces. The helpers also serve as instrumentation points for other instrumentation libraries. Resolves #6644
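As a quick orientation, here is a minimal sketch of how the three helpers introduced in this commit are used as context managers. It assumes any OpenTelemetry-compatible tracer provider has been configured; `MyAgent` and `my_tool` are hypothetical placeholder names, not identifiers from this change.

```python
from autogen_core import (
    trace_create_agent_span,
    trace_invoke_agent_span,
    trace_tool_span,
)

# Each helper opens a span following the (incubating) GenAI semantic conventions.
# "MyAgent" and "my_tool" are hypothetical placeholders.
with trace_create_agent_span(agent_name="MyAgent", agent_description="Example agent"):
    pass  # stand-in for agent construction

with trace_invoke_agent_span(agent_name="MyAgent"):
    pass  # stand-in for running the agent on a task

with trace_tool_span("my_tool", tool_call_id="call_123"):
    pass  # stand-in for executing the tool
```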
This commit is contained in:
parent
892492f1d9
commit
e14fb8fc09
@@ -1323,7 +1323,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
 for handoff_tool in handoff_tools:
     if tool_call.name == handoff_tool.name:
         # Run handoff tool call.
-        result = await handoff_tool.run_json(arguments, cancellation_token)
+        result = await handoff_tool.run_json(arguments, cancellation_token, call_id=tool_call.id)
         result_as_str = handoff_tool.return_value_as_string(result)
         return (
             tool_call,
@@ -1343,6 +1343,7 @@ class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]):
     name=tool_call.name,
     arguments=arguments,
     cancellation_token=cancellation_token,
+    call_id=tool_call.id,
 )
 return (
     tool_call,
@@ -1,7 +1,7 @@
 from abc import ABC, abstractmethod
 from typing import Any, AsyncGenerator, List, Mapping, Sequence

-from autogen_core import CancellationToken, ComponentBase
+from autogen_core import CancellationToken, ComponentBase, trace_create_agent_span, trace_invoke_agent_span
 from pydantic import BaseModel

 from ..base import ChatAgent, Response, TaskResult
@@ -39,10 +39,15 @@ class BaseChatAgent(ChatAgent, ABC, ComponentBase[BaseModel]):
 component_type = "agent"

 def __init__(self, name: str, description: str) -> None:
-    self._name = name
-    if self._name.isidentifier() is False:
-        raise ValueError("The agent name must be a valid Python identifier.")
-    self._description = description
+    """Initialize the agent with a name and description."""
+    with trace_create_agent_span(
+        agent_name=name,
+        agent_description=description,
+    ):
+        self._name = name
+        if self._name.isidentifier() is False:
+            raise ValueError("The agent name must be a valid Python identifier.")
+        self._description = description

 @property
 def name(self) -> str:
@@ -110,34 +115,38 @@ class BaseChatAgent(ChatAgent, ABC, ComponentBase[BaseModel]):
     cancellation_token: CancellationToken | None = None,
 ) -> TaskResult:
     """Run the agent with the given task and return the result."""
-    if cancellation_token is None:
-        cancellation_token = CancellationToken()
-    input_messages: List[BaseChatMessage] = []
-    output_messages: List[BaseAgentEvent | BaseChatMessage] = []
-    if task is None:
-        pass
-    elif isinstance(task, str):
-        text_msg = TextMessage(content=task, source="user")
-        input_messages.append(text_msg)
-        output_messages.append(text_msg)
-    elif isinstance(task, BaseChatMessage):
-        input_messages.append(task)
-        output_messages.append(task)
-    else:
-        if not task:
-            raise ValueError("Task list cannot be empty.")
-        # Task is a sequence of messages.
-        for msg in task:
-            if isinstance(msg, BaseChatMessage):
-                input_messages.append(msg)
-                output_messages.append(msg)
-            else:
-                raise ValueError(f"Invalid message type in sequence: {type(msg)}")
-    response = await self.on_messages(input_messages, cancellation_token)
-    if response.inner_messages is not None:
-        output_messages += response.inner_messages
-    output_messages.append(response.chat_message)
-    return TaskResult(messages=output_messages)
+    with trace_invoke_agent_span(
+        agent_name=self.name,
+        agent_description=self.description,
+    ):
+        if cancellation_token is None:
+            cancellation_token = CancellationToken()
+        input_messages: List[BaseChatMessage] = []
+        output_messages: List[BaseAgentEvent | BaseChatMessage] = []
+        if task is None:
+            pass
+        elif isinstance(task, str):
+            text_msg = TextMessage(content=task, source="user")
+            input_messages.append(text_msg)
+            output_messages.append(text_msg)
+        elif isinstance(task, BaseChatMessage):
+            input_messages.append(task)
+            output_messages.append(task)
+        else:
+            if not task:
+                raise ValueError("Task list cannot be empty.")
+            # Task is a sequence of messages.
+            for msg in task:
+                if isinstance(msg, BaseChatMessage):
+                    input_messages.append(msg)
+                    output_messages.append(msg)
+                else:
+                    raise ValueError(f"Invalid message type in sequence: {type(msg)}")
+        response = await self.on_messages(input_messages, cancellation_token)
+        if response.inner_messages is not None:
+            output_messages += response.inner_messages
+        output_messages.append(response.chat_message)
+        return TaskResult(messages=output_messages)

 async def run_stream(
     self,
@@ -147,42 +156,46 @@ class BaseChatAgent(ChatAgent, ABC, ComponentBase[BaseModel]):
 ) -> AsyncGenerator[BaseAgentEvent | BaseChatMessage | TaskResult, None]:
     """Run the agent with the given task and return a stream of messages
     and the final task result as the last item in the stream."""
-    if cancellation_token is None:
-        cancellation_token = CancellationToken()
-    input_messages: List[BaseChatMessage] = []
-    output_messages: List[BaseAgentEvent | BaseChatMessage] = []
-    if task is None:
-        pass
-    elif isinstance(task, str):
-        text_msg = TextMessage(content=task, source="user")
-        input_messages.append(text_msg)
-        output_messages.append(text_msg)
-        yield text_msg
-    elif isinstance(task, BaseChatMessage):
-        input_messages.append(task)
-        output_messages.append(task)
-        yield task
-    else:
-        if not task:
-            raise ValueError("Task list cannot be empty.")
-        for msg in task:
-            if isinstance(msg, BaseChatMessage):
-                input_messages.append(msg)
-                output_messages.append(msg)
-                yield msg
-            else:
-                raise ValueError(f"Invalid message type in sequence: {type(msg)}")
-    async for message in self.on_messages_stream(input_messages, cancellation_token):
-        if isinstance(message, Response):
-            yield message.chat_message
-            output_messages.append(message.chat_message)
-            yield TaskResult(messages=output_messages)
-        else:
-            yield message
-            if isinstance(message, ModelClientStreamingChunkEvent):
-                # Skip the model client streaming chunk events.
-                continue
-            output_messages.append(message)
+    with trace_invoke_agent_span(
+        agent_name=self.name,
+        agent_description=self.description,
+    ):
+        if cancellation_token is None:
+            cancellation_token = CancellationToken()
+        input_messages: List[BaseChatMessage] = []
+        output_messages: List[BaseAgentEvent | BaseChatMessage] = []
+        if task is None:
+            pass
+        elif isinstance(task, str):
+            text_msg = TextMessage(content=task, source="user")
+            input_messages.append(text_msg)
+            output_messages.append(text_msg)
+            yield text_msg
+        elif isinstance(task, BaseChatMessage):
+            input_messages.append(task)
+            output_messages.append(task)
+            yield task
+        else:
+            if not task:
+                raise ValueError("Task list cannot be empty.")
+            for msg in task:
+                if isinstance(msg, BaseChatMessage):
+                    input_messages.append(msg)
+                    output_messages.append(msg)
+                    yield msg
+                else:
+                    raise ValueError(f"Invalid message type in sequence: {type(msg)}")
+        async for message in self.on_messages_stream(input_messages, cancellation_token):
+            if isinstance(message, Response):
+                yield message.chat_message
+                output_messages.append(message.chat_message)
+                yield TaskResult(messages=output_messages)
+            else:
+                yield message
+                if isinstance(message, ModelClientStreamingChunkEvent):
+                    # Skip the model client streaming chunk events.
+                    continue
+                output_messages.append(message)

 @abstractmethod
 async def on_reset(self, cancellation_token: CancellationToken) -> None:
@@ -1,6 +1,6 @@
 from typing import Any, List, Mapping

-from autogen_core import DefaultTopicId, MessageContext, event, rpc
+from autogen_core import DefaultTopicId, MessageContext, event, rpc, trace_invoke_agent_span

 from autogen_agentchat.messages import BaseAgentEvent, BaseChatMessage, MessageFactory

@@ -73,36 +73,41 @@ class ChatAgentContainer(SequentialRoutedAgent):
 async def handle_request(self, message: GroupChatRequestPublish, ctx: MessageContext) -> None:
     """Handle a content request event by passing the messages in the buffer
     to the delegate agent and publish the response."""
-    try:
-        # Pass the messages in the buffer to the delegate agent.
-        response: Response | None = None
-        async for msg in self._agent.on_messages_stream(self._message_buffer, ctx.cancellation_token):
-            if isinstance(msg, Response):
-                await self._log_message(msg.chat_message)
-                response = msg
-            else:
-                await self._log_message(msg)
-        if response is None:
-            raise ValueError(
-                "The agent did not produce a final response. Check the agent's on_messages_stream method."
-            )
-        # Publish the response to the group chat.
-        self._message_buffer.clear()
-        await self.publish_message(
-            GroupChatAgentResponse(agent_response=response, agent_name=self._agent.name),
-            topic_id=DefaultTopicId(type=self._parent_topic_type),
-            cancellation_token=ctx.cancellation_token,
-        )
-    except Exception as e:
-        # Publish the error to the group chat.
-        error_message = SerializableException.from_exception(e)
-        await self.publish_message(
-            GroupChatError(error=error_message),
-            topic_id=DefaultTopicId(type=self._parent_topic_type),
-            cancellation_token=ctx.cancellation_token,
-        )
-        # Raise the error to the runtime.
-        raise
+    with trace_invoke_agent_span(
+        agent_name=self._agent.name,
+        agent_description=self._agent.description,
+        agent_id=str(self.id),
+    ):
+        try:
+            # Pass the messages in the buffer to the delegate agent.
+            response: Response | None = None
+            async for msg in self._agent.on_messages_stream(self._message_buffer, ctx.cancellation_token):
+                if isinstance(msg, Response):
+                    await self._log_message(msg.chat_message)
+                    response = msg
+                else:
+                    await self._log_message(msg)
+            if response is None:
+                raise ValueError(
+                    "The agent did not produce a final response. Check the agent's on_messages_stream method."
+                )
+            # Publish the response to the group chat.
+            self._message_buffer.clear()
+            await self.publish_message(
+                GroupChatAgentResponse(agent_response=response, agent_name=self._agent.name),
+                topic_id=DefaultTopicId(type=self._parent_topic_type),
+                cancellation_token=ctx.cancellation_token,
+            )
+        except Exception as e:
+            # Publish the error to the group chat.
+            error_message = SerializableException.from_exception(e)
+            await self.publish_message(
+                GroupChatError(error=error_message),
+                topic_id=DefaultTopicId(type=self._parent_topic_type),
+                cancellation_token=ctx.cancellation_token,
+            )
+            # Raise the error to the runtime.
+            raise

 def _buffer_message(self, message: BaseChatMessage) -> None:
     if not self._message_factory.is_registered(message.__class__):
BIN python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/jaeger.png (Stored with Git LFS)
Binary file not shown.
@@ -1,403 +1,329 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Tracing and Observability\n",
-    "\n",
-    "AutoGen has [built-in support for tracing](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/telemetry.html) and observability for collecting comprehensive records on the execution of your application. This feature is useful for debugging, performance analysis, and understanding the flow of your application.\n",
-    "\n",
-    "This capability is powered by the [OpenTelemetry](https://opentelemetry.io/) library, which means you can use any OpenTelemetry-compatible backend to collect and analyze traces.\n",
-    "\n",
-    "## Setup\n",
-    "\n",
-    "To begin, you need to install the OpenTelemetry Python package. You can do this using pip:\n",
-    "\n",
-    "```bash\n",
-    "pip install opentelemetry-sdk opentelemetry-exporter-otlp-proto-grpc\n",
-    "```\n",
-    "\n",
-    "Once you have the SDK installed, the simplest way to set up tracing in AutoGen is to:\n",
-    "\n",
-    "1. Configure an OpenTelemetry tracer provider\n",
-    "2. Set up an exporter to send traces to your backend\n",
-    "3. Connect the tracer provider to the AutoGen runtime\n",
-    "\n",
-    "## Telemetry Backend\n",
-    "\n",
-    "To collect and view traces, you need to set up a telemetry backend. Several open-source options are available, including Jaeger and Zipkin. For this example, we will use Jaeger as our telemetry backend.\n",
-    "\n",
-    "For a quick start, you can run Jaeger locally using Docker:\n",
-    "\n",
-    "```bash\n",
-    "docker run -d --name jaeger \\\n",
-    "  -e COLLECTOR_OTLP_ENABLED=true \\\n",
-    "  -p 16686:16686 \\\n",
-    "  -p 4317:4317 \\\n",
-    "  -p 4318:4318 \\\n",
-    "  jaegertracing/all-in-one:latest\n",
-    "```\n",
-    "\n",
-    "This command starts a Jaeger instance that listens on port 16686 for the Jaeger UI and port 4317 for the OpenTelemetry collector. You can access the Jaeger UI at `http://localhost:16686`.\n",
-    "\n",
-    "## Instrumenting an AgentChat Team\n",
-    "\n",
-    "In the following section, we will review how to enable tracing with an AutoGen GroupChat team. The AutoGen runtime already supports OpenTelemetry (automatically logging message metadata). To begin, we will create a tracing service that will be used to instrument the AutoGen runtime. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from opentelemetry import trace\n",
-    "from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\n",
-    "from opentelemetry.sdk.resources import Resource\n",
-    "from opentelemetry.sdk.trace import TracerProvider\n",
-    "from opentelemetry.sdk.trace.export import BatchSpanProcessor\n",
-    "\n",
-    "otel_exporter = OTLPSpanExporter(endpoint=\"http://localhost:4317\", insecure=True)\n",
-    "tracer_provider = TracerProvider(resource=Resource({\"service.name\": \"autogen-test-agentchat\"}))\n",
-    "span_processor = BatchSpanProcessor(otel_exporter)\n",
-    "tracer_provider.add_span_processor(span_processor)\n",
-    "trace.set_tracer_provider(tracer_provider)\n",
-    "\n",
-    "# we will get a reference to this tracer later using its service name\n",
-    "# tracer = trace.get_tracer(\"autogen-test-agentchat\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "\n",
-    "\n",
-    "All of the code to create a [team](./tutorial/teams.ipynb) should already be familiar to you. An important note here is that all AgentChat agents and teams are run using the AutoGen core API runtime. In turn, the runtime is already instrumented to log [runtime messaging events (metadata)](https://github.com/microsoft/autogen/blob/main/python/packages/autogen-core/src/autogen_core/_telemetry/_tracing_config.py) including:\n",
-    "\n",
-    "- **create**: When a message is created\n",
-    "- **send**: When a message is sent\n",
-    "- **publish**: When a message is published\n",
-    "- **receive**: When a message is received\n",
-    "- **intercept**: When a message is intercepted\n",
-    "- **process**: When a message is processed\n",
-    "- **ack**: When a message is acknowledged \n",
-    " "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from autogen_agentchat.agents import AssistantAgent\n",
-    "from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
-    "from autogen_agentchat.teams import SelectorGroupChat\n",
-    "from autogen_agentchat.ui import Console\n",
-    "from autogen_core import SingleThreadedAgentRuntime\n",
-    "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
-    "\n",
-    "\n",
-    "def search_web_tool(query: str) -> str:\n",
-    "    if \"2006-2007\" in query:\n",
-    "        return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
-    "        Udonis Haslem: 844 points\n",
-    "        Dwayne Wade: 1397 points\n",
-    "        James Posey: 550 points\n",
-    "        ...\n",
-    "        \"\"\"\n",
-    "    elif \"2007-2008\" in query:\n",
-    "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n",
-    "    elif \"2008-2009\" in query:\n",
-    "        return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n",
-    "    return \"No data found.\"\n",
-    "\n",
-    "\n",
-    "def percentage_change_tool(start: float, end: float) -> float:\n",
-    "    return ((end - start) / start) * 100\n",
-    "\n",
-    "\n",
-    "async def main() -> None:\n",
-    "    model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
-    "\n",
-    "    planning_agent = AssistantAgent(\n",
-    "        \"PlanningAgent\",\n",
-    "        description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n",
-    "        model_client=model_client,\n",
-    "        system_message=\"\"\"\n",
-    "        You are a planning agent.\n",
-    "        Your job is to break down complex tasks into smaller, manageable subtasks.\n",
-    "        Your team members are:\n",
-    "        WebSearchAgent: Searches for information\n",
-    "        DataAnalystAgent: Performs calculations\n",
-    "\n",
-    "        You only plan and delegate tasks - you do not execute them yourself.\n",
-    "\n",
-    "        When assigning tasks, use this format:\n",
-    "        1. <agent> : <task>\n",
-    "\n",
-    "        After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n",
-    "        \"\"\",\n",
-    "    )\n",
-    "\n",
-    "    web_search_agent = AssistantAgent(\n",
-    "        \"WebSearchAgent\",\n",
-    "        description=\"An agent for searching information on the web.\",\n",
-    "        tools=[search_web_tool],\n",
-    "        model_client=model_client,\n",
-    "        system_message=\"\"\"\n",
-    "        You are a web search agent.\n",
-    "        Your only tool is search_tool - use it to find information.\n",
-    "        You make only one search call at a time.\n",
-    "        Once you have the results, you never do calculations based on them.\n",
-    "        \"\"\",\n",
-    "    )\n",
-    "\n",
-    "    data_analyst_agent = AssistantAgent(\n",
-    "        \"DataAnalystAgent\",\n",
-    "        description=\"An agent for performing calculations.\",\n",
-    "        model_client=model_client,\n",
-    "        tools=[percentage_change_tool],\n",
-    "        system_message=\"\"\"\n",
-    "        You are a data analyst.\n",
-    "        Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n",
-    "        If you have not seen the data, ask for it.\n",
-    "        \"\"\",\n",
-    "    )\n",
-    "\n",
-    "    text_mention_termination = TextMentionTermination(\"TERMINATE\")\n",
-    "    max_messages_termination = MaxMessageTermination(max_messages=25)\n",
-    "    termination = text_mention_termination | max_messages_termination\n",
-    "\n",
-    "    selector_prompt = \"\"\"Select an agent to perform task.\n",
-    "\n",
-    "    {roles}\n",
-    "\n",
-    "    Current conversation context:\n",
-    "    {history}\n",
-    "\n",
-    "    Read the above conversation, then select an agent from {participants} to perform the next task.\n",
-    "    Make sure the planner agent has assigned tasks before other agents start working.\n",
-    "    Only select one agent.\n",
-    "    \"\"\"\n",
-    "\n",
-    "    task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n",
-    "\n",
-    "    tracer = trace.get_tracer(\"autogen-test-agentchat\")\n",
-    "    with tracer.start_as_current_span(\"runtime\"):\n",
-    "        team = SelectorGroupChat(\n",
-    "            [planning_agent, web_search_agent, data_analyst_agent],\n",
-    "            model_client=model_client,\n",
-    "            termination_condition=termination,\n",
-    "            selector_prompt=selector_prompt,\n",
-    "            allow_repeated_speaker=True,\n",
-    "        )\n",
-    "        await Console(team.run_stream(task=task))\n",
-    "\n",
-    "    await model_client.close()\n",
-    "\n",
-    "\n",
-    "# asyncio.run(main())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "---------- user ----------\n",
-      "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n",
-      "---------- PlanningAgent ----------\n",
-      "To accomplish this, we can break down the tasks as follows:\n",
-      "\n",
-      "1. WebSearchAgent: Search for the Miami Heat player with the highest points during the 2006-2007 NBA season.\n",
-      "2. WebSearchAgent: Find the total rebounds for the identified player in both the 2007-2008 and 2008-2009 NBA seasons.\n",
-      "3. DataAnalystAgent: Calculate the percentage change in total rebounds for the player between the 2007-2008 and 2008-2009 seasons.\n",
-      "\n",
-      "Once these tasks are complete, I will summarize the findings.\n",
-      "---------- WebSearchAgent ----------\n",
-      "[FunctionCall(id='call_PUhxZyR0CTlWCY4uwd5Zh3WO', arguments='{\"query\":\"Miami Heat highest points scorer 2006-2007 season\"}', name='search_web_tool')]\n",
-      "---------- WebSearchAgent ----------\n",
-      "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_PUhxZyR0CTlWCY4uwd5Zh3WO', is_error=False)]\n",
-      "---------- WebSearchAgent ----------\n",
-      "Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
-      " Udonis Haslem: 844 points\n",
-      " Dwayne Wade: 1397 points\n",
-      " James Posey: 550 points\n",
-      " ...\n",
-      " \n",
-      "---------- WebSearchAgent ----------\n",
-      "Dwyane Wade was the Miami Heat player with the highest points in the 2006-2007 season, scoring 1,397 points. Now, let's find his total rebounds for the 2007-2008 and 2008-2009 NBA seasons.\n",
-      "---------- WebSearchAgent ----------\n",
-      "[FunctionCall(id='call_GL7KkWKj9ejIM8FfpgXe2dPk', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_X81huZoiA30zIjSAIDgb8ebe', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n",
-      "---------- WebSearchAgent ----------\n",
-      "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_GL7KkWKj9ejIM8FfpgXe2dPk', is_error=False), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_X81huZoiA30zIjSAIDgb8ebe', is_error=False)]\n",
-      "---------- WebSearchAgent ----------\n",
-      "The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n",
-      "The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n",
-      "---------- DataAnalystAgent ----------\n",
-      "[FunctionCall(id='call_kB50RkFVqHptA7FOf0lL2RS8', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n",
-      "---------- DataAnalystAgent ----------\n",
-      "[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_kB50RkFVqHptA7FOf0lL2RS8', is_error=False)]\n",
-      "---------- DataAnalystAgent ----------\n",
-      "85.98130841121495\n",
-      "---------- PlanningAgent ----------\n",
-      "The Miami Heat player with the highest points during the 2006-2007 NBA season was Dwayne Wade, who scored 1,397 points. The percentage increase in his total rebounds from the 2007-2008 season (214 rebounds) to the 2008-2009 season (398 rebounds) was approximately 86%.\n",
-      "\n",
-      "TERMINATE\n"
-     ]
-    }
-   ],
-   "source": [
-    "await main()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "You can then use the Jaeger UI to view the traces collected from the application run above. \n",
-    "\n",
-    "![Jaeger UI](jaeger.png)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Custom Traces \n",
-    "\n",
-    "So far, we are logging only the default events that are generated by the AutoGen runtime (message created, publish etc). However, you can also create custom spans to log specific events in your application. \n",
-    "\n",
-    "In the example below, we will show how to log messages from the `RoundRobinGroupChat` team as they are generated by adding custom spans around the team to log runtime events and spans to log messages generated by the team.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "-- primary_agent -- : Leaves cascade like gold, \n",
-      "Whispering winds cool the earth.\n",
-      "primary_agent: Leaves cascade like gold, \n",
-      "Whispering winds cool the earth.\n",
-      "\n",
-      "-- critic_agent -- : Your haiku beautifully captures the essence of the fall season with vivid imagery. However, it appears to have six syllables in the second line, which should traditionally be five. Here's a revised version keeping the 5-7-5 syllable structure:\n",
-      "\n",
-      "Leaves cascade like gold, \n",
-      "Whispering winds cool the air. \n",
-      "\n",
-      "Please adjust the second line to reflect a five-syllable count. Thank you!\n",
-      "critic_agent: Your haiku beautifully captures the essence of the fall season with vivid imagery. However, it appears to have six syllables in the second line, which should traditionally be five. Here's a revised version keeping the 5-7-5 syllable structure:\n",
-      "\n",
-      "Leaves cascade like gold, \n",
-      "Whispering winds cool the air. \n",
-      "\n",
-      "Please adjust the second line to reflect a five-syllable count. Thank you!\n",
-      "\n",
-      "-- primary_agent -- : Leaves cascade like gold, \n",
-      "Whispering winds cool the air.\n",
-      "primary_agent: Leaves cascade like gold, \n",
-      "Whispering winds cool the air.\n",
-      "\n",
-      "-- critic_agent -- : APPROVE\n",
-      "critic_agent: APPROVE\n"
-     ]
-    }
-   ],
-   "source": [
-    "from autogen_agentchat.base import TaskResult\n",
-    "from autogen_agentchat.conditions import ExternalTermination\n",
-    "from autogen_agentchat.teams import RoundRobinGroupChat\n",
-    "from autogen_core import CancellationToken\n",
-    "\n",
-    "\n",
-    "async def run_agents() -> None:\n",
-    "    # Create an OpenAI model client.\n",
-    "    model_client = OpenAIChatCompletionClient(model=\"gpt-4o-2024-08-06\")\n",
-    "\n",
-    "    # Create the primary agent.\n",
-    "    primary_agent = AssistantAgent(\n",
-    "        \"primary_agent\",\n",
-    "        model_client=model_client,\n",
-    "        system_message=\"You are a helpful AI assistant.\",\n",
-    "    )\n",
-    "\n",
-    "    # Create the critic agent.\n",
-    "    critic_agent = AssistantAgent(\n",
-    "        \"critic_agent\",\n",
-    "        model_client=model_client,\n",
-    "        system_message=\"Provide constructive feedback. Respond with 'APPROVE' when your feedback is addressed.\",\n",
-    "    )\n",
-    "\n",
-    "    # Define a termination condition that stops the task if the critic approves.\n",
-    "    text_termination = TextMentionTermination(\"APPROVE\")\n",
-    "\n",
-    "    tracer = trace.get_tracer(\"autogen-test-agentchat\")\n",
-    "    with tracer.start_as_current_span(\"runtime_round_robin_events\"):\n",
-    "        team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=text_termination)\n",
-    "\n",
-    "        response_stream = team.run_stream(task=\"Write a 2 line haiku about the fall season\")\n",
-    "        async for response in response_stream:\n",
-    "            if not isinstance(response, TaskResult):\n",
-    "                print(f\"\\n-- {response.source} -- : {response.to_text()}\")\n",
-    "                with tracer.start_as_current_span(f\"agent_message.{response.source}\") as message_span:\n",
-    "                    message_span.set_attribute(\"agent.name\", response.source)\n",
-    "                    message_span.set_attribute(\"message.content\", response.to_text())\n",
-    "                    print(f\"{response.source}: {response.to_text()}\")\n",
-    "\n",
-    "    await model_client.close()\n",
-    "\n",
-    "\n",
-    "await run_agents()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "\n",
-    "In the code above, we create a new span for each message sent by the agent. We set attributes on the span to include the agent's name and the message content. This allows us to trace the flow of messages through our application and understand how they are processed. \n",
-    " "
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": ".venv",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.12.3"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Tracing and Observability\n",
|
||||
"\n",
|
||||
"AutoGen has [built-in support for tracing](https://microsoft.github.io/autogen/dev/user-guide/core-user-guide/framework/telemetry.html) and observability for collecting comprehensive records on the execution of your application. This feature is useful for debugging, performance analysis, and understanding the flow of your application.\n",
|
||||
"\n",
|
||||
"This capability is powered by the [OpenTelemetry](https://opentelemetry.io/) library, which means you can use any OpenTelemetry-compatible backend to collect and analyze traces.\n",
|
||||
"\n",
|
||||
"AutoGen follows the [OpenTelemetry Semantic Conventions](https://opentelemetry.io/docs/specs/semconv/) for tracing, for agents and tools.\n",
|
||||
"It also follows the [Semantic Conventions for GenAI Systems](https://opentelemetry.io/docs/specs/semconv/gen-ai/) currently under development.\n",
|
||||
"\n",
|
||||
"## Setup\n",
|
||||
"\n",
|
||||
"To begin, you need to install the OpenTelemetry Python package. You can do this using pip:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"pip install opentelemetry-sdk opentelemetry-exporter-otlp-proto-grpc opentelemetry-instrumentation-openai\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Once you have the SDK installed, the simplest way to set up tracing in AutoGen is to:\n",
|
||||
"\n",
|
||||
"1. Configure an OpenTelemetry tracer provider\n",
|
||||
"2. Set up an exporter to send traces to your backend\n",
|
||||
"3. Connect the tracer provider to the AutoGen runtime\n",
|
||||
"\n",
|
||||
"## Telemetry Backend\n",
|
||||
"\n",
|
||||
"To collect and view traces, you need to set up a telemetry backend. Several open-source options are available, including Jaeger, Zipkin. For this example, we will use Jaeger as our telemetry backend.\n",
|
||||
"\n",
|
||||
"For a quick start, you can run Jaeger locally using Docker:\n",
|
||||
"\n",
|
||||
"```bash\n",
|
||||
"docker run -d --name jaeger \\\n",
|
||||
" -e COLLECTOR_OTLP_ENABLED=true \\\n",
|
||||
" -p 16686:16686 \\\n",
|
||||
" -p 4317:4317 \\\n",
|
||||
" -p 4318:4318 \\\n",
|
||||
" jaegertracing/all-in-one:latest\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This command starts a Jaeger instance that listens on port 16686 for the Jaeger UI and port 4317 for the OpenTelemetry collector. You can access the Jaeger UI at `http://localhost:16686`.\n",
|
||||
"\n",
|
||||
"## Tracing an AgentChat Team\n",
|
||||
"\n",
|
||||
"In the following section, we will review how to enable tracing with an AutoGen GroupChat team. The AutoGen runtime already supports open telemetry (automatically logging message metadata). To begin, we will create a tracing service that will be used to instrument the AutoGen runtime. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Overriding of current TracerProvider is not allowed\n",
|
||||
"Attempting to instrument while already instrumented\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from opentelemetry import trace\n",
|
||||
"from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\n",
|
||||
"from opentelemetry.instrumentation.openai import OpenAIInstrumentor\n",
|
||||
"from opentelemetry.sdk.resources import Resource\n",
|
||||
"from opentelemetry.sdk.trace import TracerProvider\n",
|
||||
"from opentelemetry.sdk.trace.export import BatchSpanProcessor\n",
|
||||
"\n",
|
||||
"# Set up telemetry span exporter.\n",
|
||||
"otel_exporter = OTLPSpanExporter(endpoint=\"http://localhost:4317\", insecure=True)\n",
|
||||
"span_processor = BatchSpanProcessor(otel_exporter)\n",
|
||||
"\n",
|
||||
"# Set up telemetry trace provider.\n",
|
||||
"tracer_provider = TracerProvider(resource=Resource({\"service.name\": \"autogen-test-agentchat\"}))\n",
|
||||
"tracer_provider.add_span_processor(span_processor)\n",
|
||||
"trace.set_tracer_provider(tracer_provider)\n",
|
||||
"\n",
|
||||
"# Instrument the OpenAI Python library\n",
|
||||
"OpenAIInstrumentor().instrument()\n",
|
||||
"\n",
|
||||
"# we will get reference this tracer later using its service name\n",
|
||||
"# tracer = trace.get_tracer(\"autogen-test-agentchat\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"All of the code to create a [team](./tutorial/teams.ipynb) should already be familiar to you.\n",
|
||||
"\n",
|
||||
"```{note}\n",
|
||||
"AgentChat teams are run using the AutoGen Core's agent runtime.\n",
|
||||
"In turn, the runtime is already instrumented to log, see [Core Telemetry Guide](../core-user-guide/framework/telemetry.md).\n",
|
||||
"To disable the agent runtime telemetry, you can set the `trace_provider` to\n",
|
||||
"`opentelemetry.trace.NoOpTraceProvider` in the runtime constructor.\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from autogen_agentchat.agents import AssistantAgent\n",
|
||||
"from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination\n",
|
||||
"from autogen_agentchat.teams import SelectorGroupChat\n",
|
||||
"from autogen_agentchat.ui import Console\n",
|
||||
"from autogen_core import SingleThreadedAgentRuntime\n",
|
||||
"from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def search_web_tool(query: str) -> str:\n",
|
||||
" if \"2006-2007\" in query:\n",
|
||||
" return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
|
||||
" Udonis Haslem: 844 points\n",
|
||||
" Dwayne Wade: 1397 points\n",
|
||||
" James Posey: 550 points\n",
|
||||
" ...\n",
|
||||
" \"\"\"\n",
|
||||
" elif \"2007-2008\" in query:\n",
|
||||
" return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n",
|
||||
" elif \"2008-2009\" in query:\n",
|
||||
" return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n",
|
||||
" return \"No data found.\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def percentage_change_tool(start: float, end: float) -> float:\n",
|
||||
" return ((end - start) / start) * 100\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"async def main() -> None:\n",
|
||||
" model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
|
||||
"\n",
|
||||
" # Get a tracer with the default tracer provider.\n",
|
||||
" tracer = trace.get_tracer(\"tracing-autogen-agentchat\")\n",
|
||||
"\n",
|
||||
" # Use the tracer to create a span for the main function.\n",
|
||||
" with tracer.start_as_current_span(\"run_team\"):\n",
|
||||
" planning_agent = AssistantAgent(\n",
|
||||
" \"PlanningAgent\",\n",
|
||||
" description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n",
|
||||
" model_client=model_client,\n",
|
||||
" system_message=\"\"\"\n",
|
||||
" You are a planning agent.\n",
|
||||
" Your job is to break down complex tasks into smaller, manageable subtasks.\n",
|
||||
" Your team members are:\n",
|
||||
" WebSearchAgent: Searches for information\n",
|
||||
" DataAnalystAgent: Performs calculations\n",
|
||||
"\n",
|
||||
" You only plan and delegate tasks - you do not execute them yourself.\n",
|
||||
"\n",
|
||||
" When assigning tasks, use this format:\n",
|
||||
" 1. <agent> : <task>\n",
|
||||
"\n",
|
||||
" After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" web_search_agent = AssistantAgent(\n",
|
||||
" \"WebSearchAgent\",\n",
|
||||
" description=\"An agent for searching information on the web.\",\n",
|
||||
" tools=[search_web_tool],\n",
|
||||
" model_client=model_client,\n",
|
||||
" system_message=\"\"\"\n",
|
||||
" You are a web search agent.\n",
|
||||
" Your only tool is search_tool - use it to find information.\n",
|
||||
" You make only one search call at a time.\n",
|
||||
" Once you have the results, you never do calculations based on them.\n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" data_analyst_agent = AssistantAgent(\n",
|
||||
" \"DataAnalystAgent\",\n",
|
||||
" description=\"An agent for performing calculations.\",\n",
|
||||
" model_client=model_client,\n",
|
||||
" tools=[percentage_change_tool],\n",
|
||||
" system_message=\"\"\"\n",
|
||||
" You are a data analyst.\n",
|
||||
" Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n",
|
||||
" If you have not seen the data, ask for it.\n",
|
||||
" \"\"\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" text_mention_termination = TextMentionTermination(\"TERMINATE\")\n",
|
||||
" max_messages_termination = MaxMessageTermination(max_messages=25)\n",
|
||||
" termination = text_mention_termination | max_messages_termination\n",
|
||||
"\n",
|
||||
" selector_prompt = \"\"\"Select an agent to perform task.\n",
|
||||
"\n",
|
||||
" {roles}\n",
|
||||
"\n",
|
||||
" Current conversation context:\n",
|
||||
" {history}\n",
|
||||
"\n",
|
||||
" Read the above conversation, then select an agent from {participants} to perform the next task.\n",
|
||||
" Make sure the planner agent has assigned tasks before other agents start working.\n",
|
||||
" Only select one agent.\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
" task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n",
|
||||
"\n",
|
||||
" runtime = SingleThreadedAgentRuntime(\n",
|
||||
" tracer_provider=trace.NoOpTracerProvider(), # Disable telemetry for runtime.\n",
|
||||
" )\n",
|
||||
" runtime.start()\n",
|
||||
"\n",
|
||||
" team = SelectorGroupChat(\n",
|
||||
" [planning_agent, web_search_agent, data_analyst_agent],\n",
|
||||
" model_client=model_client,\n",
|
||||
" termination_condition=termination,\n",
|
||||
" selector_prompt=selector_prompt,\n",
|
||||
" allow_repeated_speaker=True,\n",
|
||||
" runtime=runtime,\n",
|
||||
" )\n",
|
||||
" await Console(team.run_stream(task=task))\n",
|
||||
"\n",
|
||||
" await runtime.stop()\n",
|
||||
"\n",
|
||||
" await model_client.close()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# asyncio.run(main())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"---------- TextMessage (user) ----------\n",
|
||||
"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n",
|
||||
"---------- TextMessage (PlanningAgent) ----------\n",
|
||||
"To find the information requested, we need to follow these steps:\n",
|
||||
"\n",
|
||||
"1. Identify the Miami Heat player with the highest points during the 2006-2007 season.\n",
|
||||
"2. Get the total rebounds for that player in both the 2007-2008 and 2008-2009 seasons.\n",
|
||||
"3. Calculate the percentage change in total rebounds between these two seasons.\n",
|
||||
"\n",
|
||||
"Here are the tasks assigned to achieve this:\n",
|
||||
"\n",
|
||||
"1. WebSearchAgent: Find the Miami Heat player with the highest points during the 2006-2007 season.\n",
|
||||
"2. WebSearchAgent: After identifying the player, find the total rebounds for that player in the 2007-2008 and 2008-2009 seasons.\n",
|
||||
"3. DataAnalystAgent: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\n",
|
||||
"---------- ToolCallRequestEvent (WebSearchAgent) ----------\n",
|
||||
"[FunctionCall(id='call_hS8yod9l6CYUllDveUffp58e', arguments='{\"query\":\"Miami Heat leading scorer 2006-2007 season\"}', name='search_web_tool')]\n",
|
||||
"---------- ToolCallExecutionEvent (WebSearchAgent) ----------\n",
|
||||
"[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', name='search_web_tool', call_id='call_hS8yod9l6CYUllDveUffp58e', is_error=False)]\n",
|
||||
"---------- ToolCallSummaryMessage (WebSearchAgent) ----------\n",
|
||||
"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n",
|
||||
" Udonis Haslem: 844 points\n",
|
||||
" Dwayne Wade: 1397 points\n",
|
||||
" James Posey: 550 points\n",
|
||||
" ...\n",
|
||||
" \n",
|
||||
"---------- ToolCallRequestEvent (WebSearchAgent) ----------\n",
|
||||
"[FunctionCall(id='call_bUJxtpxUXFSxECDogye9WL0g', arguments='{\"query\":\"Dwyane Wade total rebounds in 2007-2008 season\"}', name='search_web_tool')]\n",
|
||||
"---------- ToolCallExecutionEvent (WebSearchAgent) ----------\n",
|
||||
"[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', name='search_web_tool', call_id='call_bUJxtpxUXFSxECDogye9WL0g', is_error=False)]\n",
|
||||
"---------- ToolCallSummaryMessage (WebSearchAgent) ----------\n",
|
||||
"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\n",
|
||||
"---------- ToolCallRequestEvent (WebSearchAgent) ----------\n",
|
||||
"[FunctionCall(id='call_pgYNSDhhyodtteot56FRktxp', arguments='{\"query\":\"Dwyane Wade total rebounds in 2008-2009 season\"}', name='search_web_tool')]\n",
|
||||
"---------- ToolCallExecutionEvent (WebSearchAgent) ----------\n",
|
||||
"[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', name='search_web_tool', call_id='call_pgYNSDhhyodtteot56FRktxp', is_error=False)]\n",
|
||||
"---------- ToolCallSummaryMessage (WebSearchAgent) ----------\n",
|
||||
"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\n",
|
||||
"---------- ToolCallRequestEvent (DataAnalystAgent) ----------\n",
|
||||
"[FunctionCall(id='call_A89acjYHlNDLzG09rVNJ0J6H', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n",
|
||||
"---------- ToolCallExecutionEvent (DataAnalystAgent) ----------\n",
|
||||
"[FunctionExecutionResult(content='85.98130841121495', name='percentage_change_tool', call_id='call_A89acjYHlNDLzG09rVNJ0J6H', is_error=False)]\n",
|
||||
"---------- ToolCallSummaryMessage (DataAnalystAgent) ----------\n",
|
||||
"85.98130841121495\n",
|
||||
"---------- TextMessage (PlanningAgent) ----------\n",
|
||||
"The Miami Heat player with the highest points during the 2006-2007 season was Dwyane Wade, who scored 1,397 points. \n",
|
||||
"\n",
|
||||
"The total rebounds for Dwyane Wade in the 2007-2008 season were 214, and in the 2008-2009 season, they were 398.\n",
|
||||
"\n",
|
||||
"The percentage change in his total rebounds between these two seasons is approximately 86.0%.\n",
|
||||
"\n",
|
||||
"TERMINATE\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"await main()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can then use the Jaeger UI to view the traces collected from the application run above. \n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
||||
@@ -19,8 +19,9 @@ dependencies = [
     "typing-extensions>=4.0.0",
     "pydantic<3.0.0,>=2.10.0",
     "protobuf~=5.29.3",
-    "opentelemetry-api>=1.27.0",
+    "opentelemetry-api>=1.34.1",
     "jsonref~=1.1.0",
+    "opentelemetry-semantic-conventions==0.55b1",
 ]

@@ -42,7 +43,7 @@ dev = [
     "llama-index",
     "markdownify",
     "nbqa",
-    "opentelemetry-sdk>=1.27.0",
+    "opentelemetry-sdk>=1.34.1",
     "pip",
     "polars",
     "python-dotenv",
@@ -68,6 +69,7 @@ dev = [
     "autodoc_pydantic~=2.2",
     "pygments",
     "sphinxext-rediraffe",
+    "opentelemetry-instrumentation-openai",

     "autogen_ext==0.6.1",
@@ -59,6 +59,11 @@ from ._serialization import (
 from ._single_threaded_agent_runtime import SingleThreadedAgentRuntime
 from ._subscription import Subscription
 from ._subscription_context import SubscriptionInstantiationContext
+from ._telemetry import (
+    trace_create_agent_span,
+    trace_invoke_agent_span,
+    trace_tool_span,
+)
 from ._topic import TopicId
 from ._type_prefix_subscription import TypePrefixSubscription
 from ._type_subscription import TypeSubscription
@@ -132,4 +137,7 @@ __all__ = [
     "DropMessage",
     "InterventionHandler",
     "DefaultInterventionHandler",
+    "trace_create_agent_span",
+    "trace_invoke_agent_span",
+    "trace_tool_span",
 ]

@@ -1,3 +1,8 @@
+from ._genai import (
+    trace_create_agent_span,
+    trace_invoke_agent_span,
+    trace_tool_span,
+)
 from ._propagation import (
     EnvelopeMetadata,
     TelemetryMetadataContainer,
@@ -14,4 +19,7 @@ __all__ = [
     "TelemetryMetadataContainer",
     "TraceHelper",
     "MessageRuntimeTracingConfig",
+    "trace_create_agent_span",
+    "trace_invoke_agent_span",
+    "trace_tool_span",
 ]
@ -0,0 +1,192 @@
|
||||
from collections.abc import Generator
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Optional
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
|
||||
GEN_AI_AGENT_DESCRIPTION,
|
||||
GEN_AI_AGENT_ID,
|
||||
GEN_AI_AGENT_NAME,
|
||||
GEN_AI_OPERATION_NAME,
|
||||
GEN_AI_SYSTEM,
|
||||
GEN_AI_TOOL_CALL_ID,
|
||||
GEN_AI_TOOL_DESCRIPTION,
|
||||
GEN_AI_TOOL_NAME,
|
||||
GenAiOperationNameValues,
|
||||
)
|
||||
from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
|
||||
from opentelemetry.trace import Span, SpanKind
|
||||
|
||||
from .._agent_instantiation import AgentInstantiationContext
|
||||
|
||||
# Constant for system name
|
||||
GENAI_SYSTEM_AUTOGEN = "autogen"
|
||||
|
||||
|
||||
@contextmanager
|
||||
def trace_tool_span(
|
||||
tool_name: str,
|
||||
*,
|
||||
tracer: Optional[trace.Tracer] = None,
|
||||
parent: Optional[Span] = None,
|
||||
tool_description: Optional[str] = None,
|
||||
tool_call_id: Optional[str] = None,
|
||||
) -> Generator[Span, Any, None]:
|
||||
"""Context manager to create a span for tool execution following the
|
||||
OpenTelemetry Semantic conventions for generative AI systems.
|
||||
|
||||
See the GenAI semantic conventions documentation:
|
||||
`OpenTelemetry GenAI Semantic Conventions <https://opentelemetry.io/docs/specs/semconv/gen-ai/>`__
|
||||
|
||||
.. warning::
|
||||
|
||||
The GenAI Semantic Conventions are still in incubation and
|
||||
subject to changes in future releases.
|
||||
|
||||
|
||||
Args:
|
||||
tool_name (str): The name of the tool being executed.
|
||||
tracer (Optional[trace.Tracer]): The tracer to use for creating the span.
|
||||
parent (Optional[Span]): The parent span to link this span to.
|
||||
tool_description (Optional[str]): A description of the tool.
|
||||
tool_call_id (Optional[str]): A unique identifier for the tool call.
|
||||
"""
|
||||
if tracer is None:
|
||||
tracer = trace.get_tracer("autogen-core")
|
||||
span_attributes = {
|
||||
GEN_AI_OPERATION_NAME: GenAiOperationNameValues.EXECUTE_TOOL.value,
|
||||
GEN_AI_SYSTEM: GENAI_SYSTEM_AUTOGEN,
|
||||
GEN_AI_TOOL_NAME: tool_name,
|
||||
}
|
||||
if tool_description is not None:
|
||||
span_attributes[GEN_AI_TOOL_DESCRIPTION] = tool_description
|
||||
if tool_call_id is not None:
|
||||
span_attributes[GEN_AI_TOOL_CALL_ID] = tool_call_id
|
||||
with tracer.start_as_current_span(
|
||||
f"{GenAiOperationNameValues.EXECUTE_TOOL.value} {tool_name}",
|
||||
kind=SpanKind.INTERNAL,
|
||||
context=trace.set_span_in_context(parent) if parent else None,
|
||||
attributes=span_attributes,
|
||||
) as span:
|
||||
try:
|
||||
yield span
|
||||
except Exception as e:
|
||||
# Set the exception details on the span if an error occurs
|
||||
span.record_exception(e)
|
||||
span.set_status(trace.Status(trace.StatusCode.ERROR, str(e)))
|
||||
span.set_attribute(ERROR_TYPE, type(e).__name__)
|
||||
raise
|
||||
|
||||
|
||||
@contextmanager
|
||||
def trace_create_agent_span(
|
||||
agent_name: str,
|
||||
*,
|
||||
tracer: Optional[trace.Tracer] = None,
|
||||
parent: Optional[Span] = None,
|
||||
agent_id: Optional[str] = None,
|
||||
agent_description: Optional[str] = None,
|
||||
) -> Generator[Span, Any, None]:
|
||||
"""Context manager to create a span for agent creation following the
|
||||
OpenTelemetry Semantic conventions for generative AI systems.
|
||||
|
||||
See the GenAI semantic conventions documentation:
|
||||
`OpenTelemetry GenAI Semantic Conventions <https://opentelemetry.io/docs/specs/semconv/gen-ai/>`__
|
||||
|
||||
.. warning::
|
||||
|
||||
The GenAI Semantic Conventions are still in incubation and
|
||||
subject to changes in future releases.
|
||||
|
||||
Args:
|
||||
agent_name (str): The name of the agent being created.
|
||||
tracer (Optional[trace.Tracer]): The tracer to use for creating the span.
|
||||
parent (Optional[Span]): The parent span to link this span to.
|
||||
agent_id (Optional[str]): The unique identifier for the agent.
|
||||
agent_description (Optional[str]): A description of the agent.
|
||||
"""
|
||||
if tracer is None:
|
||||
tracer = trace.get_tracer("autogen-core")
|
||||
span_attributes = {
|
||||
GEN_AI_OPERATION_NAME: GenAiOperationNameValues.CREATE_AGENT.value,
|
||||
GEN_AI_SYSTEM: GENAI_SYSTEM_AUTOGEN,
|
||||
GEN_AI_AGENT_NAME: agent_name,
|
||||
}
|
||||
if agent_id is None:
|
||||
# Try to see if we can get the agent ID from the current context
|
||||
try:
|
||||
agent_id = str(AgentInstantiationContext.current_agent_id())
|
||||
except RuntimeError:
|
||||
agent_id = None
|
||||
if agent_id is not None:
|
||||
span_attributes[GEN_AI_AGENT_ID] = agent_id
|
||||
if agent_description is not None:
|
||||
span_attributes[GEN_AI_AGENT_DESCRIPTION] = agent_description
|
||||
with tracer.start_as_current_span(
|
||||
f"{GenAiOperationNameValues.CREATE_AGENT.value} {agent_name}",
|
||||
kind=SpanKind.CLIENT,
|
||||
context=trace.set_span_in_context(parent) if parent else None,
|
||||
attributes=span_attributes,
|
||||
) as span:
|
||||
try:
|
||||
yield span
|
||||
except Exception as e:
|
||||
# Set the exception details on the span if an error occurs
|
||||
span.record_exception(e)
|
||||
span.set_status(trace.Status(trace.StatusCode.ERROR, str(e)))
|
||||
span.set_attribute(ERROR_TYPE, type(e).__name__)
|
||||
raise

@contextmanager
def trace_invoke_agent_span(
agent_name: str,
*,
tracer: Optional[trace.Tracer] = None,
parent: Optional[Span] = None,
agent_id: Optional[str] = None,
agent_description: Optional[str] = None,
) -> Generator[Span, Any, None]:
"""Context manager to create a span for invoking an agent following the
OpenTelemetry Semantic Conventions for generative AI systems.

See the GenAI semantic conventions documentation:
`OpenTelemetry GenAI Semantic Conventions <https://opentelemetry.io/docs/specs/semconv/gen-ai/>`__

.. warning::

The GenAI Semantic Conventions are still in incubation and
subject to change in future releases.

Args:
agent_name (str): The name of the agent being invoked.
tracer (Optional[trace.Tracer]): The tracer to use for creating the span.
parent (Optional[Span]): The parent span to link this span to.
agent_id (Optional[str]): The unique identifier for the agent.
agent_description (Optional[str]): A description of the agent.
"""
if tracer is None:
tracer = trace.get_tracer("autogen-core")
span_attributes = {
GEN_AI_OPERATION_NAME: GenAiOperationNameValues.INVOKE_AGENT.value,
GEN_AI_SYSTEM: GENAI_SYSTEM_AUTOGEN,
GEN_AI_AGENT_NAME: agent_name,
}
if agent_id is not None:
span_attributes[GEN_AI_AGENT_ID] = agent_id
if agent_description is not None:
span_attributes[GEN_AI_AGENT_DESCRIPTION] = agent_description
with tracer.start_as_current_span(
f"{GenAiOperationNameValues.INVOKE_AGENT.value} {agent_name}",
kind=SpanKind.CLIENT,
context=trace.set_span_in_context(parent) if parent else None,
attributes=span_attributes,
) as span:
try:
yield span
except Exception as e:
# Record the exception details on the span if an error occurs.
span.record_exception(e)
span.set_status(trace.Status(trace.StatusCode.ERROR, str(e)))
span.set_attribute(ERROR_TYPE, type(e).__name__)
raise
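
The invoke_agent helper is used the same way; a hedged sketch (the run_traced wrapper is illustrative, not part of this diff):

# Sketch: wrap an agent invocation in an invoke_agent span.
from autogen_core import trace_invoke_agent_span

async def run_traced(agent, task):
    with trace_invoke_agent_span(agent_name=agent.name, agent_description=agent.description):
        return await agent.run(task=task)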

@ -83,7 +83,9 @@ class ToolAgent(RoutedAgent):
else:
try:
arguments = json.loads(message.arguments)
result = await tool.run_json(args=arguments, cancellation_token=ctx.cancellation_token)
result = await tool.run_json(
args=arguments, cancellation_token=ctx.cancellation_token, call_id=message.id
)
result_as_str = tool.return_value_as_string(result)
except json.JSONDecodeError as e:
raise InvalidToolArgumentsException(
@ -5,13 +5,13 @@ from collections.abc import Sequence
from typing import Any, Dict, Generic, Mapping, Protocol, Type, TypeVar, cast, runtime_checkable

import jsonref
from opentelemetry.trace import get_tracer
from pydantic import BaseModel
from typing_extensions import NotRequired, TypedDict

from .. import EVENT_LOGGER_NAME, CancellationToken
from .._component_config import ComponentBase
from .._function_utils import normalize_annotated_type
from .._telemetry import trace_tool_span
from ..logging import ToolCallEvent

T = TypeVar("T", bound=BaseModel, contravariant=True)
@ -52,7 +52,9 @@ class Tool(Protocol):

def return_value_as_string(self, value: Any) -> str: ...

async def run_json(self, args: Mapping[str, Any], cancellation_token: CancellationToken) -> Any: ...
async def run_json(
self, args: Mapping[str, Any], cancellation_token: CancellationToken, call_id: str | None = None
) -> Any: ...

async def save_state_json(self) -> Mapping[str, Any]: ...
@ -147,14 +149,23 @@ class BaseTool(ABC, Tool, Generic[ArgsT, ReturnT], ComponentBase[BaseModel]):
@abstractmethod
async def run(self, args: ArgsT, cancellation_token: CancellationToken) -> ReturnT: ...

async def run_json(self, args: Mapping[str, Any], cancellation_token: CancellationToken) -> Any:
with get_tracer("base_tool").start_as_current_span(
self._name,
attributes={
"tool_name": self._name,
"tool_description": self._description,
"tool_args": json.dumps(args),
},
async def run_json(
self, args: Mapping[str, Any], cancellation_token: CancellationToken, call_id: str | None = None
) -> Any:
"""Run the tool with the provided arguments in a dictionary.

Args:
args (Mapping[str, Any]): The arguments to pass to the tool.
cancellation_token (CancellationToken): A token to cancel the operation if needed.
call_id (str | None): An optional identifier for the tool call, used for tracing.

Returns:
Any: The return value of the tool's run method.
"""
with trace_tool_span(
tool_name=self._name,
tool_description=self._description,
tool_call_id=call_id,
):
# Execute the tool's run method
return_value = await self.run(self._args_type.model_validate(args), cancellation_token)

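A hedged sketch of the extended run_json from the caller's side (the add tool and the literal call id are illustrative; in practice call_id would be the id of the model's FunctionCall):

import asyncio
from autogen_core import CancellationToken
from autogen_core.tools import FunctionTool

async def add(x: int, y: int) -> int:
    """Add two integers."""
    return x + y

async def main() -> None:
    tool = FunctionTool(add, description="Add two integers.")
    # The call id is attached to the execute_tool span via trace_tool_span.
    result = await tool.run_json({"x": 1, "y": 2}, CancellationToken(), call_id="call_123")
    print(tool.return_value_as_string(result))

asyncio.run(main())
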
@ -40,7 +40,11 @@ class StaticWorkbench(Workbench, Component[StaticWorkbenchConfig]):
return [tool.schema for tool in self._tools]

async def call_tool(
self, name: str, arguments: Mapping[str, Any] | None = None, cancellation_token: CancellationToken | None = None
self,
name: str,
arguments: Mapping[str, Any] | None = None,
cancellation_token: CancellationToken | None = None,
call_id: str | None = None,
) -> ToolResult:
tool = next((tool for tool in self._tools if tool.name == name), None)
if tool is None:
@ -54,7 +58,7 @@ class StaticWorkbench(Workbench, Component[StaticWorkbenchConfig]):
if not arguments:
arguments = {}
try:
result_future = asyncio.ensure_future(tool.run_json(arguments, cancellation_token))
result_future = asyncio.ensure_future(tool.run_json(arguments, cancellation_token, call_id=call_id))
cancellation_token.link_future(result_future)
actual_tool_output = await result_future
is_error = False

@ -105,7 +105,11 @@ class Workbench(ABC, ComponentBase[BaseModel]):

@abstractmethod
async def call_tool(
self, name: str, arguments: Mapping[str, Any] | None = None, cancellation_token: CancellationToken | None = None
self,
name: str,
arguments: Mapping[str, Any] | None = None,
cancellation_token: CancellationToken | None = None,
call_id: str | None = None,
) -> ToolResult:
"""
Call a tool in the workbench.
@ -116,6 +120,7 @@ class Workbench(ABC, ComponentBase[BaseModel]):
If None, the tool will be called with no arguments.
cancellation_token (CancellationToken | None): An optional cancellation token
to cancel the tool execution.
call_id (str | None): An optional identifier for the tool call, used for tracing.
Returns:
ToolResult: The result of the tool execution.
"""

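A short usage sketch for the extended call_tool on a concrete workbench (illustrative, not part of this diff; the echo tool, its description, and the literal call id are assumptions):

import asyncio
from autogen_core.tools import FunctionTool, StaticWorkbench

def echo(text: str) -> str:
    """Return the input text unchanged."""
    return text

async def main() -> None:
    workbench = StaticWorkbench(tools=[FunctionTool(echo, description="Echo the input.")])
    # call_id is forwarded to the tool's run_json and onto the execute_tool span.
    result = await workbench.call_tool("echo", {"text": "hi"}, call_id="call_abc")
    print(result)

asyncio.run(main())
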
@ -559,7 +559,7 @@ class AzureAIAgent(BaseChatAgent):
if tool is None:
raise ValueError(f"The tool '{tool_call.name}' is not available.")
arguments = json.loads(tool_call.arguments)
result = await tool.run_json(arguments, cancellation_token)
result = await tool.run_json(arguments, cancellation_token, call_id=tool_call.id)
return tool.return_value_as_string(result)

async def _upload_files(

@ -481,7 +481,7 @@ class OpenAIAgent(BaseChatAgent, Component[OpenAIAgentConfig]):
is_error=True,
)

result = await tool.run_json(arguments, cancellation_token)
result = await tool.run_json(arguments, cancellation_token, call_id=tool_call.id)
return FunctionExecutionResult(
content=tool.return_value_as_string(result), call_id=tool_call.id, name=tool_name, is_error=False
)

@ -389,7 +389,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
if tool is None:
raise ValueError(f"The tool '{tool_call.name}' is not available.")
arguments = json.loads(tool_call.arguments)
result = await tool.run_json(arguments, cancellation_token)
result = await tool.run_json(arguments, cancellation_token, call_id=tool_call.id)
return tool.return_value_as_string(result)

async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:

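Each of these call sites follows the same pattern: the model-assigned FunctionCall.id is forwarded as call_id. The pattern in isolation (a hedged sketch; the execute helper is illustrative):

import json
from autogen_core import CancellationToken, FunctionCall
from autogen_core.tools import Tool

async def execute(call: FunctionCall, tool: Tool) -> str:
    # Forward the model's call id so the execute_tool span can record it.
    arguments = json.loads(call.arguments)
    result = await tool.run_json(arguments, CancellationToken(), call_id=call.id)
    return tool.return_value_as_string(result)
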
@ -3,7 +3,7 @@ import builtins
import warnings
from typing import Any, List, Literal, Mapping

from autogen_core import CancellationToken, Component, Image
from autogen_core import CancellationToken, Component, Image, trace_tool_span
from autogen_core.tools import (
ImageResultContent,
ParametersSchema,
@ -192,7 +192,11 @@ class McpWorkbench(Workbench, Component[McpWorkbenchConfig]):
return schema

async def call_tool(
self, name: str, arguments: Mapping[str, Any] | None = None, cancellation_token: CancellationToken | None = None
self,
name: str,
arguments: Mapping[str, Any] | None = None,
cancellation_token: CancellationToken | None = None,
call_id: str | None = None,
) -> ToolResult:
if not self._actor:
await self.start()  # fallback to start the actor if not initialized instead of raising an error
@ -204,30 +208,34 @@ class McpWorkbench(Workbench, Component[McpWorkbenchConfig]):
cancellation_token = CancellationToken()
if not arguments:
arguments = {}
try:
result_future = await self._actor.call("call_tool", {"name": name, "kargs": arguments})
cancellation_token.link_future(result_future)
result = await result_future
assert isinstance(
result, CallToolResult
), f"call_tool must return a CallToolResult, instead of : {str(type(result))}"
result_parts: List[TextResultContent | ImageResultContent] = []
is_error = result.isError
for content in result.content:
if isinstance(content, TextContent):
result_parts.append(TextResultContent(content=content.text))
elif isinstance(content, ImageContent):
result_parts.append(ImageResultContent(content=Image.from_base64(content.data)))
elif isinstance(content, EmbeddedResource):
# TODO: how to handle embedded resources?
# For now we just use text representation.
result_parts.append(TextResultContent(content=content.model_dump_json()))
else:
raise ValueError(f"Unknown content type from server: {type(content)}")
except Exception as e:
error_message = self._format_errors(e)
is_error = True
result_parts = [TextResultContent(content=error_message)]
with trace_tool_span(
tool_name=name,
tool_call_id=call_id,
):
try:
result_future = await self._actor.call("call_tool", {"name": name, "kargs": arguments})
cancellation_token.link_future(result_future)
result = await result_future
assert isinstance(
result, CallToolResult
), f"call_tool must return a CallToolResult, instead of : {str(type(result))}"
result_parts: List[TextResultContent | ImageResultContent] = []
is_error = result.isError
for content in result.content:
if isinstance(content, TextContent):
result_parts.append(TextResultContent(content=content.text))
elif isinstance(content, ImageContent):
result_parts.append(ImageResultContent(content=Image.from_base64(content.data)))
elif isinstance(content, EmbeddedResource):
# TODO: how to handle embedded resources?
# For now we just use text representation.
result_parts.append(TextResultContent(content=content.model_dump_json()))
else:
raise ValueError(f"Unknown content type from server: {type(content)}")
except Exception as e:
error_message = self._format_errors(e)
is_error = True
result_parts = [TextResultContent(content=error_message)]
return ToolResult(name=name, result=result_parts, is_error=is_error)

def _format_errors(self, error: Exception) -> str:

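These helpers emit through the global tracer provider, so nothing is exported unless one is installed. A minimal, illustrative exporter setup (not part of this commit) that makes the create_agent, invoke_agent, and execute_tool spans visible:

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# Print every span to stdout; swap in an OTLP exporter for a real backend.
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)
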
@ -143,7 +143,9 @@ class WeatherTool(Tool):
return json.dumps(value)
return str(value)

async def run_json(self, args: Mapping[str, Any], cancellation_token: CancellationToken) -> Dict[str, Any]:
async def run_json(
self, args: Mapping[str, Any], cancellation_token: CancellationToken, call_id: str | None = None
) -> Dict[str, Any]:
_ = GetWeatherArgs(**args)
return WeatherResponse(temperature=72.5, conditions="sunny").model_dump()

@ -189,7 +189,7 @@ Today's date is {datetime.datetime.now().strftime("%Y-%m-%d")}
if tool is None:
raise ValueError(f"Tool not found: {call.name}")
arguments = json.loads(call.arguments)
await tool.run_json(arguments, ctx.cancellation_token)
await tool.run_json(arguments, ctx.cancellation_token, call_id=call.id)
await self.publish_message(
TerminateMessage(content="Meeting scheduled"),
topic_id=DefaultTopicId("scheduling_assistant_conversation"),

@ -98,7 +98,7 @@ class SimpleAssistantAgent(RoutedAgent):
# Run the tool and capture the result.
try:
arguments = json.loads(call.arguments)
result = await tool.run_json(arguments, cancellation_token)
result = await tool.run_json(arguments, cancellation_token, call_id=call.id)
return FunctionExecutionResult(
call_id=call.id, content=tool.return_value_as_string(result), is_error=False, name=tool.name
)

@ -70,14 +70,14 @@ class AIAgent(RoutedAgent):
await self._response_queue.put({"type":"function","message":f"Executing {call.name}"})
if call.name in self._tools:
# Execute the tool directly.
result = await self._tools[call.name].run_json(arguments, ctx.cancellation_token)
result = await self._tools[call.name].run_json(arguments, ctx.cancellation_token, call_id=call.id)
result_as_str = self._tools[call.name].return_value_as_string(result)
tool_call_results.append(
FunctionExecutionResult(call_id=call.id, content=result_as_str, is_error=False, name=call.name)
)
elif call.name in self._delegate_tools:
# Execute the tool to get the delegate agent's topic type.
result = await self._delegate_tools[call.name].run_json(arguments, ctx.cancellation_token)
result = await self._delegate_tools[call.name].run_json(arguments, ctx.cancellation_token, call_id=call.id)
topic_type = self._delegate_tools[call.name].return_value_as_string(result)
# Create the context for the delegate agent, including the function call and the result.
delegate_messages = list(message.context) + [

119
python/uv.lock
generated
@ -471,6 +471,7 @@ source = { editable = "packages/autogen-core" }
dependencies = [
{ name = "jsonref" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-semantic-conventions" },
{ name = "pillow" },
{ name = "protobuf" },
{ name = "pydantic" },
@ -499,6 +500,7 @@ dev = [
{ name = "markdownify" },
{ name = "myst-nb" },
{ name = "nbqa" },
{ name = "opentelemetry-instrumentation-openai" },
{ name = "opentelemetry-sdk" },
{ name = "pip" },
{ name = "polars" },
@ -528,7 +530,8 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "jsonref", specifier = "~=1.1.0" },
{ name = "opentelemetry-api", specifier = ">=1.27.0" },
{ name = "opentelemetry-api", specifier = ">=1.34.1" },
{ name = "opentelemetry-semantic-conventions", specifier = "==0.55b1" },
{ name = "pillow", specifier = ">=11.0.0" },
{ name = "protobuf", specifier = "~=5.29.3" },
{ name = "pydantic", specifier = ">=2.10.0,<3.0.0" },
@ -557,7 +560,8 @@ dev = [
{ name = "markdownify" },
{ name = "myst-nb", specifier = "==1.1.2" },
{ name = "nbqa" },
{ name = "opentelemetry-sdk", specifier = ">=1.27.0" },
{ name = "opentelemetry-instrumentation-openai" },
{ name = "opentelemetry-sdk", specifier = ">=1.34.1" },
{ name = "pip" },
{ name = "polars" },
{ name = "pydata-sphinx-theme", specifier = "==0.16.0" },
@ -5060,81 +5064,81 @@ wheels = [

[[package]]
name = "opentelemetry-api"
version = "1.29.0"
version = "1.34.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "deprecated" },
{ name = "importlib-metadata" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/bc/8e/b886a5e9861afa188d1fe671fb96ff9a1d90a23d57799331e137cc95d573/opentelemetry_api-1.29.0.tar.gz", hash = "sha256:d04a6cf78aad09614f52964ecb38021e248f5714dc32c2e0d8fd99517b4d69cf", size = 62900 }
sdist = { url = "https://files.pythonhosted.org/packages/4d/5e/94a8cb759e4e409022229418294e098ca7feca00eb3c467bb20cbd329bda/opentelemetry_api-1.34.1.tar.gz", hash = "sha256:64f0bd06d42824843731d05beea88d4d4b6ae59f9fe347ff7dfa2cc14233bbb3", size = 64987 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/43/53/5249ea860d417a26a3a6f1bdedfc0748c4f081a3adaec3d398bc0f7c6a71/opentelemetry_api-1.29.0-py3-none-any.whl", hash = "sha256:5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8", size = 64304 },
{ url = "https://files.pythonhosted.org/packages/a5/3a/2ba85557e8dc024c0842ad22c570418dc02c36cbd1ab4b832a93edf071b8/opentelemetry_api-1.34.1-py3-none-any.whl", hash = "sha256:b7df4cb0830d5a6c29ad0c0691dbae874d8daefa934b8b1d642de48323d32a8c", size = 65767 },
]

[[package]]
name = "opentelemetry-exporter-otlp"
version = "1.29.0"
version = "1.34.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-exporter-otlp-proto-grpc" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
]
sdist = { url = "https://files.pythonhosted.org/packages/91/23/824e71822969cd3018897f5b0246baf8305bf7635f20df1ce5dfc423c32d/opentelemetry_exporter_otlp-1.29.0.tar.gz", hash = "sha256:ee7dfcccbb5e87ad9b389908452e10b7beeab55f70a83f41ce5b8c4efbde6544", size = 6159 }
sdist = { url = "https://files.pythonhosted.org/packages/27/3c/5670ffcb88691f8a29b753d2639550cf6726be4bd5d101373294419b7992/opentelemetry_exporter_otlp-1.34.0.tar.gz", hash = "sha256:d23df4fc22e0a863db2b2117c5a5780d5fa3bbeb65fdce9848d1f98fc3ace4cd", size = 6176 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cf/54/2a84533f39bb240958d691bb3ddf1c3fb6a92356654fb2e02a210f65ce6b/opentelemetry_exporter_otlp-1.29.0-py3-none-any.whl", hash = "sha256:b8da6e20f5b0ffe604154b1e16a407eade17ce310c42fb85bb4e1246fc3688ad", size = 7011 },
{ url = "https://files.pythonhosted.org/packages/71/9e/53cf14827b7e3084f28c73934f899cd3a3dde22efc3fe869a0ff7151ffd4/opentelemetry_exporter_otlp-1.34.0-py3-none-any.whl", hash = "sha256:73e2465560ef4e932b5348598593db202621eb666c34349c9cefc90a19aaf5c6", size = 7039 },
]

[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
version = "1.29.0"
version = "1.34.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-proto" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/58/f7fd7eaf592b2521999a4271ab3ce1c82fe37fe9b0dc25c348398d95d66a/opentelemetry_exporter_otlp_proto_common-1.29.0.tar.gz", hash = "sha256:e7c39b5dbd1b78fe199e40ddfe477e6983cb61aa74ba836df09c3869a3e3e163", size = 19133 }
sdist = { url = "https://files.pythonhosted.org/packages/81/12/0d549f53e70a8297c1817705febe2bdb81479dc74c5b2496014f35f74455/opentelemetry_exporter_otlp_proto_common-1.34.0.tar.gz", hash = "sha256:5916d9ceda8c733adbec5e9cecf654fbf359e9f619ff43214277076fba888557", size = 20818 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9e/75/7609bda3d72bf307839570b226180513e854c01443ebe265ed732a4980fc/opentelemetry_exporter_otlp_proto_common-1.29.0-py3-none-any.whl", hash = "sha256:a9d7376c06b4da9cf350677bcddb9618ed4b8255c3f6476975f5e38274ecd3aa", size = 18459 },
{ url = "https://files.pythonhosted.org/packages/e9/78/7bfd2d027aa36a68fff4019950569f8cda27793441098cda0a82ea2ecb89/opentelemetry_exporter_otlp_proto_common-1.34.0-py3-none-any.whl", hash = "sha256:a5ab7a9b7c3c7ba957c8ddcb08c0c93b1d732e066f544682a250ecf4d7a9ceef", size = 18835 },
]

[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.29.0"
version = "1.34.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "deprecated" },
{ name = "googleapis-common-protos" },
{ name = "grpcio" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-common" },
{ name = "opentelemetry-proto" },
{ name = "opentelemetry-sdk" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/41/aa/b3f2190613141f35fe15145bf438334fdd1eac8aeeee4f7ecbc887999443/opentelemetry_exporter_otlp_proto_grpc-1.29.0.tar.gz", hash = "sha256:3d324d07d64574d72ed178698de3d717f62a059a93b6b7685ee3e303384e73ea", size = 26224 }
sdist = { url = "https://files.pythonhosted.org/packages/51/d3/2d1037ba79cfd0cc01258ebf4ec5140d6cec5c337d885ed2f07502d2a1d3/opentelemetry_exporter_otlp_proto_grpc-1.34.0.tar.gz", hash = "sha256:a634425340f506d5ebf641c92d88eb873754d4c5259b5b816afb234c6f87b37d", size = 22565 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f2/de/4b4127a25d1594851d99032f3a9acb09cb512d11edec713410fb906607f4/opentelemetry_exporter_otlp_proto_grpc-1.29.0-py3-none-any.whl", hash = "sha256:5a2a3a741a2543ed162676cf3eefc2b4150e6f4f0a193187afb0d0e65039c69c", size = 18520 },
{ url = "https://files.pythonhosted.org/packages/bd/42/094c93ffda2834631a121cdc89af80c7a9f2ee6249a4498d7f5151beb57e/opentelemetry_exporter_otlp_proto_grpc-1.34.0-py3-none-any.whl", hash = "sha256:31c41017af85833242d49beb07bde7341b0a145f0b898ee383f3e3019037afb1", size = 18612 },
]

[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.29.0"
version = "1.34.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "deprecated" },
{ name = "googleapis-common-protos" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-common" },
{ name = "opentelemetry-proto" },
{ name = "opentelemetry-sdk" },
{ name = "requests" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ab/88/e70a2e9fbb1bddb1ab7b6d74fb02c68601bff5948292ce33464c84ee082e/opentelemetry_exporter_otlp_proto_http-1.29.0.tar.gz", hash = "sha256:b10d174e3189716f49d386d66361fbcf6f2b9ad81e05404acdee3f65c8214204", size = 15041 }
sdist = { url = "https://files.pythonhosted.org/packages/99/80/c382acdddc75d440a4bc5283a1cda997435985031ec2d978d99ab3ef9461/opentelemetry_exporter_otlp_proto_http-1.34.0.tar.gz", hash = "sha256:3f674dbc32549a2fae413a77428d59b38e8c8b4caaf7f594ae2c2f8d2f018014", size = 15353 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/31/49/a1c3d24e8fe73b5f422e21b46c24aed3db7fd9427371c06442e7bdfe4d3b/opentelemetry_exporter_otlp_proto_http-1.29.0-py3-none-any.whl", hash = "sha256:b228bdc0f0cfab82eeea834a7f0ffdd2a258b26aa33d89fb426c29e8e934d9d0", size = 17217 },
{ url = "https://files.pythonhosted.org/packages/69/c5/468c245231feff02ac41573a1d73b1bbd5ff0412365f441de785a4fa178c/opentelemetry_exporter_otlp_proto_http-1.34.0-py3-none-any.whl", hash = "sha256:b3cc9dd5152fae2dd32f3566bbfbc7d26d6ab3ef6c6b3f85bc9f6adc059d713f", size = 17743 },
]

[[package]]
name = "opentelemetry-instrumentation"
version = "0.50b0"
version = "0.55b1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-api" },
@ -5142,14 +5146,14 @@ dependencies = [
{ name = "packaging" },
{ name = "wrapt" },
]
sdist = { url = "https://files.pythonhosted.org/packages/79/2e/2e59a7cb636dc394bd7cf1758ada5e8ed87590458ca6bb2f9c26e0243847/opentelemetry_instrumentation-0.50b0.tar.gz", hash = "sha256:7d98af72de8dec5323e5202e46122e5f908592b22c6d24733aad619f07d82979", size = 26539 }
sdist = { url = "https://files.pythonhosted.org/packages/cb/69/d8995f229ddf4d98b9c85dd126aeca03dd1742f6dc5d3bc0d2f6dae1535c/opentelemetry_instrumentation-0.55b1.tar.gz", hash = "sha256:2dc50aa207b9bfa16f70a1a0571e011e737a9917408934675b89ef4d5718c87b", size = 28552 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ff/b1/55a77152a83ec8998e520a3a575f44af1020cfe4bdc000b7538583293b85/opentelemetry_instrumentation-0.50b0-py3-none-any.whl", hash = "sha256:b8f9fc8812de36e1c6dffa5bfc6224df258841fb387b6dfe5df15099daa10630", size = 30728 },
{ url = "https://files.pythonhosted.org/packages/60/7d/8ddfda1506c2fcca137924d5688ccabffa1aed9ec0955b7d0772de02cec3/opentelemetry_instrumentation-0.55b1-py3-none-any.whl", hash = "sha256:cbb1496b42bc394e01bc63701b10e69094e8564e281de063e4328d122cc7a97e", size = 31108 },
]

[[package]]
name = "opentelemetry-instrumentation-asgi"
version = "0.50b0"
version = "0.55b1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "asgiref" },
@ -5158,14 +5162,14 @@ dependencies = [
{ name = "opentelemetry-semantic-conventions" },
{ name = "opentelemetry-util-http" },
]
sdist = { url = "https://files.pythonhosted.org/packages/49/cc/a7b2fd243c6d2621803092eba62e450071b6752dfe4f64f530bbfd91a328/opentelemetry_instrumentation_asgi-0.50b0.tar.gz", hash = "sha256:3ca4cb5616ae6a3e8ce86e7d5c360a8d8cc8ed722cf3dc8a5e44300774e87d49", size = 24105 }
sdist = { url = "https://files.pythonhosted.org/packages/51/4a/900ea42d36757e3b7219f873d3d16358107da43fcb8d7f11a2b1d0bb56a0/opentelemetry_instrumentation_asgi-0.55b1.tar.gz", hash = "sha256:615cde388dd3af4d0e52629a6c75828253618aebcc6e65d93068463811528606", size = 24356 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d2/81/0899c6b56b1023835f266d909250d439174afa0c34ed5944c5021d3da263/opentelemetry_instrumentation_asgi-0.50b0-py3-none-any.whl", hash = "sha256:2ba1297f746e55dec5a17fe825689da0613662fb25c004c3965a6c54b1d5be22", size = 16304 },
{ url = "https://files.pythonhosted.org/packages/ef/45/b5f78f0456f8e2e2ec152d7b6496197f5661c7ca49f610fe19c63b350aa4/opentelemetry_instrumentation_asgi-0.55b1-py3-none-any.whl", hash = "sha256:186620f7d0a71c8c817c5cbe91c80faa8f9c50967d458b8131c5694e21eb8583", size = 16402 },
]

[[package]]
name = "opentelemetry-instrumentation-fastapi"
version = "0.50b0"
version = "0.55b1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-api" },
@ -5174,57 +5178,82 @@ dependencies = [
{ name = "opentelemetry-semantic-conventions" },
{ name = "opentelemetry-util-http" },
]
sdist = { url = "https://files.pythonhosted.org/packages/8d/f8/1917b0b3e414e23c7d71c9a33f0ce020f94bc47d22a30f54ace704e07588/opentelemetry_instrumentation_fastapi-0.50b0.tar.gz", hash = "sha256:16b9181682136da210295def2bb304a32fb9bdee9a935cdc9da43567f7c1149e", size = 19214 }
sdist = { url = "https://files.pythonhosted.org/packages/2b/76/0df9cdff4cce18b1967e97152d419e2325c307ff96eb6ba8e69294690c18/opentelemetry_instrumentation_fastapi-0.55b1.tar.gz", hash = "sha256:bb9f8c13a053e7ff7da221248067529cc320e9308d57f3908de0afa36f6c5744", size = 20275 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/d6/37784bb30b213e2dd6838b9f96c2940907022c1b75ef1ff18a99afe42433/opentelemetry_instrumentation_fastapi-0.50b0-py3-none-any.whl", hash = "sha256:8f03b738495e4705fbae51a2826389c7369629dace89d0f291c06ffefdff5e52", size = 12079 },
{ url = "https://files.pythonhosted.org/packages/84/6e/d608a9336ede3d15869c70ebdd4ec670f774641104b0873bb973bce9d822/opentelemetry_instrumentation_fastapi-0.55b1-py3-none-any.whl", hash = "sha256:af4c09aebb0bd6b4a0881483b175e76547d2bc96329c94abfb794bf44f29f6bb", size = 12713 },
]

[[package]]
name = "opentelemetry-instrumentation-openai"
version = "0.40.8"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-api" },
{ name = "opentelemetry-instrumentation" },
{ name = "opentelemetry-semantic-conventions" },
{ name = "opentelemetry-semantic-conventions-ai" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/bf/54/2949b4ffa301c09f0baa2addeb622dcc4b8e2f353903552e8a167929ffac/opentelemetry_instrumentation_openai-0.40.8.tar.gz", hash = "sha256:e151ccdcaae58713693b0ede860511eb560f839fedb34b46c7ccc18cd75da692", size = 15121 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d9/a3/6d09c4544ab6715b59a549cfc5d72b7e3d357d57aae4b60a25070b1a10c3/opentelemetry_instrumentation_openai-0.40.8-py3-none-any.whl", hash = "sha256:a0b352f6612dd00dba68e6d8bb83029ce6b1162caa74a232eaf0a55e52a8753e", size = 23121 },
]

[[package]]
name = "opentelemetry-proto"
version = "1.29.0"
version = "1.34.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "protobuf" },
]
sdist = { url = "https://files.pythonhosted.org/packages/80/52/fd3b3d79e1b00ad2dcac92db6885e49bedbf7a6828647954e4952d653132/opentelemetry_proto-1.29.0.tar.gz", hash = "sha256:3c136aa293782e9b44978c738fff72877a4b78b5d21a64e879898db7b2d93e5d", size = 34320 }
sdist = { url = "https://files.pythonhosted.org/packages/95/19/45adb533d0a34990942d12eefb2077d59b22958940c71484a45e694f5dd7/opentelemetry_proto-1.34.0.tar.gz", hash = "sha256:73e40509b692630a47192888424f7e0b8fb19d9ecf2f04e6f708170cd3346dfe", size = 34343 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bd/66/a500e38ee322d89fce61c74bd7769c8ef3bebc6c2f43fda5f3fc3441286d/opentelemetry_proto-1.29.0-py3-none-any.whl", hash = "sha256:495069c6f5495cbf732501cdcd3b7f60fda2b9d3d4255706ca99b7ca8dec53ff", size = 55818 },
{ url = "https://files.pythonhosted.org/packages/db/58/708881f5ad3c72954caa61ac970d3c01209dbebf5e534fb840dfb777bad2/opentelemetry_proto-1.34.0-py3-none-any.whl", hash = "sha256:ffb1f1b27552fda5a1cd581e34243cc0b6f134fb14c1c2a33cc3b4b208c9bf97", size = 55691 },
]

[[package]]
name = "opentelemetry-sdk"
version = "1.29.0"
version = "1.34.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-api" },
{ name = "opentelemetry-semantic-conventions" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0c/5a/1ed4c3cf6c09f80565fc085f7e8efa0c222712fd2a9412d07424705dcf72/opentelemetry_sdk-1.29.0.tar.gz", hash = "sha256:b0787ce6aade6ab84315302e72bd7a7f2f014b0fb1b7c3295b88afe014ed0643", size = 157229 }
sdist = { url = "https://files.pythonhosted.org/packages/6f/41/fe20f9036433da8e0fcef568984da4c1d1c771fa072ecd1a4d98779dccdd/opentelemetry_sdk-1.34.1.tar.gz", hash = "sha256:8091db0d763fcd6098d4781bbc80ff0971f94e260739aa6afe6fd379cdf3aa4d", size = 159441 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/1d/512b86af21795fb463726665e2f61db77d384e8779fdcf4cb0ceec47866d/opentelemetry_sdk-1.29.0-py3-none-any.whl", hash = "sha256:173be3b5d3f8f7d671f20ea37056710217959e774e2749d984355d1f9391a30a", size = 118078 },
{ url = "https://files.pythonhosted.org/packages/07/1b/def4fe6aa73f483cabf4c748f4c25070d5f7604dcc8b52e962983491b29e/opentelemetry_sdk-1.34.1-py3-none-any.whl", hash = "sha256:308effad4059562f1d92163c61c8141df649da24ce361827812c40abb2a1e96e", size = 118477 },
]

[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.50b0"
version = "0.55b1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "deprecated" },
{ name = "opentelemetry-api" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e7/4e/d7c7c91ff47cd96fe4095dd7231701aec7347426fd66872ff320d6cd1fcc/opentelemetry_semantic_conventions-0.50b0.tar.gz", hash = "sha256:02dc6dbcb62f082de9b877ff19a3f1ffaa3c306300fa53bfac761c4567c83d38", size = 100459 }
sdist = { url = "https://files.pythonhosted.org/packages/5d/f0/f33458486da911f47c4aa6db9bda308bb80f3236c111bf848bd870c16b16/opentelemetry_semantic_conventions-0.55b1.tar.gz", hash = "sha256:ef95b1f009159c28d7a7849f5cbc71c4c34c845bb514d66adfdf1b3fff3598b3", size = 119829 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/da/fb/dc15fad105450a015e913cfa4f5c27b6a5f1bea8fb649f8cae11e699c8af/opentelemetry_semantic_conventions-0.50b0-py3-none-any.whl", hash = "sha256:e87efba8fdb67fb38113efea6a349531e75ed7ffc01562f65b802fcecb5e115e", size = 166602 },
{ url = "https://files.pythonhosted.org/packages/1a/89/267b0af1b1d0ba828f0e60642b6a5116ac1fd917cde7fc02821627029bd1/opentelemetry_semantic_conventions-0.55b1-py3-none-any.whl", hash = "sha256:5da81dfdf7d52e3d37f8fe88d5e771e191de924cfff5f550ab0b8f7b2409baed", size = 196223 },
]

[[package]]
name = "opentelemetry-semantic-conventions-ai"
version = "0.4.9"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8c/ba/2405abde825cf654d09ba16bfcfb8c863156bccdc47d1f2a86df6331e7bb/opentelemetry_semantic_conventions_ai-0.4.9.tar.gz", hash = "sha256:54a0b901959e2de5124384925846bac2ea0a6dab3de7e501ba6aecf5e293fe04", size = 4920 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/98/f5196ba0f4105a4790cec8c6671cf676c96dfa29bfedfe3c4f112bf4e6ad/opentelemetry_semantic_conventions_ai-0.4.9-py3-none-any.whl", hash = "sha256:71149e46a72554ae17de46bca6c11ba540c19c89904bd4cc3111aac6edf10315", size = 5617 },
]

[[package]]
name = "opentelemetry-util-http"
version = "0.50b0"
version = "0.55b1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/69/10/ce3f0d1157cedbd819194f0b27a6bbb7c19a8bceb3941e4a4775014076cf/opentelemetry_util_http-0.50b0.tar.gz", hash = "sha256:dc4606027e1bc02aabb9533cc330dd43f874fca492e4175c31d7154f341754af", size = 7859 }
sdist = { url = "https://files.pythonhosted.org/packages/12/f7/3cc23b95921177cdda6d61d3475659b86bac335ed02dd19f994a850ceee3/opentelemetry_util_http-0.55b1.tar.gz", hash = "sha256:29e119c1f6796cccf5fc2aedb55274435cde5976d0ac3fec3ca20a80118f821e", size = 8038 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/64/8a/9e1b54f50d1fddebbeac9a9b0632f8db6ece7add904fb593ee2e268ee4de/opentelemetry_util_http-0.50b0-py3-none-any.whl", hash = "sha256:21f8aedac861ffa3b850f8c0a6c373026189eb8630ac6e14a2bf8c55695cc090", size = 6942 },
{ url = "https://files.pythonhosted.org/packages/a3/0a/49c5464efc0e6f6aa94a9ec054879efe2a59d7c1f6aacc500665b3d8afdc/opentelemetry_util_http-0.55b1-py3-none-any.whl", hash = "sha256:e134218df8ff010e111466650e5f019496b29c3b4f1b7de0e8ff8ebeafeebdf4", size = 7299 },
]

[[package]]
@ -8014,7 +8043,7 @@ wheels = [

[[package]]
name = "uptrace"
version = "1.29.0"
version = "1.27.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "opentelemetry-api" },
@ -8022,9 +8051,9 @@ dependencies = [
{ name = "opentelemetry-instrumentation" },
{ name = "opentelemetry-sdk" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c1/ec/733298e94e1388d51c4b91d83f32d3a55f2c73fa1a40756c22ea36066aac/uptrace-1.29.0.tar.gz", hash = "sha256:11247ca28d78aeb5a2651ca810305413c40f5ecc2dd001d7b94626d5fb9c47ea", size = 7695 }
sdist = { url = "https://files.pythonhosted.org/packages/f3/89/ba1df9328e4bd4b440ac6979e20ec8c63a26f6400598e806cc9dfef764f4/uptrace-1.27.0.tar.gz", hash = "sha256:983f783b2f4303d1d2bdfaf6ace1b7a5f072af47f78a7815f82c51fcf5099cac", size = 7633 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d7/08/976391773b90290c2f7a57d1962e994e7cb04f99afd3b094fe7f61545112/uptrace-1.29.0-py3-none-any.whl", hash = "sha256:05242df905390bc986988f9a9a2990a137841a560a50f62ec0318a26dcbd4118", size = 8615 },
{ url = "https://files.pythonhosted.org/packages/77/00/054ac30e9e8312c3c79371c495dd570865eab2a05bfcd640f6242d460c8b/uptrace-1.27.0-py3-none-any.whl", hash = "sha256:d5473efa33c34e3d5738d32d19301dbf004d4e19598c658f2fa9f3f09458f630", size = 8627 },
]

[[package]]