microsoft/autogen, commit 88c895fd48:

* Separate agent and team examples
* Add streaming output
* Refactor to better use the chainlit API
* Removed the user proxy example -- this needs a bit more work to improve the presentation on the ChainLit interface.

Co-authored-by: Victor Dibia <victordibia@microsoft.com>
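The file below is the agent example: it creates a single AssistantAgent with a get_weather tool, loads the model client from a YAML config, and streams model output into the ChainLit UI token by token via ModelClientStreamingChunkEvent.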
from typing import List, cast

import chainlit as cl
import yaml
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import ModelClientStreamingChunkEvent, TextMessage
from autogen_core import CancellationToken
from autogen_core.models import ChatCompletionClient


@cl.set_starters  # type: ignore
async def set_starts() -> List[cl.Starter]:
    return [
        cl.Starter(
            label="Greetings",
            message="Hello! What can you help me with today?",
        ),
        cl.Starter(
            label="Weather",
            message="Find the weather in New York City.",
        ),
    ]


@cl.step(type="tool")  # type: ignore
async def get_weather(city: str) -> str:
    return f"The weather in {city} is 73 degrees and Sunny."


@cl.on_chat_start  # type: ignore
async def start_chat() -> None:
    # Load model configuration and create the model client.
    with open("model_config.yaml", "r") as f:
        model_config = yaml.safe_load(f)
    model_client = ChatCompletionClient.load_component(model_config)

    # Create the assistant agent with the get_weather tool.
    assistant = AssistantAgent(
        name="assistant",
        tools=[get_weather],
        model_client=model_client,
        system_message="You are a helpful assistant",
        model_client_stream=True,  # Enable model client streaming.
        reflect_on_tool_use=True,  # Reflect on tool use.
    )

    # Set the assistant agent in the user session.
    cl.user_session.set("prompt_history", "")  # type: ignore
    cl.user_session.set("agent", assistant)  # type: ignore


@cl.on_message  # type: ignore
async def chat(message: cl.Message) -> None:
    # Get the assistant agent from the user session.
    agent = cast(AssistantAgent, cl.user_session.get("agent"))  # type: ignore
    # Construct the response message.
    response = cl.Message(content="")
    async for msg in agent.on_messages_stream(
        messages=[TextMessage(content=message.content, source="user")],
        cancellation_token=CancellationToken(),
    ):
        if isinstance(msg, ModelClientStreamingChunkEvent):
            # Stream the model client response to the user.
            await response.stream_token(msg.content)
        elif isinstance(msg, Response):
            # Done streaming the model client response. Send the message.
            await response.send()
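The agent reads its model configuration from model_config.yaml and instantiates the client with ChatCompletionClient.load_component. A minimal sketch of what that file might contain, assuming an OpenAI-backed client from the autogen-ext package (the provider path, model name, and key handling below are illustrative assumptions, not part of this commit):

    # model_config.yaml (hypothetical example; adjust provider and model to your setup)
    provider: autogen_ext.models.openai.OpenAIChatCompletionClient
    config:
      model: gpt-4o
      # api_key: ...  # or set the OPENAI_API_KEY environment variable instead

With a config in place, the app is launched through ChainLit's CLI, e.g. chainlit run app.py (the filename is an assumption; use whatever name this file is saved under).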