{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## OpenAI Assistants in AutoGen\n",
    "\n",
    "This notebook shows a very basic example of the [`GPTAssistantAgent`](https://github.com/microsoft/autogen/blob/main/autogen/agentchat/contrib/gpt_assistant_agent.py#L16C43-L16C43), which is an experimental AutoGen agent class that leverages the [OpenAI Assistant API](https://platform.openai.com/docs/assistants/overview) for conversational capabilities, working with\n",
    "`UserProxyAgent` in AutoGen."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "OpenAI client config of GPTAssistantAgent(assistant) - model: gpt-4-turbo-preview\n",
      "GPT Assistant only supports one OpenAI client. Using the first client in the list.\n",
      "No matching assistant found, creating a new assistant\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
      "\n",
      "Print hello world\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[33massistant\u001b[0m (to user_proxy):\n",
      "\n",
      "```python\n",
      "print(\"Hello, world!\")\n",
      "```\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[31m\n",
      ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
      "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
      "\n",
      "exitcode: 0 (execution succeeded)\n",
      "Code output: \n",
      "Hello, world!\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[33massistant\u001b[0m (to user_proxy):\n",
      "\n",
      "TERMINATE\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "ChatResult(chat_id=None, chat_history=[{'content': 'Print hello world', 'role': 'assistant'}, {'content': '```python\\nprint(\"Hello, world!\")\\n```\\n', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\nHello, world!\\n', 'role': 'assistant'}, {'content': 'TERMINATE\\n', 'role': 'user'}], summary='\\n', cost=({'total_cost': 0}, {'total_cost': 0}), human_input=[])"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import logging\n",
    "import os\n",
    "\n",
    "from autogen import AssistantAgent, UserProxyAgent, config_list_from_json\n",
    "from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent\n",
    "\n",
    "logger = logging.getLogger(__name__)\n",
    "logger.setLevel(logging.WARNING)\n",
    "\n",
    "assistant_id = os.environ.get(\"ASSISTANT_ID\", None)\n",
    "\n",
    "config_list = config_list_from_json(\"OAI_CONFIG_LIST\")\n",
    "llm_config = {\"config_list\": config_list}\n",
    "\n",
    "assistant_config = {\"assistant_id\": assistant_id}\n",
    "\n",
    "gpt_assistant = GPTAssistantAgent(\n",
    "    name=\"assistant\",\n",
    "    instructions=AssistantAgent.DEFAULT_SYSTEM_MESSAGE,\n",
    "    llm_config=llm_config,\n",
    "    assistant_config=assistant_config,\n",
    ")\n",
    "\n",
    "user_proxy = UserProxyAgent(\n",
    "    name=\"user_proxy\",\n",
    "    code_execution_config={\n",
    "        \"work_dir\": \"coding\",\n",
    "        \"use_docker\": False,\n",
    "    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n",
    "    is_termination_msg=lambda msg: \"TERMINATE\" in msg[\"content\"],\n",
    "    human_input_mode=\"NEVER\",\n",
    "    max_consecutive_auto_reply=1,\n",
    ")\n",
    "user_proxy.initiate_chat(gpt_assistant, message=\"Print hello world\")"
   ]
  },
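  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Because no `ASSISTANT_ID` environment variable was set, the cell above created a brand-new assistant (see the `No matching assistant found, creating a new assistant` log line). Below is a minimal sketch of how a later session could reuse an existing assistant instead of creating another one, assuming its id (the placeholder `asst_...`) has been looked up in the OpenAI dashboard; it only uses the constructor arguments already shown above.\n",
    "\n",
    "```python\n",
    "from autogen import AssistantAgent, config_list_from_json\n",
    "from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent\n",
    "\n",
    "config_list = config_list_from_json(\"OAI_CONFIG_LIST\")\n",
    "\n",
    "# Passing an existing assistant id via assistant_config points the agent at\n",
    "# that assistant instead of creating a new one (the id below is a placeholder).\n",
    "gpt_assistant = GPTAssistantAgent(\n",
    "    name=\"assistant\",\n",
    "    instructions=AssistantAgent.DEFAULT_SYSTEM_MESSAGE,\n",
    "    llm_config={\"config_list\": config_list},\n",
    "    assistant_config={\"assistant_id\": \"asst_...\"},\n",
    ")\n",
    "```"
   ]
  },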
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
      "\n",
      "Write py code to eval 2 + 2\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[33massistant\u001b[0m (to user_proxy):\n",
      "\n",
      "```python\n",
      "# Calculate 2+2 and print the result\n",
      "result = 2 + 2\n",
      "print(result)\n",
      "```\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[31m\n",
      ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
      "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
      "\n",
      "exitcode: 0 (execution succeeded)\n",
      "Code output: \n",
      "4\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n",
      "\u001b[33massistant\u001b[0m (to user_proxy):\n",
      "\n",
      "The Python code successfully calculated \\(2 + 2\\) and printed the result, which is \\(4\\).\n",
      "\n",
      "TERMINATE\n",
      "\n",
      "\n",
      "--------------------------------------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "ChatResult(chat_id=None, chat_history=[{'content': 'Write py code to eval 2 + 2', 'role': 'assistant'}, {'content': '```python\\n# Calculate 2+2 and print the result\\nresult = 2 + 2\\nprint(result)\\n```\\n', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\n4\\n', 'role': 'assistant'}, {'content': 'The Python code successfully calculated \\\\(2 + 2\\\\) and printed the result, which is \\\\(4\\\\).\\n\\nTERMINATE\\n', 'role': 'user'}], summary='The Python code successfully calculated \\\\(2 + 2\\\\) and printed the result, which is \\\\(4\\\\).\\n\\n\\n', cost=({'total_cost': 0}, {'total_cost': 0}), human_input=[])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "user_proxy.initiate_chat(gpt_assistant, message=\"Write py code to eval 2 + 2\", clear_history=True)"
   ]
  },
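  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The assistant created above persists in your OpenAI account, so the final cell removes it with `delete_assistant()`; this deletion is permanent on the OpenAI side. If you want to check which assistants remain in the account, here is a minimal sketch that queries the `openai` v1 Python client directly (an assumption: that client is installed alongside AutoGen's OpenAI integration and `OPENAI_API_KEY` is set in your environment).\n",
    "\n",
    "```python\n",
    "from openai import OpenAI\n",
    "\n",
    "client = OpenAI()  # reads OPENAI_API_KEY from the environment\n",
    "\n",
    "# List the assistants currently registered in the account; after running the\n",
    "# cleanup cell below, the assistant created in this notebook should be gone.\n",
    "for assistant in client.beta.assistants.list():\n",
    "    print(assistant.id, assistant.name)\n",
    "```"
   ]
  },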
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Permanently deleting assistant...\n"
     ]
    }
   ],
   "source": [
    "gpt_assistant.delete_assistant()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}