mirror of https://github.com/microsoft/autogen.git
synced 2025-12-03 02:20:58 +00:00
Separate openai assistant related config items from llm_config (#2037)

* add assistant config
* add test
* change notebook to use assistant config
* use assistant config in testing
* code refinement

---------

Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>

This commit is contained in:
parent 4429d4d19f
commit 36c4d6aa3e
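In short, assistant-scoped settings (assistant_id, tools, file_ids, check_every_ms) move out of llm_config into a new assistant_config argument, while client-scoped settings stay in llm_config. A minimal sketch of the resulting usage, assuming an OAI_CONFIG_LIST file as in the notebooks below (the instructions string and tool list are placeholders):

```python
import os

from autogen import UserProxyAgent, config_list_from_json
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent

config_list = config_list_from_json("OAI_CONFIG_LIST")

# Client-level options stay in llm_config ...
llm_config = {"config_list": config_list}

# ... while assistant-level options move to the new assistant_config.
assistant_config = {
    "assistant_id": os.environ.get("ASSISTANT_ID", None),  # reuse an existing assistant if set
    "tools": [{"type": "code_interpreter"}],  # placeholder tool list
}

gpt_assistant = GPTAssistantAgent(
    name="assistant",
    instructions="You are a helpful assistant.",  # placeholder instructions
    llm_config=llm_config,
    assistant_config=assistant_config,
)

user_proxy = UserProxyAgent(
    name="user_proxy",
    code_execution_config=False,
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
)
user_proxy.initiate_chat(gpt_assistant, message="Write py code to eval 2 + 2")
gpt_assistant.delete_assistant()  # clean up the assistant created for this run
```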
@@ -28,6 +28,7 @@ class GPTAssistantAgent(ConversableAgent):
         name="GPT Assistant",
         instructions: Optional[str] = None,
         llm_config: Optional[Union[Dict, bool]] = None,
+        assistant_config: Optional[Dict] = None,
         overwrite_instructions: bool = False,
         overwrite_tools: bool = False,
         **kwargs,
@@ -43,8 +44,9 @@ class GPTAssistantAgent(ConversableAgent):
                 AssistantAgent.DEFAULT_SYSTEM_MESSAGE. If the assistant exists, the
                 system message will be set to the existing assistant instructions.
             llm_config (dict or False): llm inference configuration.
-                - assistant_id: ID of the assistant to use. If None, a new assistant will be created.
                 - model: Model to use for the assistant (gpt-4-1106-preview, gpt-3.5-turbo-1106).
+            assistant_config
+                - assistant_id: ID of the assistant to use. If None, a new assistant will be created.
                 - check_every_ms: check thread run status interval
                 - tools: Give Assistants access to OpenAI-hosted tools like Code Interpreter and Knowledge Retrieval,
                         or build your own tools using Function calling. ref https://platform.openai.com/docs/assistants/tools
@@ -57,23 +59,19 @@ class GPTAssistantAgent(ConversableAgent):
         """

         self._verbose = kwargs.pop("verbose", False)
-        super().__init__(
-            name=name, system_message=instructions, human_input_mode="NEVER", llm_config=llm_config, **kwargs
-        )
+        openai_client_cfg, openai_assistant_cfg = self._process_assistant_config(llm_config, assistant_config)

-        if llm_config is False:
-            raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
-        # Use AutoGen OpenAIWrapper to create a client
-        openai_client_cfg = copy.deepcopy(llm_config)
-        # Use the class variable
-        model_name = GPTAssistantAgent.DEFAULT_MODEL_NAME
+        super().__init__(
+            name=name, system_message=instructions, human_input_mode="NEVER", llm_config=openai_client_cfg, **kwargs
+        )

         # GPTAssistantAgent's azure_deployment param may cause NotFoundError (404) in client.beta.assistants.list()
         # See: https://github.com/microsoft/autogen/pull/1721
+        model_name = self.DEFAULT_MODEL_NAME
         if openai_client_cfg.get("config_list") is not None and len(openai_client_cfg["config_list"]) > 0:
-            model_name = openai_client_cfg["config_list"][0].pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME)
+            model_name = openai_client_cfg["config_list"][0].pop("model", self.DEFAULT_MODEL_NAME)
         else:
-            model_name = openai_client_cfg.pop("model", GPTAssistantAgent.DEFAULT_MODEL_NAME)
+            model_name = openai_client_cfg.pop("model", self.DEFAULT_MODEL_NAME)

         logger.warning("OpenAI client config of GPTAssistantAgent(%s) - model: %s", name, model_name)

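As the hunk above shows, the model name is now resolved from the processed client config rather than from raw llm_config: the first config_list entry wins, and its "model" key is popped so it is not forwarded to the OpenAI client. A small illustration with made-up values (DEFAULT_MODEL_NAME below stands in for GPTAssistantAgent.DEFAULT_MODEL_NAME, whose actual value is not shown in this diff):

```python
import copy

DEFAULT_MODEL_NAME = "gpt-4-1106-preview"  # assumption: stand-in for the class default

# Hypothetical processed client config, as returned by _process_assistant_config.
openai_client_cfg = {"config_list": [{"model": "gpt-4-turbo-preview", "api_key": "sk-..."}]}

cfg = copy.deepcopy(openai_client_cfg)
if cfg.get("config_list") is not None and len(cfg["config_list"]) > 0:
    # "model" is popped from the first entry so the remaining dict is pure client config.
    model_name = cfg["config_list"][0].pop("model", DEFAULT_MODEL_NAME)
else:
    model_name = cfg.pop("model", DEFAULT_MODEL_NAME)

print(model_name)  # -> gpt-4-turbo-preview
```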
@@ -82,14 +80,17 @@ class GPTAssistantAgent(ConversableAgent):
            logger.warning("GPT Assistant only supports one OpenAI client. Using the first client in the list.")

        self._openai_client = oai_wrapper._clients[0]._oai_client
-        openai_assistant_id = llm_config.get("assistant_id", None)
+        openai_assistant_id = openai_assistant_cfg.get("assistant_id", None)
        if openai_assistant_id is None:
            # try to find assistant by name first
            candidate_assistants = retrieve_assistants_by_name(self._openai_client, name)
            if len(candidate_assistants) > 0:
                # Filter out candidates with the same name but different instructions, file IDs, and function names.
                candidate_assistants = self.find_matching_assistant(
-                    candidate_assistants, instructions, llm_config.get("tools", []), llm_config.get("file_ids", [])
+                    candidate_assistants,
+                    instructions,
+                    openai_assistant_cfg.get("tools", []),
+                    openai_assistant_cfg.get("file_ids", []),
                )

            if len(candidate_assistants) == 0:
@@ -103,9 +104,9 @@ class GPTAssistantAgent(ConversableAgent):
                self._openai_assistant = self._openai_client.beta.assistants.create(
                    name=name,
                    instructions=instructions,
-                    tools=llm_config.get("tools", []),
+                    tools=openai_assistant_cfg.get("tools", []),
                    model=model_name,
-                    file_ids=llm_config.get("file_ids", []),
+                    file_ids=openai_assistant_cfg.get("file_ids", []),
                )
            else:
                logger.warning(
@@ -135,8 +136,8 @@ class GPTAssistantAgent(ConversableAgent):
                    "overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
                )

-        # Check if tools are specified in llm_config
-        specified_tools = llm_config.get("tools", None)
+        # Check if tools are specified in assistant_config
+        specified_tools = openai_assistant_cfg.get("tools", None)

        if specified_tools is None:
            # Check if the current assistant has tools defined
@@ -155,7 +156,7 @@ class GPTAssistantAgent(ConversableAgent):
                )
                self._openai_assistant = self._openai_client.beta.assistants.update(
                    assistant_id=openai_assistant_id,
-                    tools=llm_config.get("tools", []),
+                    tools=openai_assistant_cfg.get("tools", []),
                )
            else:
                # Tools are specified but overwrite_tools is False; do not update the assistant's tools
@@ -414,6 +415,10 @@ class GPTAssistantAgent(ConversableAgent):
    def openai_client(self):
        return self._openai_client

+    @property
+    def openai_assistant(self):
+        return self._openai_assistant
+
    def get_assistant_instructions(self):
        """Return the assistant instructions from OAI assistant API"""
        return self._openai_assistant.instructions
@@ -472,3 +477,31 @@ class GPTAssistantAgent(ConversableAgent):
                matching_assistants.append(assistant)

        return matching_assistants
+
+    def _process_assistant_config(self, llm_config, assistant_config):
+        """
+        Process the llm_config and assistant_config to extract the model name and assistant related configurations.
+        """
+
+        if llm_config is False:
+            raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
+
+        if llm_config is None:
+            openai_client_cfg = {}
+        else:
+            openai_client_cfg = copy.deepcopy(llm_config)
+
+        if assistant_config is None:
+            openai_assistant_cfg = {}
+        else:
+            openai_assistant_cfg = copy.deepcopy(assistant_config)
+
+        # Move the assistant related configurations to assistant_config
+        # It's important to keep forward compatibility
+        assistant_config_items = ["assistant_id", "tools", "file_ids", "check_every_ms"]
+        for item in assistant_config_items:
+            if openai_client_cfg.get(item) is not None and openai_assistant_cfg.get(item) is None:
+                openai_assistant_cfg[item] = openai_client_cfg[item]
+            openai_client_cfg.pop(item, None)
+
+        return openai_client_cfg, openai_assistant_cfg
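The migration loop above is what keeps pre-#2037 configurations working: assistant-scoped keys found in llm_config are copied into the assistant config unless explicitly set there, and are always stripped from the client config. A standalone sketch of the behavior, with illustrative dict values:

```python
import copy


def process_assistant_config(llm_config, assistant_config):
    """Illustrative replica of GPTAssistantAgent._process_assistant_config above."""
    if llm_config is False:
        raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
    openai_client_cfg = {} if llm_config is None else copy.deepcopy(llm_config)
    openai_assistant_cfg = {} if assistant_config is None else copy.deepcopy(assistant_config)
    for item in ["assistant_id", "tools", "file_ids", "check_every_ms"]:
        # Migrate the item unless assistant_config already sets it; always
        # remove it from the client config.
        if openai_client_cfg.get(item) is not None and openai_assistant_cfg.get(item) is None:
            openai_assistant_cfg[item] = openai_client_cfg[item]
        openai_client_cfg.pop(item, None)
    return openai_client_cfg, openai_assistant_cfg


# Old-style call: assistant_id still tucked inside llm_config.
client_cfg, assistant_cfg = process_assistant_config(
    {"config_list": [{"model": "gpt-4"}], "assistant_id": "asst_123"}, None
)
assert client_cfg == {"config_list": [{"model": "gpt-4"}]}
assert assistant_cfg == {"assistant_id": "asst_123"}
```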
@@ -50,8 +50,6 @@
 "import logging\n",
 "import os\n",
 "\n",
-"import requests\n",
-"\n",
 "from autogen import UserProxyAgent, config_list_from_json\n",
 "from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent\n",
 "\n",
@@ -79,32 +77,24 @@
 "\n",
 "def get_ossinsight(question):\n",
 "    \"\"\"\n",
-"    Retrieve the top 10 developers with the most followers on GitHub.\n",
+"    [Mock] Retrieve the top 10 developers with the most followers on GitHub.\n",
 "    \"\"\"\n",
-"    url = \"https://api.ossinsight.io/explorer/answer\"\n",
-"    headers = {\"Content-Type\": \"application/json\"}\n",
-"    data = {\"question\": question, \"ignoreCache\": True}\n",
-"\n",
-"    response = requests.post(url, headers=headers, json=data)\n",
-"    if response.status_code == 200:\n",
-"        answer = response.json()\n",
-"    else:\n",
-"        return f\"Request to {url} failed with status code: {response.status_code}\"\n",
-"\n",
-"    report_components = []\n",
-"    report_components.append(f\"Question: {answer['question']['title']}\")\n",
-"    if answer[\"query\"][\"sql\"] != \"\":\n",
-"        report_components.append(f\"querySQL: {answer['query']['sql']}\")\n",
-"\n",
-"    if answer.get(\"result\", None) is None or len(answer[\"result\"][\"rows\"]) == 0:\n",
-"        result = \"Result: N/A\"\n",
-"    else:\n",
-"        result = \"Result:\\n  \" + \"\\n  \".join([str(row) for row in answer[\"result\"][\"rows\"]])\n",
-"    report_components.append(result)\n",
-"\n",
-"    if answer.get(\"error\", None) is not None:\n",
-"        report_components.append(f\"Error: {answer['error']}\")\n",
-"    return \"\\n\\n\".join(report_components) + \"\\n\\n\""
+"    report_components = [\n",
+"        f\"Question: {question}\",\n",
+"        \"SQL: SELECT `login` AS `user_login`, `followers` AS `followers` FROM `github_users` ORDER BY `followers` DESC LIMIT 10\",\n",
+"        \"\"\"Results:\n",
+"  {'followers': 166730, 'user_login': 'torvalds'}\n",
+"  {'followers': 86239, 'user_login': 'yyx990803'}\n",
+"  {'followers': 77611, 'user_login': 'gaearon'}\n",
+"  {'followers': 72668, 'user_login': 'ruanyf'}\n",
+"  {'followers': 65415, 'user_login': 'JakeWharton'}\n",
+"  {'followers': 60972, 'user_login': 'peng-zhihui'}\n",
+"  {'followers': 58172, 'user_login': 'bradtraversy'}\n",
+"  {'followers': 52143, 'user_login': 'gustavoguanabara'}\n",
+"  {'followers': 51542, 'user_login': 'sindresorhus'}\n",
+"  {'followers': 49621, 'user_login': 'tj'}\"\"\",\n",
+"    ]\n",
+"    return \"\\n\" + \"\\n\\n\".join(report_components) + \"\\n\""
 ]
 },
 {
@@ -120,12 +110,24 @@
 "cell_type": "code",
 "execution_count": 2,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"OpenAI client config of GPTAssistantAgent(OSS Analyst) - model: gpt-4-turbo-preview\n",
+"GPT Assistant only supports one OpenAI client. Using the first client in the list.\n",
+"No matching assistant found, creating a new assistant\n"
+]
+}
+],
 "source": [
 "assistant_id = os.environ.get(\"ASSISTANT_ID\", None)\n",
 "config_list = config_list_from_json(\"OAI_CONFIG_LIST\")\n",
 "llm_config = {\n",
 "    \"config_list\": config_list,\n",
+"}\n",
+"assistant_config = {\n",
 "    \"assistant_id\": assistant_id,\n",
 "    \"tools\": [\n",
 "        {\n",
@@ -143,6 +145,7 @@
 "        \"Please carefully read the context of the conversation to identify the current analysis question or problem that needs addressing.\"\n",
 "    ),\n",
 "    llm_config=llm_config,\n",
+"    assistant_config=assistant_config,\n",
 "    verbose=True,\n",
 ")\n",
 "oss_analyst.register_function(\n",
@@ -178,13 +181,14 @@
 "\u001b[35m\n",
 ">>>>>>>> EXECUTING FUNCTION ossinsight_data_api...\u001b[0m\n",
 "\u001b[35m\n",
-"Input arguments: {'question': 'Who are the top 10 developers with the most followers on GitHub?'}\n",
+"Input arguments: {'question': 'Top 10 developers with the most followers'}\n",
 "Output:\n",
-"Question: Who are the top 10 developers with the most followers on GitHub?\n",
 "\n",
-"querySQL: SELECT `login` AS `user_login`, `followers` AS `followers` FROM `github_users` ORDER BY `followers` DESC LIMIT 10\n",
+"Question: Top 10 developers with the most followers\n",
 "\n",
-"Result:\n",
+"SQL: SELECT `login` AS `user_login`, `followers` AS `followers` FROM `github_users` ORDER BY `followers` DESC LIMIT 10\n",
+"\n",
+"Results:\n",
 "  {'followers': 166730, 'user_login': 'torvalds'}\n",
 "  {'followers': 86239, 'user_login': 'yyx990803'}\n",
 "  {'followers': 77611, 'user_login': 'gaearon'}\n",
@@ -195,24 +199,21 @@
 "  {'followers': 52143, 'user_login': 'gustavoguanabara'}\n",
 "  {'followers': 51542, 'user_login': 'sindresorhus'}\n",
 "  {'followers': 49621, 'user_login': 'tj'}\n",
 "\n",
 "\u001b[0m\n",
 "\u001b[33mOSS Analyst\u001b[0m (to user_proxy):\n",
 "\n",
-"The top 10 developers with the most followers on GitHub are as follows:\n",
+"The top 10 developers with the most followers on GitHub are:\n",
 "\n",
-"1. `torvalds` with 166,730 followers\n",
-"2. `yyx990803` with 86,239 followers\n",
-"3. `gaearon` with 77,611 followers\n",
-"4. `ruanyf` with 72,668 followers\n",
-"5. `JakeWharton` with 65,415 followers\n",
-"6. `peng-zhihui` with 60,972 followers\n",
-"7. `bradtraversy` with 58,172 followers\n",
-"8. `gustavoguanabara` with 52,143 followers\n",
-"9. `sindresorhus` with 51,542 followers\n",
-"10. `tj` with 49,621 followers\n",
-"\n",
-"These figures indicate the number of followers these developers had at the time of the analysis.\n",
+"1. **Linus Torvalds** (`torvalds`) with 166,730 followers\n",
+"2. **Evan You** (`yyx990803`) with 86,239 followers\n",
+"3. **Dan Abramov** (`gaearon`) with 77,611 followers\n",
+"4. **Ruan YiFeng** (`ruanyf`) with 72,668 followers\n",
+"5. **Jake Wharton** (`JakeWharton`) with 65,415 followers\n",
+"6. **Peng Zhihui** (`peng-zhihui`) with 60,972 followers\n",
+"7. **Brad Traversy** (`bradtraversy`) with 58,172 followers\n",
+"8. **Gustavo Guanabara** (`gustavoguanabara`) with 52,143 followers\n",
+"9. **Sindre Sorhus** (`sindresorhus`) with 51,542 followers\n",
+"10. **TJ Holowaychuk** (`tj`) with 49,621 followers\n",
 "\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
@@ -223,11 +224,18 @@
 "--------------------------------------------------------------------------------\n",
 "\u001b[33mOSS Analyst\u001b[0m (to user_proxy):\n",
 "\n",
-"It seems you haven't entered a question or a request. Could you please provide more details or specify how I can assist you further?\n",
+"It looks like there is no question or prompt for me to respond to. Could you please provide more details or ask a question that you would like assistance with?\n",
 "\n",
 "\n",
 "--------------------------------------------------------------------------------\n"
 ]
 },
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"Permanently deleting assistant...\n"
+]
+}
 ],
 "source": [
@@ -242,7 +250,8 @@
 "    max_consecutive_auto_reply=1,\n",
 ")\n",
 "\n",
-"user_proxy.initiate_chat(oss_analyst, message=\"Top 10 developers with the most followers\")"
+"user_proxy.initiate_chat(oss_analyst, message=\"Top 10 developers with the most followers\")\n",
+"oss_analyst.delete_assistant()"
 ]
 }
 ],
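The notebook above pairs the new assistant_config with function calling: the tool schema goes to the Assistants API via assistant_config, and the local implementation is mapped in with register_function. A condensed sketch (the schema is abbreviated; config_list and get_ossinsight are defined earlier in the notebook):

```python
# Abbreviated function schema; the notebook's real schema carries a fuller description.
ossinsight_api_schema = {
    "name": "ossinsight_data_api",
    "description": "Answer GitHub analytics questions (mocked above).",
    "parameters": {
        "type": "object",
        "properties": {"question": {"type": "string"}},
        "required": ["question"],
    },
}

oss_analyst = GPTAssistantAgent(
    name="OSS Analyst",
    instructions="Answer questions about open source projects on GitHub.",  # abbreviated
    llm_config={"config_list": config_list},
    assistant_config={
        "assistant_id": None,  # always create a fresh assistant
        "tools": [{"type": "function", "function": ossinsight_api_schema}],
    },
)
# Map the tool name the model calls to the local Python implementation.
oss_analyst.register_function(function_map={"ossinsight_data_api": get_ossinsight})
```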
@@ -19,7 +19,9 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"assistant_id was None, creating a new assistant\n"
+"OpenAI client config of GPTAssistantAgent(assistant) - model: gpt-4-turbo-preview\n",
+"GPT Assistant only supports one OpenAI client. Using the first client in the list.\n",
+"Matching assistant found, using the first matching assistant: {'id': 'asst_sKUCUXkaXyTidtlyovbqppH3', 'created_at': 1710320924, 'description': None, 'file_ids': ['file-AcnBk5PCwAjJMCVO0zLSbzKP'], 'instructions': 'You are adept at question answering', 'metadata': {}, 'model': 'gpt-4-turbo-preview', 'name': 'assistant', 'object': 'assistant', 'tools': [ToolRetrieval(type='retrieval')]}\n"
 ]
 },
 {
@@ -28,50 +30,32 @@
 "text": [
 "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
 "\n",
-"What is the name of the class of agents I gave you?\n",
+"Please explain the code in this file!\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\u001b[33massistant\u001b[0m (to user_proxy):\n",
 "\n",
-"The class of agents provided in the file is called `ConversableAgent`.\n",
+"The code in the file appears to define tests for various functionalities related to a GPT-based assistant agent. Here is a summary of the main components and functionalities described in the visible portion of the code:\n",
+"\n",
+"1. **Imports and Setup**: The script imports necessary libraries and modules, such as `pytest` for testing, `uuid` for generating unique identifiers, and `openai` along with custom modules like `autogen` and `OpenAIWrapper`. It sets up the system path to include specific directories for importing test configurations and dependencies.\n",
+"\n",
+"2. **Conditional Test Skipping**: The script includes logic to conditionally skip tests based on the execution environment or missing dependencies. This is done through the `@pytest.mark.skipif` decorator, which conditionally skips test functions based on the `skip` flag, determined by whether certain imports are successful or other conditions are met.\n",
+"\n",
+"3. **Test Configurations**: The code loads configurations for interacting with the OpenAI API and a hypothetical Azure API (as indicated by the placeholder `azure`), filtering these configurations by certain criteria such as API type and version.\n",
+"\n",
+"4. **Test Cases**:\n",
+"   - **Configuration List Test (`test_config_list`)**: Ensures that the configurations for both OpenAI and the hypothetical Azure API are loaded correctly by asserting the presence of at least one configuration for each.\n",
+"   - **GPT Assistant Chat Test (`test_gpt_assistant_chat`)**: Tests the functionality of a GPT Assistant Agent by simulating a chat interaction. It uses a mock function to simulate an external API call and checks if the GPT Assistant properly processes the chat input, invokes the external function, and provides an expected response.\n",
+"   - **Assistant Instructions Test (`test_get_assistant_instructions` and related functions)**: These tests verify that instructions can be set and retrieved correctly for a GPTAssistantAgent. It covers creating an agent, setting instructions, and ensuring the set instructions can be retrieved as expected.\n",
+"   - **Instructions Overwrite Test (`test_gpt_assistant_instructions_overwrite`)**: Examines whether the instructions for a GPTAssistantAgent can be successfully overwritten by creating a new agent with the same ID but different instructions, with an explicit indication to overwrite the previous instructions.\n",
+"\n",
+"Each test case aims to cover different aspects of the GPTAssistantAgent's functionality, such as configuration loading, interactive chat behavior, function registration and invocation, and instruction management. The mocks and assertions within the tests are designed to ensure that each component of the GPTAssistantAgent behaves as expected under controlled conditions.\n",
+"\n",
 "\n",
-"--------------------------------------------------------------------------------\n",
-"\u001b[33muser_proxy\u001b[0m (to assistant):\n",
-"\n",
-"What does it do?\n",
-"\n",
-"--------------------------------------------------------------------------------\n",
-"\u001b[33massistant\u001b[0m (to user_proxy):\n",
-"\n",
-"The `ConversableAgent` class is designed as a generic conversable agent that can be configured to act either as an assistant or user proxy. When it receives a message, it automatically generates and sends a reply unless the message is a termination message. It features a method to initiate a chat with another agent and can have its auto-reply behavior adjusted by overriding the `generate_reply` method.\n",
-"\n",
-"Here are some specific functionalities and mechanisms included in the `ConversableAgent` class:\n",
-"\n",
-"- It can process received messages and decide whether or not a reply is necessary or requested.\n",
-"- It can reset consecutive auto-reply counters and clear chat history when starting a new conversation.\n",
-"- It allows initiating chats either synchronously or asynchronously with the ability to pass context information and control chattiness.\n",
-"- It has the ability to register reply functions with specific triggers and order them based on preference.\n",
-"- The class supports setting a maximum number of consecutive auto-replies, after which it can prompt for human input based on configured criteria (e.g., always, on termination, or never).\n",
-"- The auto-reply trigger mechanism can be finely controlled by associating reply functions with triggers such as instances of particular classes, specific strings, or custom callable conditions.\n",
-"\n",
-"This class provides an extensible framework for creating bots or agents that can interact in a chat-like context, with custom behavior that developers can tailor to specific applications.\n",
-"\n",
-"\n",
 "--------------------------------------------------------------------------------\n",
 "\u001b[31m\n",
 ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
 "\u001b[31m\n",
 ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
 "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
 "\n",
 "\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\u001b[33massistant\u001b[0m (to user_proxy):\n",
 "\n",
 "It seems that your request was incomplete. Could you please provide more information or clarify your request?\n",
 "\n",
 "TERMINATE\n",
 "\n",
 "--------------------------------------------------------------------------------\n"
 ]
@@ -99,15 +83,20 @@
 "config_list = config_list_from_json(\"OAI_CONFIG_LIST\")\n",
 "llm_config = {\n",
 "    \"config_list\": config_list,\n",
+"}\n",
+"assistant_config = {\n",
 "    \"assistant_id\": assistant_id,\n",
 "    \"tools\": [{\"type\": \"retrieval\"}],\n",
-"    \"file_ids\": [\"file-CmlT0YKLB3ZCdHmslF9FOv69\"],\n",
+"    \"file_ids\": [\"file-AcnBk5PCwAjJMCVO0zLSbzKP\"],\n",
 "    # add id of an existing file in your openai account\n",
 "    # in this case I added the implementation of conversable_agent.py\n",
 "}\n",
 "\n",
 "gpt_assistant = GPTAssistantAgent(\n",
-"    name=\"assistant\", instructions=\"You are adept at question answering\", llm_config=llm_config\n",
+"    name=\"assistant\",\n",
+"    instructions=\"You are adept at question answering\",\n",
+"    llm_config=llm_config,\n",
+"    assistant_config=assistant_config,\n",
 ")\n",
 "\n",
 "user_proxy = UserProxyAgent(\n",
@@ -116,7 +105,7 @@
 "    is_termination_msg=lambda msg: \"TERMINATE\" in msg[\"content\"],\n",
 "    human_input_mode=\"ALWAYS\",\n",
 ")\n",
-"user_proxy.initiate_chat(gpt_assistant, message=\"What is the name of the class of agents I gave you?\")\n",
+"user_proxy.initiate_chat(gpt_assistant, message=\"Please explain the code in this file!\")\n",
 "\n",
 "gpt_assistant.delete_assistant()"
 ]
@@ -19,7 +19,9 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"assistant_id was None, creating a new assistant\n"
+"OpenAI client config of GPTAssistantAgent(assistant) - model: gpt-4-turbo-preview\n",
+"GPT Assistant only supports one OpenAI client. Using the first client in the list.\n",
+"No matching assistant found, creating a new assistant\n"
 ]
 },
 {
@@ -34,45 +36,38 @@
 "\u001b[33massistant\u001b[0m (to user_proxy):\n",
 "\n",
 "```python\n",
-"print(\"Hello, World!\")\n",
+"print(\"Hello, world!\")\n",
 "```\n",
 "\n",
-"Please run this Python code to print \"Hello, World!\" to the console.\n",
-"\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\u001b[31m\n",
+">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n"
-]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"execute_code was called without specifying a value for use_docker. Since the python docker package is not available, code will be run natively. Note: this fallback behavior is subject to change\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
 "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
 "\n",
 "exitcode: 0 (execution succeeded)\n",
 "Code output: \n",
-"Hello, World!\n",
+"Hello, world!\n",
 "\n",
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\u001b[33massistant\u001b[0m (to user_proxy):\n",
 "\n",
-"The code executed successfully and printed \"Hello, World!\" as expected.\n",
-"\n",
 "TERMINATE\n",
 "\n",
 "\n",
 "--------------------------------------------------------------------------------\n"
 ]
 },
+{
+"data": {
+"text/plain": [
+"ChatResult(chat_id=None, chat_history=[{'content': 'Print hello world', 'role': 'assistant'}, {'content': '```python\\nprint(\"Hello, world!\")\\n```\\n', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\nHello, world!\\n', 'role': 'assistant'}, {'content': 'TERMINATE\\n', 'role': 'user'}], summary='\\n', cost=({'total_cost': 0}, {'total_cost': 0}), human_input=[])"
+]
+},
+"execution_count": 1,
+"metadata": {},
+"output_type": "execute_result"
+}
 ],
 "source": [
@@ -88,10 +83,15 @@
 "assistant_id = os.environ.get(\"ASSISTANT_ID\", None)\n",
 "\n",
 "config_list = config_list_from_json(\"OAI_CONFIG_LIST\")\n",
-"llm_config = {\"config_list\": config_list, \"assistant_id\": assistant_id}\n",
+"llm_config = {\"config_list\": config_list}\n",
 "\n",
+"assistant_config = {\"assistant_id\": assistant_id}\n",
+"\n",
 "gpt_assistant = GPTAssistantAgent(\n",
-"    name=\"assistant\", instructions=AssistantAgent.DEFAULT_SYSTEM_MESSAGE, llm_config=llm_config\n",
+"    name=\"assistant\",\n",
+"    instructions=AssistantAgent.DEFAULT_SYSTEM_MESSAGE,\n",
+"    llm_config=llm_config,\n",
+"    assistant_config=assistant_config,\n",
 ")\n",
 "\n",
 "user_proxy = UserProxyAgent(\n",
@@ -124,8 +124,7 @@
 "\u001b[33massistant\u001b[0m (to user_proxy):\n",
 "\n",
 "```python\n",
-"# Let's write a simple Python code to evaluate 2 + 2 and print the result.\n",
-"\n",
+"# Calculate 2+2 and print the result\n",
 "result = 2 + 2\n",
 "print(result)\n",
 "```\n",
@@ -133,20 +132,7 @@
 "\n",
 "--------------------------------------------------------------------------------\n",
 "\u001b[31m\n",
+">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n"
-]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"execute_code was called without specifying a value for use_docker. Since the python docker package is not available, code will be run natively. Note: this fallback behavior is subject to change\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
 "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
 "\n",
 "exitcode: 0 (execution succeeded)\n",
@@ -157,13 +143,23 @@
 "--------------------------------------------------------------------------------\n",
 "\u001b[33massistant\u001b[0m (to user_proxy):\n",
 "\n",
-"The Python code was executed successfully and the result of evaluating 2 + 2 is 4.\n",
+"The Python code successfully calculated \\(2 + 2\\) and printed the result, which is \\(4\\).\n",
 "\n",
 "TERMINATE\n",
 "\n",
 "\n",
 "--------------------------------------------------------------------------------\n"
 ]
 },
+{
+"data": {
+"text/plain": [
+"ChatResult(chat_id=None, chat_history=[{'content': 'Write py code to eval 2 + 2', 'role': 'assistant'}, {'content': '```python\\n# Calculate 2+2 and print the result\\nresult = 2 + 2\\nprint(result)\\n```\\n', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\n4\\n', 'role': 'assistant'}, {'content': 'The Python code successfully calculated \\\\(2 + 2\\\\) and printed the result, which is \\\\(4\\\\).\\n\\nTERMINATE\\n', 'role': 'user'}], summary='The Python code successfully calculated \\\\(2 + 2\\\\) and printed the result, which is \\\\(4\\\\).\\n\\n\\n', cost=({'total_cost': 0}, {'total_cost': 0}), human_input=[])"
+]
+},
+"execution_count": 2,
+"metadata": {},
+"output_type": "execute_result"
+}
 ],
 "source": [
@@ -204,7 +200,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.12"
+"version": "3.10.13"
 }
 },
 "nbformat": 4,

File diff suppressed because one or more lines are too long
@@ -8,6 +8,7 @@ import sys
 import openai

 import autogen
 from autogen import OpenAIWrapper
+from autogen import UserProxyAgent
 from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
 from autogen.oai.openai_utils import retrieve_assistants_by_name

@@ -85,7 +86,8 @@ def _test_gpt_assistant_chat(gpt_config) -> None:
    name = f"For test_gpt_assistant_chat {uuid.uuid4()}"
    analyst = GPTAssistantAgent(
        name=name,
-        llm_config={"tools": [{"type": "function", "function": ossinsight_api_schema}], **gpt_config},
+        llm_config=gpt_config,
+        assistant_config={"tools": [{"type": "function", "function": ossinsight_api_schema}]},
        instructions="Hello, Open Source Project Analyst. You'll conduct comprehensive evaluations of open source projects or organizations on the GitHub platform",
    )
    try:
@@ -193,6 +195,7 @@ def _test_gpt_assistant_instructions_overwrite(gpt_config) -> None:
        instructions=instructions2,
        llm_config={
            "config_list": gpt_config,
+            # keep it to test older version of assistant config
            "assistant_id": assistant_id,
        },
        overwrite_instructions=True,
@@ -207,6 +210,7 @@ def _test_gpt_assistant_instructions_overwrite(gpt_config) -> None:

+
 @pytest.mark.skipif(
    skip,
    reason="requested to skip",
 )
 def test_gpt_assistant_existing_no_instructions() -> None:
@@ -233,8 +237,8 @@ def test_gpt_assistant_existing_no_instructions() -> None:
            name,
            llm_config={
                "config_list": openai_config_list,
-                "assistant_id": assistant_id,
            },
+            assistant_config={"assistant_id": assistant_id},
        )

        instruction_match = assistant.get_assistant_instructions() == instructions
@@ -259,6 +263,7 @@ def test_get_assistant_files() -> None:
    file = openai_client.files.create(file=open(current_file_path, "rb"), purpose="assistants")
    name = f"For test_get_assistant_files {uuid.uuid4()}"

+    # keep it to test older version of assistant config
    assistant = GPTAssistantAgent(
        name,
        instructions="This is a test",
@@ -276,8 +281,7 @@ def test_get_assistant_files() -> None:

    finally:
        assistant.delete_assistant()
-
-        openai_client.files.delete(file.id)
+        openai_client.files.delete(file.id)

    assert expected_file_id in retrieved_file_ids

@@ -312,6 +316,9 @@ def test_assistant_retrieval() -> None:

    try:
        all_llm_config = {
+            "config_list": openai_config_list,
+        }
+        assistant_config = {
            "tools": [
                {"type": "function", "function": function_1_schema},
                {"type": "function", "function": function_2_schema},
@@ -319,7 +326,6 @@ def test_assistant_retrieval() -> None:
                {"type": "code_interpreter"},
            ],
            "file_ids": [file_1.id, file_2.id],
-            "config_list": openai_config_list,
        }

        name = f"For test_assistant_retrieval {uuid.uuid4()}"
@@ -328,6 +334,7 @@ def test_assistant_retrieval() -> None:
            name,
            instructions="This is a test",
            llm_config=all_llm_config,
+            assistant_config=assistant_config,
        )
        candidate_first = retrieve_assistants_by_name(assistant_first.openai_client, name)

@@ -336,6 +343,7 @@ def test_assistant_retrieval() -> None:
            name,
            instructions="This is a test",
            llm_config=all_llm_config,
+            assistant_config=assistant_config,
        )
        candidate_second = retrieve_assistants_by_name(assistant_second.openai_client, name)

@@ -386,6 +394,7 @@ def test_assistant_mismatch_retrieval() -> None:
    file_2 = openai_client.files.create(file=open(current_file_path, "rb"), purpose="assistants")

    try:
+        # keep it to test older version of assistant config
        all_llm_config = {
            "tools": [
                {"type": "function", "function": function_1_schema},
@@ -567,6 +576,8 @@ def test_gpt_assistant_tools_overwrite() -> None:
            name,
            llm_config={
                "config_list": openai_config_list,
+            },
+            assistant_config={
                "tools": original_tools,
            },
        )
@@ -579,6 +590,8 @@ def test_gpt_assistant_tools_overwrite() -> None:
            name,
            llm_config={
                "config_list": openai_config_list,
+            },
+            assistant_config={
                "assistant_id": assistant_id,
                "tools": new_tools,
            },
@@ -586,11 +599,43 @@ def test_gpt_assistant_tools_overwrite() -> None:
        )

        # Add logic to retrieve the tools from the assistant and assert
-        retrieved_tools = assistant.llm_config.get("tools", [])
+        retrieved_tools = assistant.openai_assistant.tools
+        retrieved_tools_name = [tool.function.name for tool in retrieved_tools]
    finally:
        assistant_org.delete_assistant()

-    assert retrieved_tools == new_tools
+    assert retrieved_tools_name == [tool["function"]["name"] for tool in new_tools]
+
+
+@pytest.mark.skipif(
+    skip,
+    reason="requested to skip",
+)
+def test_gpt_reflection_with_llm() -> None:
+    gpt_assistant = GPTAssistantAgent(
+        name="assistant", llm_config={"config_list": openai_config_list, "assistant_id": None}
+    )
+
+    user_proxy = UserProxyAgent(
+        name="user_proxy",
+        code_execution_config=False,
+        is_termination_msg=lambda msg: "TERMINATE" in msg["content"],
+        human_input_mode="NEVER",
+        max_consecutive_auto_reply=1,
+    )
+    result = user_proxy.initiate_chat(gpt_assistant, message="Write a Joke!", summary_method="reflection_with_llm")
+    assert result is not None
+
+    # use the assistant configuration
+    agent_using_assistant_config = GPTAssistantAgent(
+        name="assistant",
+        llm_config={"config_list": openai_config_list},
+        assistant_config={"assistant_id": gpt_assistant.assistant_id},
+    )
+    result = user_proxy.initiate_chat(
+        agent_using_assistant_config, message="Write a Joke!", summary_method="reflection_with_llm"
+    )
+    assert result is not None


 if __name__ == "__main__":

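Read together, the test changes double as a migration guide for downstream code; a hedged before/after with placeholder values (config_list, assistant_id, and tools stand in for values defined elsewhere):

```python
# Before #2037 (still works: _process_assistant_config migrates the keys):
assistant = GPTAssistantAgent(
    name="assistant",
    llm_config={"config_list": config_list, "assistant_id": assistant_id, "tools": tools},
)

# After #2037 (preferred): client options and assistant options are kept separate.
assistant = GPTAssistantAgent(
    name="assistant",
    llm_config={"config_list": config_list},
    assistant_config={"assistant_id": assistant_id, "tools": tools},
)
```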