# autogen/test/agentchat/chat_with_teachable_agent.py
#
# Interactive manual test for TeachableAgent (introduced in PR #278):
# lets a user chat freely with a TeachableAgent whose memory persists on disk.
from autogen import UserProxyAgent, config_list_from_json
from autogen.agentchat.contrib.teachable_agent import TeachableAgent
# Prefer termcolor for colored console output; if it is not installed,
# degrade gracefully to a no-op that returns the text unchanged.
try:
    from termcolor import colored
except ImportError:

    def colored(x, *args, **kwargs):
        """Fallback for a missing termcolor: ignore styling args, return text as-is."""
        return x
verbosity = 0  # 0 for basic info, 1 to add memory operations, 2 for analyzer messages, 3 for memo lists.
recall_threshold = 1.5  # Higher numbers allow more (but less relevant) memos to be recalled.
use_cache = False  # If True, LLM responses are pulled from cache when available; False forces fresh calls, exposing LLM non-determinism.
# Specify the model to use. GPT-3.5 is less reliable than GPT-4 at learning from user input.
filter_dict = {"model": ["gpt-4"]}
def create_teachable_agent(reset_db=False):
    """Build a TeachableAgent configured from this file's module-level settings.

    Args:
        reset_db: If True, any previously persisted memory database is wiped.

    Returns:
        A freshly constructed TeachableAgent.
    """
    # Load LLM inference endpoints from an env variable or a file.
    # See https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints
    # and OAI_CONFIG_LIST_sample
    config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST", filter_dict=filter_dict)
    llm_config = {"config_list": config_list, "request_timeout": 120, "use_cache": use_cache}
    teach_config = {
        "verbosity": verbosity,
        "reset_db": reset_db,
        "path_to_db_dir": "./tmp/interactive/teachable_agent_db",
        "recall_threshold": recall_threshold,
    }
    return TeachableAgent(name="teachableagent", llm_config=llm_config, teach_config=teach_config)
def interact_freely_with_user():
    """Run a free-form console chat between a human user and a TeachableAgent."""
    # Create the two chat participants, reusing any memory persisted on disk.
    print(colored("\nLoading previous memory (if any) from disk.", "light_cyan"))
    user = UserProxyAgent("user", human_input_mode="ALWAYS")
    teachable_agent = create_teachable_agent(reset_db=False)
    # Kick off the interactive conversation.
    teachable_agent.initiate_chat(user, message="Greetings, I'm a teachable user assistant! What's on your mind today?")
    # Distill and store anything learnable from this chat, then close the memory DB.
    teachable_agent.learn_from_user_feedback()
    teachable_agent.close_db()
if __name__ == "__main__":
    # Entry point: lets the user test TeachableAgent interactively.
    interact_freely_with_user()