mirror of
https://github.com/microsoft/autogen.git
synced 2025-11-11 23:54:52 +00:00
improve test speed (#2406)

* improve test speed
* speed up test
* speed up test
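The recurring change in this commit is swapping hard-coded model-name filters (and per-test `timeout`/`cache_seed` overrides) for tag-based selection of cheaper gpt-3.5-turbo configs. A minimal sketch of that pattern, assuming an OAI_CONFIG_LIST file under ./notebook whose entries carry a "tags" field (the file location and tag names here mirror the tests below, not a fixed convention):

```python
import autogen

# Select the cheaper/faster gpt-3.5-turbo entries by tag instead of by model name.
config_list_35 = autogen.config_list_from_json(
    "OAI_CONFIG_LIST",
    file_location="notebook",
    filter_dict={"tags": ["gpt-3.5-turbo"]},
)

# Narrow further to the entries tagged for tool/function calling.
config_list_tool = autogen.filter_config(config_list_35, {"tags": ["tool"]})
```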
This commit is contained in:
parent 297904f210
commit d307818dd9
@@ -39,7 +39,7 @@ if not skip_openai:
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         filter_dict={
-            "model": ["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "model": ["gpt-3.5-turbo"],
         },
         file_location=KEY_LOC,
     )
@@ -85,15 +85,14 @@ def test_agent_usage():
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
     )
     assistant = AssistantAgent(
         "assistant",
         system_message="You are a helpful assistant.",
         llm_config={
-            "timeout": 600,
             "cache_seed": None,
             "config_list": config_list,
-            "model": "gpt-3.5-turbo-0613",
         },
     )

@@ -104,7 +103,6 @@ def test_agent_usage():
         code_execution_config=False,
         llm_config={
             "config_list": config_list,
-            "model": "gpt-3.5-turbo-0613",
         },
         # In the system message the "user" always refers to the other agent.
         system_message="You ask a user for help. You check the answer from the user and provide feedback.",
@@ -140,5 +138,5 @@ def test_agent_usage():


 if __name__ == "__main__":
-    test_gathering()
+    # test_gathering()
     test_agent_usage()
@@ -9,14 +9,7 @@ import autogen
 from autogen.agentchat import AssistantAgent, UserProxyAgent

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
+from conftest import reason, skip_openai  # noqa: E402

-try:
-    from openai import OpenAI
-except ImportError:
-    skip = True
-else:
-    skip = False or skip_openai
-
 KEY_LOC = "notebook"
 OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
@@ -24,8 +17,8 @@ here = os.path.abspath(os.path.dirname(__file__))


 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
-    reason="do not run on MacOS or windows OR openai not installed OR requested to skip",
+    sys.platform in ["darwin", "win32"] or skip_openai,
+    reason="do not run on MacOS or windows OR " + reason,
 )
 def test_ai_user_proxy_agent():
     conversations = {}
@@ -34,6 +27,7 @@ def test_ai_user_proxy_agent():
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
     )
     assistant = AssistantAgent(
         "assistant",
@@ -67,7 +61,7 @@ def test_ai_user_proxy_agent():
     print("Result summary:", res.summary)


-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
@@ -111,9 +105,13 @@ If "Thank you" or "You're welcome" are said in the conversation, then say TERMINATE
     assert not isinstance(user.use_docker, bool)  # None or str


-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
-def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=10):
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, file_location=KEY_LOC)
+@pytest.mark.skipif(skip_openai, reason=reason)
+def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=3):
+    config_list = autogen.config_list_from_json(
+        OAI_CONFIG_LIST,
+        file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
+    )
     conversations = {}
     # autogen.ChatCompletion.start_logging(conversations)
     llm_config = {
@@ -160,13 +158,13 @@ print('Hello world!')
     # autogen.ChatCompletion.stop_logging()


-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
-def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
+@pytest.mark.skipif(skip_openai, reason=reason)
+def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=2):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "tags": ["gpt-4", "gpt-4-32k"],
         },
     )
     hard_questions = [
@@ -207,4 +205,5 @@ if __name__ == "__main__":
     # when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
     # should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
     # although the max_consecutive_auto_reply is set to 10.
-    test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)
+    test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=2)
+    # test_ai_user_proxy_agent()
@@ -10,14 +10,7 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
 import autogen

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
+from conftest import reason, skip_openai  # noqa: E402

-try:
-    from openai import OpenAI
-except ImportError:
-    skip = True
-else:
-    skip = False or skip_openai
-

 def get_market_news(ind, ind_upper):
@@ -61,24 +54,15 @@ def get_market_news(ind, ind_upper):
     return feeds_summary


-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_async_groupchat():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
-
-    llm_config = {
-        "timeout": 600,
-        "cache_seed": 41,
-        "config_list": config_list,
-        "temperature": 0,
-    }
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
         name="assistant",
         llm_config={
-            "timeout": 600,
-            "cache_seed": 41,
             "config_list": config_list,
             "temperature": 0,
         },
@@ -93,20 +77,21 @@ async def test_async_groupchat():
         default_auto_reply=None,
     )

-    groupchat = autogen.GroupChat(agents=[user_proxy, assistant], messages=[], max_round=12)
+    groupchat = autogen.GroupChat(
+        agents=[user_proxy, assistant], messages=[], max_round=3, speaker_selection_method="round_robin"
+    )
     manager = autogen.GroupChatManager(
         groupchat=groupchat,
-        llm_config=llm_config,
         is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
     )
-    await user_proxy.a_initiate_chat(manager, message="""Have a short conversation with the assistant.""")
+    await user_proxy.a_initiate_chat(manager, message="""223434*3422=?.""")
     assert len(user_proxy.chat_messages) > 0


-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_stream():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
     data = asyncio.Future()

     async def add_stock_price_data():
@@ -167,9 +152,10 @@ async def test_stream():
     while not data_task.done() and not data_task.cancelled():
         reply = await user_proxy.a_generate_reply(sender=assistant)
         if reply is not None:
-            res = await user_proxy.a_send(reply, assistant)
-            print("Chat summary and cost:", res.summary, res.cost)
+            await user_proxy.a_send(reply, assistant)
+            # print("Chat summary and cost:", res.summary, res.cost)


 if __name__ == "__main__":
-    asyncio.run(test_stream())
+    # asyncio.run(test_stream())
+    asyncio.run(test_async_groupchat())
@@ -17,9 +17,10 @@ from conftest import skip_openai  # noqa: E402
 @pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
 @pytest.mark.asyncio
 async def test_async_chats():
-    config_list = autogen.config_list_from_json(
+    config_list_35 = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
     )

     financial_tasks = [
@@ -32,16 +33,16 @@ async def test_async_chats():

     financial_assistant_1 = AssistantAgent(
         name="Financial_assistant_1",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="You are a knowledgeable AI Assistant. Reply TERMINATE when everything is done.",
     )
     financial_assistant_2 = AssistantAgent(
         name="Financial_assistant_2",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         system_message="""
 You are a professional writer, known for
@@ -11,26 +11,19 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
 import autogen

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
+from conftest import reason, skip_openai  # noqa: E402

-try:
-    from openai import OpenAI
-except ImportError:
-    skip = True
-else:
-    skip = False or skip_openai
-

-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_async_get_human_input():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
         name="assistant",
         max_consecutive_auto_reply=2,
-        llm_config={"seed": 41, "config_list": config_list, "temperature": 0},
+        llm_config={"config_list": config_list, "temperature": 0},
     )

     user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)
@@ -48,10 +41,10 @@ async def test_async_get_human_input():
     print("Human input:", res.human_input)


-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_async_max_turn():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
@@ -79,5 +72,5 @@ async def test_async_max_turn():


 if __name__ == "__main__":
-    asyncio.run(test_async_get_human_input())
+    # asyncio.run(test_async_get_human_input())
     asyncio.run(test_async_max_turn())
@@ -9,10 +9,10 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
 from typing_extensions import Annotated

 import autogen
-from autogen import AssistantAgent, GroupChat, GroupChatManager, UserProxyAgent, initiate_chats
+from autogen import AssistantAgent, GroupChat, GroupChatManager, UserProxyAgent, filter_config, initiate_chats

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
+from conftest import reason, skip_openai  # noqa: E402

 config_list = (
     []
@@ -23,6 +23,18 @@ config_list = (
     )
 )

+config_list_35 = (
+    []
+    if skip_openai
+    else autogen.config_list_from_json(
+        OAI_CONFIG_LIST,
+        file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
+    )
+)
+
+config_list_tool = filter_config(config_list_35, {"tags": ["tool"]})
+

 def test_chat_messages_for_summary():
     assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER", code_execution_config={"use_docker": False})
@@ -45,7 +57,7 @@ def test_chat_messages_for_summary():
     assert len(messages) == 2


-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_group():
     financial_tasks = [
         """What are the full names of NVDA and TESLA.""",
@@ -68,12 +80,12 @@ def test_chats_group():

     financial_assistant = AssistantAgent(
         name="Financial_assistant",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )

     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="""
 You are a professional writer, known for
 your insightful and engaging articles.
@@ -87,7 +99,7 @@ def test_chats_group():
         system_message="""Critic. Double check plan, claims, code from other agents and provide feedback. Check whether the plan includes adding verifiable info such as source URL.
 Reply "TERMINATE" in the end when everything is done.
 """,
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )

     groupchat_1 = GroupChat(agents=[user_proxy, financial_assistant, critic], messages=[], max_round=3)
@@ -97,7 +109,7 @@ def test_chats_group():
     manager_1 = GroupChatManager(
         groupchat=groupchat_1,
         name="Research_manager",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         code_execution_config={
             "last_n_messages": 1,
             "work_dir": "groupchat",
@@ -108,7 +120,7 @@ def test_chats_group():
     manager_2 = GroupChatManager(
         groupchat=groupchat_2,
         name="Writing_manager",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         code_execution_config={
             "last_n_messages": 1,
             "work_dir": "groupchat",
@@ -154,7 +166,7 @@ def test_chats_group():
     print(all_res[1].summary)


-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats():
     import random

@@ -182,17 +194,17 @@ def test_chats():
     func = Function()
     financial_assistant_1 = AssistantAgent(
         name="Financial_assistant_1",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         function_map={"get_random_number": func.get_random_number},
     )
     financial_assistant_2 = AssistantAgent(
         name="Financial_assistant_2",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         function_map={"get_random_number": func.get_random_number},
     )
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         system_message="""
 You are a professional writer, known for
@@ -284,7 +296,7 @@ def test_chats():
     # print(blogpost.summary, insights_and_blogpost)


-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_general():
     financial_tasks = [
         """What are the full names of NVDA and TESLA.""",
@@ -296,15 +308,15 @@ def test_chats_general():

     financial_assistant_1 = AssistantAgent(
         name="Financial_assistant_1",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
     financial_assistant_2 = AssistantAgent(
         name="Financial_assistant_2",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         system_message="""
 You are a professional writer, known for
@@ -388,7 +400,7 @@ def test_chats_general():
     # print(blogpost.summary, insights_and_blogpost)


-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_exceptions():
     financial_tasks = [
         """What are the full names of NVDA and TESLA.""",
@@ -472,10 +484,10 @@ def test_chats_exceptions():
     )


-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_w_func():
     llm_config = {
-        "config_list": config_list,
+        "config_list": config_list_tool,
         "timeout": 120,
     }

@@ -528,9 +540,9 @@ def test_chats_w_func():
     print(res.summary, res.cost, res.chat_history)


-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_udf_message_in_chats():
-    llm_config = {"config_list": config_list}
+    llm_config_35 = {"config_list": config_list_35}

     research_task = """
 ## NVDA (NVIDIA Corporation)
@@ -560,11 +572,11 @@ def test_udf_message_in_chats():

     researcher = autogen.AssistantAgent(
         name="Financial_researcher",
-        llm_config=llm_config,
+        llm_config=llm_config_35,
     )
     writer = autogen.AssistantAgent(
         name="Writer",
-        llm_config=llm_config,
+        llm_config=llm_config_35,
         system_message="""
 You are a professional writer, known for
 your insightful and engaging articles.
@@ -609,8 +621,8 @@ def test_udf_message_in_chats():


 if __name__ == "__main__":
-    # test_chats()
-    test_chats_general()
+    test_chats()
+    # test_chats_general()
     # test_chats_exceptions()
     # test_chats_group()
     # test_chats_w_func()
@@ -21,10 +21,9 @@ from autogen.agentchat.conversable_agent import register_function
 from autogen.exception_utils import InvalidCarryOverType, SenderRequired

 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import MOCK_OPEN_AI_API_KEY, skip_openai  # noqa: E402
+from conftest import MOCK_OPEN_AI_API_KEY, reason, skip_openai  # noqa: E402

 here = os.path.abspath(os.path.dirname(__file__))
-REASON = "requested to skip openai tests"


 @pytest.fixture
@@ -918,13 +917,13 @@ def test_register_functions():

 @pytest.mark.skipif(
     skip_openai,
-    reason=REASON,
+    reason=reason,
 )
 def test_function_registration_e2e_sync() -> None:
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         filter_dict={
-            "model": ["gpt-4", "gpt-4-0314", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "tags": ["tool"],
         },
         file_location=KEY_LOC,
     )
@@ -995,7 +994,7 @@ def test_function_registration_e2e_sync() -> None:

 @pytest.mark.skipif(
     skip_openai,
-    reason=REASON,
+    reason=reason,
 )
 @pytest.mark.asyncio()
 async def test_function_registration_e2e_async() -> None:
@@ -1071,15 +1070,15 @@ async def test_function_registration_e2e_async() -> None:
     stopwatch_mock.assert_called_once_with(num_seconds="2")


-@pytest.mark.skipif(skip_openai, reason=REASON)
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_max_turn():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"model": ["gpt-3.5-turbo"]})

     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
         name="assistant",
         max_consecutive_auto_reply=10,
-        llm_config={"timeout": 600, "cache_seed": 41, "config_list": config_list},
+        llm_config={"config_list": config_list},
     )

     user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)
@@ -1093,7 +1092,7 @@ def test_max_turn():
     assert len(res.chat_history) <= 6


-@pytest.mark.skipif(skip_openai, reason=REASON)
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_message_func():
     import random

@@ -1149,7 +1148,7 @@ def test_message_func():
     print(chat_res_play.summary)


-@pytest.mark.skipif(skip_openai, reason=REASON)
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_summary():
     import random

@@ -1161,8 +1160,7 @@ def test_summary():
         return random.randint(0, 100)

     config_list = autogen.config_list_from_json(
-        OAI_CONFIG_LIST,
-        file_location=KEY_LOC,
+        OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]}
     )

     def my_message_play(sender, recipient, context):
@@ -1322,5 +1320,6 @@ if __name__ == "__main__":
     # test_no_llm_config()
     # test_max_turn()
     # test_process_before_send()
-    test_message_func()
+    # test_message_func()
     test_summary()
+    # test_function_registration_e2e_sync()
@@ -45,21 +45,18 @@ async def test_function_call_groupchat(key, value, sync):
             self.call_count += 1
             return random.randint(0, 100)

-    config_list_gpt4 = autogen.config_list_from_json(
+    # llm_config without functions
+    config_list_35 = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
-        filter_dict={
-            "model": ["gpt-4", "gpt-4-0314", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
-        },
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
     )
+    llm_config_no_function = {"config_list": config_list_35}
+    config_list_tool = autogen.filter_config(config_list_35, {"tags": ["tool"]})
     llm_config = {
-        "config_list": config_list_gpt4,
-        "cache_seed": 42,
+        "config_list": config_list_tool,
         key: value,
     }
-    # llm_config without functions
-    llm_config_no_function = llm_config.copy()
-    del llm_config_no_function[key]

     func = Function()
     user_proxy = autogen.UserProxyAgent(
@@ -15,7 +15,7 @@ from conftest import reason, skip_openai  # noqa: E402

 @pytest.mark.skipif(skip_openai, reason=reason)
 def test_get_human_input():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
@@ -38,14 +38,13 @@ def test_math_user_proxy_agent():
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "tags": ["gpt-3.5-turbo"],
         },
     )
     assistant = AssistantAgent(
         "assistant",
         system_message="You are a helpful assistant.",
         llm_config={
-            "timeout": 600,
             "cache_seed": 42,
             "config_list": config_list,
         },
@@ -127,5 +126,5 @@ def test_generate_prompt():
 if __name__ == "__main__":
     # test_add_remove_print()
     # test_execute_one_python_code()
-    test_generate_prompt()
+    # test_generate_prompt()
     test_math_user_proxy_agent()
@@ -16,6 +16,11 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST  # noqa: E402
 @pytest.mark.skipif(skip_openai, reason=reason)
 def test_nested():
     config_list = autogen.config_list_from_json(env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC)
+    config_list_35 = autogen.config_list_from_json(
+        OAI_CONFIG_LIST,
+        file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
+    )
     llm_config = {"config_list": config_list}

     tasks = [
@@ -60,13 +65,13 @@ def test_nested():

     assistant = autogen.AssistantAgent(
         name="Assistant",
-        llm_config={"config_list": config_list},
+        llm_config=False,
         # is_termination_msg=lambda x: x.get("content", "") == "",
     )

     assistant_2 = autogen.AssistantAgent(
         name="Assistant",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         # is_termination_msg=lambda x: x.get("content", "") == "",
     )

@@ -94,7 +99,7 @@ def test_nested():

     writer = autogen.AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="""
 You are a professional writer, known for
 your insightful and engaging articles.
@@ -105,7 +110,7 @@ def test_nested():

     autogen.AssistantAgent(
         name="Reviewer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="""
 You are a compliance reviewer, known for your thoroughness and commitment to standards.
 Your task is to scrutinize content for any harmful elements or regulatory violations, ensuring
@@ -130,7 +135,10 @@ def test_nested():
         trigger=user,
     )
     user.initiate_chats(
-        [{"recipient": assistant, "message": tasks[0], "max_turns": 1}, {"recipient": assistant_2, "message": tasks[1]}]
+        [
+            {"recipient": assistant, "message": tasks[0], "max_turns": 1},
+            {"recipient": assistant_2, "message": tasks[1], "max_turns": 1},
+        ]
     )
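Across the files above, the local `try: from openai import OpenAI ... skip = ...` blocks are replaced by a shared `skip_openai`/`reason` pair imported from `conftest`. A minimal sketch of the conftest.py pattern those imports rely on; the option name and exact wiring here are assumptions for illustration, not taken from this commit:

```python
# conftest.py (sketch): a module-level flag plus its skip reason, toggled by a
# hypothetical pytest command-line option.
skip_openai = False
reason = "requested to skip openai tests"


def pytest_addoption(parser):
    # Assumed flag name for illustration.
    parser.addoption("--skip-openai", action="store_true", help="skip tests that call the OpenAI API")


def pytest_configure(config):
    global skip_openai
    skip_openai = config.getoption("--skip-openai", False)
```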