# Mirrored from https://github.com/microsoft/autogen (synced 2025-08-04).
# Tests for OpenAIWrapper: chat/legacy completion, tool-call extraction,
# cost reporting, and usage-summary bookkeeping.
import pytest

from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai

from test_utils import OAI_CONFIG_LIST, KEY_LOC


# True only when the installed openai client supports the tools API (>= 1.1.0).
TOOL_ENABLED = False

try:
    from openai import OpenAI
    from openai.types.chat.chat_completion import ChatCompletionMessage
except ImportError:
    # openai>=1 is optional; every test below is skipped without it.
    skip = True
else:
    skip = False
    import openai

    def _version_tuple(version):
        """Parse the leading numeric components of a version string into a tuple.

        Comparing raw version strings is lexicographic and misorders releases
        (e.g. "1.10.0" < "1.9.0" as strings); numeric tuples compare correctly.
        Non-numeric suffixes (e.g. "rc1") end the parse of that component.
        """
        parts = []
        for piece in version.split("."):
            digits = ""
            for ch in piece:
                if ch.isdigit():
                    digits += ch
                else:
                    break
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts)

    if _version_tuple(openai.__version__) >= (1, 1, 0):
        TOOL_ENABLED = True
|
|
@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_aoai_chat_completion():
    """Smoke-test chat completion against an Azure OpenAI deployment.

    Requires an OAI_CONFIG_LIST entry with api_type "azure". Prints the raw
    response and the extracted text so failures are easy to diagnose.
    """
    config_list = config_list_from_json(
        env_or_file=OAI_CONFIG_LIST,
        file_location=KEY_LOC,
        # Azure deployments name the model either "gpt-3.5-turbo" or "gpt-35-turbo".
        filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
    )
    client = OpenAIWrapper(config_list=config_list)
    # cache_seed=None disables response caching so the live endpoint is hit.
    response = client.create(messages=[{"role": "user", "content": "2+2="}], cache_seed=None)
    print(response)
    print(client.extract_text_or_completion_object(response))
|
|
|
|
|
|
# BUGFIX: the skip condition was `skip and not TOOL_ENABLED`, which evaluates to
# False (i.e. the test RUNS) when openai>=1 is installed but predates the tools
# API (skip=False, TOOL_ENABLED=False). The test must be skipped whenever either
# openai is missing OR the tools API is unavailable.
@pytest.mark.skipif(skip or not TOOL_ENABLED, reason="openai>=1.1.0 not installed")
def test_oai_tool_calling_extraction():
    """Verify tool-call responses survive extract_text_or_completion_object.

    Sends a weather question with a single function tool defined; prints the
    raw response and the extracted completion object for inspection.
    """
    config_list = config_list_from_json(
        env_or_file=OAI_CONFIG_LIST,
        file_location=KEY_LOC,
        filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
    )
    client = OpenAIWrapper(config_list=config_list)
    response = client.create(
        messages=[
            {
                "role": "user",
                "content": "What is the weather in San Francisco?",
            },
        ],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "getCurrentWeather",
                    "description": "Get the weather in location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                            "unit": {"type": "string", "enum": ["c", "f"]},
                        },
                        "required": ["location"],
                    },
                },
            }
        ],
    )
    print(response)
    print(client.extract_text_or_completion_object(response))
|
|
|
|
|
|
@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_chat_completion():
    """Basic chat-completion round trip using any configured endpoint."""
    configs = config_list_from_json(env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC)
    wrapper = OpenAIWrapper(config_list=configs)
    reply = wrapper.create(messages=[{"role": "user", "content": "1+1="}])
    print(reply)
    print(wrapper.extract_text_or_completion_object(reply))
|
|
|
|
|
|
@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_completion():
    """Legacy (non-chat) completion endpoint smoke test."""
    configs = config_list_openai_aoai(KEY_LOC)
    wrapper = OpenAIWrapper(config_list=configs)
    result = wrapper.create(prompt="1+1=", model="gpt-3.5-turbo-instruct")
    print(result)
    print(wrapper.extract_text_or_completion_object(result))
|
|
|
|
|
|
@pytest.mark.skipif(skip, reason="openai>=1 not installed")
@pytest.mark.parametrize(
    "cache_seed, model",
    [
        (None, "gpt-3.5-turbo-instruct"),
        (42, "gpt-3.5-turbo-instruct"),
        (None, "text-ada-001"),
    ],
)
def test_cost(cache_seed, model):
    """Completion responses expose a cost attribute, with and without caching."""
    oai_configs = config_list_openai_aoai(KEY_LOC)
    wrapper = OpenAIWrapper(config_list=oai_configs, cache_seed=cache_seed)
    result = wrapper.create(prompt="1+3=", model=model)
    print(result.cost)
|
|
|
|
|
|
@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_usage_summary():
    """Exercise usage/cost accounting: record, print, update, clear, re-record."""
    configs = config_list_openai_aoai(KEY_LOC)
    wrapper = OpenAIWrapper(config_list=configs)
    response = wrapper.create(prompt="1+3=", model="gpt-3.5-turbo-instruct", cache_seed=None)

    # A live (uncached) call must register a positive cost in both summaries.
    assert wrapper.actual_usage_summary["total_cost"] > 0, "total_cost should be greater than 0"
    assert wrapper.total_usage_summary["total_cost"] > 0, "total_cost should be greater than 0"

    # Printing the summary must not raise.
    wrapper.print_usage_summary()

    # Re-recording the same response as a cache hit doubles the running total.
    wrapper._update_usage_summary(response, use_cache=True)
    assert (
        wrapper.total_usage_summary["total_cost"] == response.cost * 2
    ), "total_cost should be equal to response.cost * 2"

    # Clearing resets both summaries to None.
    wrapper.clear_usage_summary()
    assert wrapper.actual_usage_summary is None, "actual_usage_summary should be None"
    assert wrapper.total_usage_summary is None, "total_usage_summary should be None"

    # A cached call contributes to the total summary but records no actual usage.
    response = wrapper.create(prompt="1+3=", model="gpt-3.5-turbo-instruct", cache_seed=42)
    assert wrapper.total_usage_summary["total_cost"] > 0, "total_cost should be greater than 0"
    assert wrapper.actual_usage_summary is None, "No actual cost should be recorded"
|
|
|
|
|
|
if __name__ == "__main__":
    test_aoai_chat_completion()
    test_oai_tool_calling_extraction()
    test_chat_completion()
    test_completion()
    # BUGFIX: test_cost requires (cache_seed, model) — pytest supplies them via
    # parametrize, but a bare test_cost() call raised TypeError when run as a
    # script. Cover each parametrized case explicitly instead.
    for cache_seed, model in [
        (None, "gpt-3.5-turbo-instruct"),
        (42, "gpt-3.5-turbo-instruct"),
        (None, "text-ada-001"),
    ]:
        test_cost(cache_seed, model)
    test_usage_summary()
|