autogen/test/oai/test_client_stream.py
Davor Runje 4b5ec5a52f
Add decorator for function calling (#1018)
* add function decorator to conversable agent (see the usage sketch after this commit message)

* polishing

* polishing

* added function decorator to the notebook with async function calls

* added support for return type hint and JSON encoding of returned value if needed

* polishing

* polishing

* refactored async case

* Python 3.8 support added

* polishing

* polishing

* missing docs added

* refactoring and changes as requested

* getLogger

* documentation added

* test fix

* test fix

* added testing of agentchat_function_call_currency_calculator.ipynb to test_notebook.py

* added support for Pydantic parameters in function decorator

* polishing

* Update website/docs/Use-Cases/agent_chat.md

Co-authored-by: Li Jiang <bnujli@gmail.com>

* Update website/docs/Use-Cases/agent_chat.md

Co-authored-by: Li Jiang <bnujli@gmail.com>

* fixes problem with the logprobs parameter in openai.types.chat.chat_completion.Choice added by openai version 1.5.0

* get 100% code coverage on code added

* updated docs

* default values added to JSON schema

* serialization using json.dump() added for values that are not string or BaseModel

* added limit to openai version because of breaking changes in 1.5.0

* added line-by-line comments in docs to explain the process

* polishing

---------

Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
2023-12-25 16:07:20 +00:00
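
The bullets above describe a decorator that registers a typed Python function for LLM function calling, generating the JSON schema (including default values and Pydantic parameter models) from the signature and JSON-encoding the return value when needed. A minimal sketch of how such a decorator is typically applied, assuming the register_for_llm / register_for_execution names and string annotations for parameter descriptions; neither name appears in this commit message, so treat them as assumptions rather than the commit's exact API:

from typing import Annotated

from autogen import AssistantAgent, UserProxyAgent, config_list_from_json

# Load model configurations from an OAI_CONFIG_LIST file (the same mechanism the tests below use).
config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")

assistant = AssistantAgent(name="assistant", llm_config={"config_list": config_list})
user_proxy = UserProxyAgent(name="user_proxy", human_input_mode="NEVER")

# Assumed decorator names: register_for_llm advertises the function (with a schema
# generated from the type hints) to the LLM; register_for_execution lets the user
# proxy execute the resulting function call.
@user_proxy.register_for_execution()
@assistant.register_for_llm(description="Get the current weather for a location.")
def get_current_weather(
    location: Annotated[str, "The city and state, e.g. San Francisco, CA"],
) -> str:
    return f"The weather in {location} is sunny."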

86 lines
2.8 KiB
Python

import pytest
from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
from test_utils import OAI_CONFIG_LIST, KEY_LOC

# Skip every test in this module when the openai>=1 client is not installed.
try:
    from openai import OpenAI
except ImportError:
    skip = True
else:
    skip = False


@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_aoai_chat_completion_stream():
    # Stream a chat completion through an Azure OpenAI ("aoai") configuration.
    config_list = config_list_from_json(
        env_or_file=OAI_CONFIG_LIST,
        file_location=KEY_LOC,
        filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
    )
    client = OpenAIWrapper(config_list=config_list)
    response = client.create(messages=[{"role": "user", "content": "2+2="}], stream=True)
    print(response)
    print(client.extract_text_or_completion_object(response))


@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_chat_completion_stream():
    # Stream a chat completion through a plain OpenAI configuration.
    config_list = config_list_from_json(
        env_or_file=OAI_CONFIG_LIST,
        file_location=KEY_LOC,
        filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
    )
    client = OpenAIWrapper(config_list=config_list)
    response = client.create(messages=[{"role": "user", "content": "1+1="}], stream=True)
    print(response)
    print(client.extract_text_or_completion_object(response))


@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_chat_functions_stream():
    # Stream a chat completion that may return a function call.
    config_list = config_list_from_json(
        env_or_file=OAI_CONFIG_LIST,
        file_location=KEY_LOC,
        filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
    )
    functions = [
        {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                },
                "required": ["location"],
            },
        },
    ]
    client = OpenAIWrapper(config_list=config_list)
    response = client.create(
        messages=[{"role": "user", "content": "What's the weather like today in San Francisco?"}],
        functions=functions,
        stream=True,
    )
    print(response)
    print(client.extract_text_or_completion_object(response))


@pytest.mark.skipif(skip, reason="openai>=1 not installed")
def test_completion_stream():
    # Stream a legacy (non-chat) completion using the instruct model.
    config_list = config_list_openai_aoai(KEY_LOC)
    client = OpenAIWrapper(config_list=config_list)
    response = client.create(prompt="1+1=", model="gpt-3.5-turbo-instruct", stream=True)
    print(response)
    print(client.extract_text_or_completion_object(response))


if __name__ == "__main__":
    test_aoai_chat_completion_stream()
    test_chat_completion_stream()
    test_chat_functions_stream()
    test_completion_stream()
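
Each test above selects credentials from an OAI_CONFIG_LIST file through config_list_from_json plus a filter_dict that keeps only the matching entries. A minimal sketch of the kind of entries such a file holds, written as the equivalent Python literal; the keys are the ones OpenAIWrapper reads, but every value here is a placeholder and the api_version is an assumption:

# Illustrative contents of an OAI_CONFIG_LIST file, shown as a Python literal.
# All values below are placeholders.
example_config_list = [
    {
        "model": "gpt-3.5-turbo",
        "api_key": "<OpenAI API key>",
    },
    {
        "model": "gpt-35-turbo",
        "api_key": "<Azure OpenAI API key>",
        "base_url": "https://<resource-name>.openai.azure.com/",
        "api_type": "azure",
        "api_version": "2023-08-01-preview",  # assumption: use whatever version your deployment supports
    },
]

With entries like these, the filter_dict in test_aoai_chat_completion_stream keeps only the Azure entry, while the other tests filter on the model name alone.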