Skip tests that depend on OpenAI via --skip-openai (#1097)

* --skip-openai

* All tests pass

* Update build.yml

* Update Contribute.md

* Fix for failing Ubuntu tests

* More tests skipped, fixing 3.10 build

* Apply suggestions from code review

Co-authored-by: Qingyun Wu <qingyun0327@gmail.com>

* Added more comments

* fixed test__wrap_function_*

---------

Co-authored-by: Qingyun Wu <qingyun0327@gmail.com>
Co-authored-by: Davor Runje <davor@airt.ai>
This commit is contained in:
Maxim Saplin 2023-12-31 22:37:21 +03:00 committed by GitHub
parent 3b0e059699
commit c80df8acab
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 139 additions and 107 deletions

View File

@ -41,17 +41,15 @@ jobs:
pip install -e .
python -c "import autogen"
pip install -e. pytest mock
pip uninstall -y openai
- name: Test with pytest
if: matrix.python-version != '3.10'
run: |
pytest test
pytest test --skip-openai
- name: Coverage
if: matrix.python-version == '3.10'
run: |
pip install -e .[test]
pip uninstall -y openai
coverage run -a -m pytest test --ignore=test/agentchat/contrib
coverage run -a -m pytest test --ignore=test/agentchat/contrib --skip-openai
coverage xml
- name: Upload coverage to Codecov
if: matrix.python-version == '3.10'

4
.gitignore vendored
View File

@ -171,3 +171,7 @@ test/my_tmp/*
# Storage for the AgentEval output
test/test_files/agenteval-in-out/out/
# Files created by tests
*tmp_code_*
test/agentchat/test_agent_scripts/*

View File

@ -5,6 +5,7 @@ import sys
from packaging.requirements import Requirement
from autogen.agentchat.contrib.agent_builder import AgentBuilder
from autogen import UserProxyAgent
from conftest import skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402
@ -20,15 +21,15 @@ try:
from openai.types.completion import Completion
from openai.types.completion_usage import CompletionUsage
import diskcache
OPENAI_INSTALLED = True
except ImportError:
OPENAI_INSTALLED = False
skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif(
not OPENAI_INSTALLED,
reason="do not run when dependency is not installed",
skip,
reason="openai not installed OR requested to skip",
)
def test_build():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")
@ -57,8 +58,8 @@ def test_build():
@pytest.mark.skipif(
not OPENAI_INSTALLED,
reason="do not run when dependency is not installed",
skip,
reason="openai not installed OR requested to skip",
)
def test_save():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")
@ -93,8 +94,8 @@ def test_save():
@pytest.mark.skipif(
not OPENAI_INSTALLED,
reason="do not run when dependency is not installed",
skip,
reason="openai not installed OR requested to skip",
)
def test_load():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")
@ -128,8 +129,8 @@ def test_load():
@pytest.mark.skipif(
not OPENAI_INSTALLED,
reason="do not run when dependency is not installed",
skip,
reason="openai not installed OR requested to skip",
)
def test_clear_agent():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")

View File

@ -2,6 +2,7 @@ import pytest
import sys
import autogen
import os
from conftest import skip_openai
from autogen.agentchat.contrib.compressible_agent import CompressibleAgent
here = os.path.abspath(os.path.dirname(__file__))
@ -19,15 +20,15 @@ config_list = autogen.config_list_from_json(
try:
import openai
OPENAI_INSTALLED = True
except ImportError:
OPENAI_INSTALLED = False
skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_mode_compress():
conversations = {}
@ -65,8 +66,8 @@ def test_mode_compress():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_mode_customized():
try:
@ -135,8 +136,8 @@ def test_mode_customized():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_compress_message():
assistant = CompressibleAgent(
@ -169,6 +170,10 @@ def test_compress_message():
assert is_success, "Compression failed."
@pytest.mark.skipif(
skip,
reason="do not run if dependency is not installed OR requested to skip",
)
def test_mode_terminate():
assistant = CompressibleAgent(
name="assistant",

View File

@ -3,6 +3,7 @@ import os
import sys
import autogen
from autogen import OpenAIWrapper
from conftest import skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402
@ -11,10 +12,10 @@ try:
import openai
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
from autogen.oai.openai_utils import retrieve_assistants_by_name
skip_test = False
except ImportError:
skip_test = True
skip = True
else:
skip = False or skip_openai
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"api_type": ["openai"]}
@ -26,8 +27,8 @@ def ask_ossinsight(question):
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_gpt_assistant_chat():
ossinsight_api_schema = {
@ -73,8 +74,8 @@ def test_gpt_assistant_chat():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_get_assistant_instructions():
"""
@ -97,8 +98,8 @@ def test_get_assistant_instructions():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_gpt_assistant_instructions_overwrite():
"""
@ -142,8 +143,8 @@ def test_gpt_assistant_instructions_overwrite():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_gpt_assistant_existing_no_instructions():
"""
@ -178,8 +179,8 @@ def test_gpt_assistant_existing_no_instructions():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_get_assistant_files():
"""
@ -212,8 +213,8 @@ def test_get_assistant_files():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_assistant_retrieval():
"""
@ -283,8 +284,8 @@ def test_assistant_retrieval():
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test,
reason="do not run on MacOS or windows or dependency is not installed",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
)
def test_assistant_mismatch_retrieval():
"""Test function to check if the GPTAssistantAgent can filter out the mismatch assistant"""

View File

@ -2,6 +2,7 @@ import pytest
import os
import sys
from autogen import ConversableAgent, config_list_from_json
from conftest import skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from test_assistant_agent import OAI_CONFIG_LIST, KEY_LOC # noqa: E402
@ -12,7 +13,7 @@ try:
except ImportError:
skip = True
else:
skip = False
skip = False or skip_openai
try:
from termcolor import colored

View File

@ -2,23 +2,26 @@ import os
import sys
import pytest
import autogen
from conftest import skip_openai
from autogen.agentchat import AssistantAgent, UserProxyAgent
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
here = os.path.abspath(os.path.dirname(__file__))
@pytest.mark.skipif(
sys.platform in ["darwin", "win32"],
reason="do not run on MacOS or windows",
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR openai not installed OR requested to skip",
)
def test_ai_user_proxy_agent():
try:
import openai
except ImportError:
return
conversations = {}
# autogen.ChatCompletion.start_logging(conversations)
@ -57,11 +60,8 @@ def test_ai_user_proxy_agent():
print(conversations)
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
@ -115,12 +115,8 @@ If "Thank you" or "You\'re welcome" are said in the conversation, then say TERMI
assert not isinstance(user.use_docker, bool) # None or str
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=10):
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, file_location=KEY_LOC)
conversations = {}
# autogen.ChatCompletion.start_logging(conversations)
@ -160,12 +156,8 @@ print('Hello world!')
# autogen.ChatCompletion.stop_logging()
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,

View File

@ -1,8 +1,16 @@
import pytest
import asyncio
import autogen
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
def get_market_news(ind, ind_upper):
data = {
@ -45,13 +53,9 @@ def get_market_news(ind, ind_upper):
return feeds_summary
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.asyncio
async def test_async_groupchat():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
llm_config = {
@ -91,12 +95,9 @@ async def test_async_groupchat():
assert len(user_proxy.chat_messages) > 0
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.asyncio
async def test_stream():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
data = asyncio.Future()

View File

@ -1,15 +1,20 @@
import asyncio
import autogen
import pytest
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.asyncio
async def test_async_get_human_input():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
# create an AssistantAgent instance named "assistant"

View File

@ -2,6 +2,7 @@ import copy
from typing import Any, Callable, Dict, Literal
import pytest
from unittest.mock import patch
from pydantic import BaseModel, Field
from typing_extensions import Annotated
@ -421,7 +422,7 @@ def test__wrap_function_sync():
else:
raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")
agent = ConversableAgent(name="agent", llm_config={})
agent = ConversableAgent(name="agent", llm_config=False)
@agent._wrap_function
def currency_calculator(
@ -457,7 +458,7 @@ async def test__wrap_function_async():
else:
raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")
agent = ConversableAgent(name="agent", llm_config={})
agent = ConversableAgent(name="agent", llm_config=False)
@agent._wrap_function
async def currency_calculator(

View File

@ -1,17 +1,21 @@
try:
from openai import OpenAI
except ImportError:
OpenAI = None
import pytest
import asyncio
import json
import autogen
from conftest import skip_openai
from autogen.math_utils import eval_math_responses
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
import sys
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif(OpenAI is None, reason="openai>=1 not installed")
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_eval_math_responses():
config_list = autogen.config_list_from_models(
KEY_LOC, exclude="aoai", model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"]
@ -190,8 +194,8 @@ async def test_a_execute_function():
@pytest.mark.skipif(
not OpenAI or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed OR requested to skip OR py!=3.10",
)
def test_update_function():
config_list_gpt4 = autogen.config_list_from_json(

View File

@ -1,6 +1,7 @@
import autogen
import pytest
import sys
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
@ -8,7 +9,7 @@ try:
except ImportError:
skip = True
else:
skip = False
skip = False or skip_openai
@pytest.mark.skipif(

View File

@ -6,6 +6,7 @@ from autogen.agentchat.contrib.math_user_proxy_agent import (
_remove_print,
_add_print_to_last_line,
)
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
@ -13,7 +14,7 @@ try:
except ImportError:
skip = True
else:
skip = False
skip = False or skip_openai
@pytest.mark.skipif(

16
test/conftest.py Normal file
View File

@ -0,0 +1,16 @@
import pytest

# Module-level flag imported by test modules to decide whether tests that
# would call the OpenAI API should be skipped. Off by default; switched on
# by the --skip-openai command line flag (see pytest_configure below).
skip_openai = False

_SKIP_OPENAI_FLAG = "--skip-openai"


def pytest_addoption(parser):
    """Register the --skip-openai command line option with pytest."""
    parser.addoption(_SKIP_OPENAI_FLAG, action="store_true", help="Skip all tests that require openai")


@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Copy the value of the --skip-openai option into the module-level global."""
    global skip_openai
    skip_openai = config.getoption(_SKIP_OPENAI_FLAG, False)

View File

@ -1,18 +1,19 @@
import pytest
from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
from conftest import skip_openai
TOOL_ENABLED = False
try:
from openai import OpenAI
from openai.types.chat.chat_completion import ChatCompletionMessage
except ImportError:
skip = True
else:
skip = False
import openai
if openai.__version__ >= "1.1.0":
TOOL_ENABLED = True
from openai.types.chat.chat_completion import ChatCompletionMessage
except ImportError:
skip = True
else:
skip = False or skip_openai
KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
@ -35,7 +36,7 @@ def test_aoai_chat_completion():
print(client.extract_text_or_completion_object(response))
@pytest.mark.skipif(skip and not TOOL_ENABLED, reason="openai>=1.1.0 not installed")
@pytest.mark.skipif(skip or not TOOL_ENABLED, reason="openai>=1.1.0 not installed")
def test_oai_tool_calling_extraction():
config_list = config_list_from_json(
env_or_file=OAI_CONFIG_LIST,

View File

@ -1,12 +1,13 @@
import pytest
from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
from conftest import skip_openai
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False
skip = False or skip_openai
KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"

View File

@ -1,13 +1,14 @@
import sys
import os
import pytest
from conftest import skip_openai
try:
import openai
skip = False
except ImportError:
skip = True
else:
skip = False or skip_openai
here = os.path.abspath(os.path.dirname(__file__))

View File

@ -1,6 +1,8 @@
"""
Unit test for retrieve_utils.py
"""
import pytest
try:
import chromadb
from autogen.retrieve_utils import (
@ -18,8 +20,6 @@ except ImportError:
else:
skip = False
import os
import sys
import pytest
try:
from unstructured.partition.auto import partition

View File

@ -120,22 +120,20 @@ Tests are automatically run via GitHub actions. There are two workflows:
1. [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml)
1. [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml)
The first workflow is required to pass for all PRs (and it doesn't do any OpenAI calls). The second workflow is required for changes that affect the OpenAI tests (and does actually call LLM). The second workflow requires approval to run. When writing tests that require OpenAI calls, please use [`pytest.mark.skipif`](https://github.com/microsoft/autogen/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py#L19) to make them run in one python version only when `openai` package is installed. If additional dependency for this test is required, install the dependency in the corresponding python version in [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml).
The first workflow is required to pass for all PRs (and it doesn't do any OpenAI calls). The second workflow is required for changes that affect the OpenAI tests (and does actually call LLM). The second workflow requires approval to run. When writing tests that require OpenAI calls, please use [`pytest.mark.skipif`](https://github.com/microsoft/autogen/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py#L19) to make them run only when `openai` package is installed. If additional dependency for this test is required, install the dependency in the corresponding python version in [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml).
#### Run non-OpenAI tests
To run the subset of the tests not depending on `openai` (and not calling LLMs):
- Install `pytest`
- Remove `openai` library, this is required to skip the tests which check for `openai` presence ([`pytest.mark.skipif`](https://github.com/microsoft/autogen/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py#L19))
- Run the tests from the `test` folder. Make sure they all pass, this is required for [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml) checks to pass
- Reinstall `openai`
- Install pytest:
```
pip install pytest
pip uninstall -y openai
pytest test
pip install openai
```
- Run the tests from the `test` folder using the `--skip-openai` flag.
```
pytest test --skip-openai
```
- Make sure all tests pass, this is required for [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml) checks to pass
### Coverage