Skip tests that depend on OpenAI via --skip-openai (#1097)

* --skip-openai

* All tests pass

* Update build.yml

* Update Contribute.md

* Fix for failing Ubuntu tests

* More tests skipped, fixing 3.10 build

* Apply suggestions from code review

Co-authored-by: Qingyun Wu <qingyun0327@gmail.com>

* Added more comments

* fixed test__wrap_function_*

---------

Co-authored-by: Qingyun Wu <qingyun0327@gmail.com>
Co-authored-by: Davor Runje <davor@airt.ai>
This commit is contained in:
Maxim Saplin 2023-12-31 22:37:21 +03:00 committed by GitHub
parent 3b0e059699
commit c80df8acab
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 139 additions and 107 deletions

View File

@ -41,17 +41,15 @@ jobs:
pip install -e . pip install -e .
python -c "import autogen" python -c "import autogen"
pip install -e. pytest mock pip install -e. pytest mock
pip uninstall -y openai
- name: Test with pytest - name: Test with pytest
if: matrix.python-version != '3.10' if: matrix.python-version != '3.10'
run: | run: |
pytest test pytest test --skip-openai
- name: Coverage - name: Coverage
if: matrix.python-version == '3.10' if: matrix.python-version == '3.10'
run: | run: |
pip install -e .[test] pip install -e .[test]
pip uninstall -y openai coverage run -a -m pytest test --ignore=test/agentchat/contrib --skip-openai
coverage run -a -m pytest test --ignore=test/agentchat/contrib
coverage xml coverage xml
- name: Upload coverage to Codecov - name: Upload coverage to Codecov
if: matrix.python-version == '3.10' if: matrix.python-version == '3.10'

4
.gitignore vendored
View File

@ -171,3 +171,7 @@ test/my_tmp/*
# Storage for the AgentEval output # Storage for the AgentEval output
test/test_files/agenteval-in-out/out/ test/test_files/agenteval-in-out/out/
# Files created by tests
*tmp_code_*
test/agentchat/test_agent_scripts/*

View File

@ -5,6 +5,7 @@ import sys
from packaging.requirements import Requirement from packaging.requirements import Requirement
from autogen.agentchat.contrib.agent_builder import AgentBuilder from autogen.agentchat.contrib.agent_builder import AgentBuilder
from autogen import UserProxyAgent from autogen import UserProxyAgent
from conftest import skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), "..")) sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402 from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402
@ -20,15 +21,15 @@ try:
from openai.types.completion import Completion from openai.types.completion import Completion
from openai.types.completion_usage import CompletionUsage from openai.types.completion_usage import CompletionUsage
import diskcache import diskcache
OPENAI_INSTALLED = True
except ImportError: except ImportError:
OPENAI_INSTALLED = False skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif( @pytest.mark.skipif(
not OPENAI_INSTALLED, skip,
reason="do not run when dependency is not installed", reason="openai not installed OR requested to skip",
) )
def test_build(): def test_build():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4") builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")
@ -57,8 +58,8 @@ def test_build():
@pytest.mark.skipif( @pytest.mark.skipif(
not OPENAI_INSTALLED, skip,
reason="do not run when dependency is not installed", reason="openai not installed OR requested to skip",
) )
def test_save(): def test_save():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4") builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")
@ -93,8 +94,8 @@ def test_save():
@pytest.mark.skipif( @pytest.mark.skipif(
not OPENAI_INSTALLED, skip,
reason="do not run when dependency is not installed", reason="openai not installed OR requested to skip",
) )
def test_load(): def test_load():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4") builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")
@ -128,8 +129,8 @@ def test_load():
@pytest.mark.skipif( @pytest.mark.skipif(
not OPENAI_INSTALLED, skip,
reason="do not run when dependency is not installed", reason="openai not installed OR requested to skip",
) )
def test_clear_agent(): def test_clear_agent():
builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4") builder = AgentBuilder(config_path=oai_config_path, builder_model="gpt-4", agent_model="gpt-4")

View File

@ -2,6 +2,7 @@ import pytest
import sys import sys
import autogen import autogen
import os import os
from conftest import skip_openai
from autogen.agentchat.contrib.compressible_agent import CompressibleAgent from autogen.agentchat.contrib.compressible_agent import CompressibleAgent
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
@ -19,15 +20,15 @@ config_list = autogen.config_list_from_json(
try: try:
import openai import openai
OPENAI_INSTALLED = True
except ImportError: except ImportError:
OPENAI_INSTALLED = False skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_mode_compress(): def test_mode_compress():
conversations = {} conversations = {}
@ -65,8 +66,8 @@ def test_mode_compress():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_mode_customized(): def test_mode_customized():
try: try:
@ -135,8 +136,8 @@ def test_mode_customized():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or not OPENAI_INSTALLED, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_compress_message(): def test_compress_message():
assistant = CompressibleAgent( assistant = CompressibleAgent(
@ -169,6 +170,10 @@ def test_compress_message():
assert is_success, "Compression failed." assert is_success, "Compression failed."
@pytest.mark.skipif(
skip,
reason="do not run if dependency is not installed OR requested to skip",
)
def test_mode_terminate(): def test_mode_terminate():
assistant = CompressibleAgent( assistant = CompressibleAgent(
name="assistant", name="assistant",

View File

@ -3,6 +3,7 @@ import os
import sys import sys
import autogen import autogen
from autogen import OpenAIWrapper from autogen import OpenAIWrapper
from conftest import skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), "..")) sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402 from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402
@ -11,10 +12,10 @@ try:
import openai import openai
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
from autogen.oai.openai_utils import retrieve_assistants_by_name from autogen.oai.openai_utils import retrieve_assistants_by_name
skip_test = False
except ImportError: except ImportError:
skip_test = True skip = True
else:
skip = False or skip_openai
config_list = autogen.config_list_from_json( config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"api_type": ["openai"]} OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"api_type": ["openai"]}
@ -26,8 +27,8 @@ def ask_ossinsight(question):
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_gpt_assistant_chat(): def test_gpt_assistant_chat():
ossinsight_api_schema = { ossinsight_api_schema = {
@ -73,8 +74,8 @@ def test_gpt_assistant_chat():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_get_assistant_instructions(): def test_get_assistant_instructions():
""" """
@ -97,8 +98,8 @@ def test_get_assistant_instructions():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_gpt_assistant_instructions_overwrite(): def test_gpt_assistant_instructions_overwrite():
""" """
@ -142,8 +143,8 @@ def test_gpt_assistant_instructions_overwrite():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_gpt_assistant_existing_no_instructions(): def test_gpt_assistant_existing_no_instructions():
""" """
@ -178,8 +179,8 @@ def test_gpt_assistant_existing_no_instructions():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_get_assistant_files(): def test_get_assistant_files():
""" """
@ -212,8 +213,8 @@ def test_get_assistant_files():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_assistant_retrieval(): def test_assistant_retrieval():
""" """
@ -283,8 +284,8 @@ def test_assistant_retrieval():
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip_test, sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows or dependency is not installed", reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
) )
def test_assistant_mismatch_retrieval(): def test_assistant_mismatch_retrieval():
"""Test function to check if the GPTAssistantAgent can filter out the mismatch assistant""" """Test function to check if the GPTAssistantAgent can filter out the mismatch assistant"""

View File

@ -2,6 +2,7 @@ import pytest
import os import os
import sys import sys
from autogen import ConversableAgent, config_list_from_json from autogen import ConversableAgent, config_list_from_json
from conftest import skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), "..")) sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from test_assistant_agent import OAI_CONFIG_LIST, KEY_LOC # noqa: E402 from test_assistant_agent import OAI_CONFIG_LIST, KEY_LOC # noqa: E402
@ -12,7 +13,7 @@ try:
except ImportError: except ImportError:
skip = True skip = True
else: else:
skip = False skip = False or skip_openai
try: try:
from termcolor import colored from termcolor import colored

View File

@ -2,23 +2,26 @@ import os
import sys import sys
import pytest import pytest
import autogen import autogen
from conftest import skip_openai
from autogen.agentchat import AssistantAgent, UserProxyAgent from autogen.agentchat import AssistantAgent, UserProxyAgent
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
KEY_LOC = "notebook" KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST" OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
@pytest.mark.skipif( @pytest.mark.skipif(
sys.platform in ["darwin", "win32"], sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows", reason="do not run on MacOS or windows OR openai not installed OR requested to skip",
) )
def test_ai_user_proxy_agent(): def test_ai_user_proxy_agent():
try:
import openai
except ImportError:
return
conversations = {} conversations = {}
# autogen.ChatCompletion.start_logging(conversations) # autogen.ChatCompletion.start_logging(conversations)
@ -57,11 +60,8 @@ def test_ai_user_proxy_agent():
print(conversations) print(conversations)
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5): def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json( config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST, OAI_CONFIG_LIST,
file_location=KEY_LOC, file_location=KEY_LOC,
@ -115,12 +115,8 @@ If "Thank you" or "You\'re welcome" are said in the conversation, then say TERMI
assert not isinstance(user.use_docker, bool) # None or str assert not isinstance(user.use_docker, bool) # None or str
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=10): def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=10):
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, file_location=KEY_LOC) config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, file_location=KEY_LOC)
conversations = {} conversations = {}
# autogen.ChatCompletion.start_logging(conversations) # autogen.ChatCompletion.start_logging(conversations)
@ -160,12 +156,8 @@ print('Hello world!')
# autogen.ChatCompletion.stop_logging() # autogen.ChatCompletion.stop_logging()
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10): def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json( config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST, OAI_CONFIG_LIST,
file_location=KEY_LOC, file_location=KEY_LOC,

View File

@ -1,8 +1,16 @@
import pytest import pytest
import asyncio import asyncio
import autogen import autogen
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
def get_market_news(ind, ind_upper): def get_market_news(ind, ind_upper):
data = { data = {
@ -45,13 +53,9 @@ def get_market_news(ind, ind_upper):
return feeds_summary return feeds_summary
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_async_groupchat(): async def test_async_groupchat():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC) config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
llm_config = { llm_config = {
@ -91,12 +95,9 @@ async def test_async_groupchat():
assert len(user_proxy.chat_messages) > 0 assert len(user_proxy.chat_messages) > 0
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_stream(): async def test_stream():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC) config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
data = asyncio.Future() data = asyncio.Future()

View File

@ -1,15 +1,20 @@
import asyncio import asyncio
import autogen import autogen
import pytest import pytest
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_async_get_human_input(): async def test_async_get_human_input():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC) config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
# create an AssistantAgent instance named "assistant" # create an AssistantAgent instance named "assistant"

View File

@ -2,6 +2,7 @@ import copy
from typing import Any, Callable, Dict, Literal from typing import Any, Callable, Dict, Literal
import pytest import pytest
from unittest.mock import patch
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from typing_extensions import Annotated from typing_extensions import Annotated
@ -421,7 +422,7 @@ def test__wrap_function_sync():
else: else:
raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}") raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")
agent = ConversableAgent(name="agent", llm_config={}) agent = ConversableAgent(name="agent", llm_config=False)
@agent._wrap_function @agent._wrap_function
def currency_calculator( def currency_calculator(
@ -457,7 +458,7 @@ async def test__wrap_function_async():
else: else:
raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}") raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")
agent = ConversableAgent(name="agent", llm_config={}) agent = ConversableAgent(name="agent", llm_config=False)
@agent._wrap_function @agent._wrap_function
async def currency_calculator( async def currency_calculator(

View File

@ -1,17 +1,21 @@
try:
from openai import OpenAI
except ImportError:
OpenAI = None
import pytest import pytest
import asyncio import asyncio
import json import json
import autogen import autogen
from conftest import skip_openai
from autogen.math_utils import eval_math_responses from autogen.math_utils import eval_math_responses
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
import sys import sys
try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
@pytest.mark.skipif(OpenAI is None, reason="openai>=1 not installed")
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_eval_math_responses(): def test_eval_math_responses():
config_list = autogen.config_list_from_models( config_list = autogen.config_list_from_models(
KEY_LOC, exclude="aoai", model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"] KEY_LOC, exclude="aoai", model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"]
@ -190,8 +194,8 @@ async def test_a_execute_function():
@pytest.mark.skipif( @pytest.mark.skipif(
not OpenAI or not sys.version.startswith("3.10"), skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10", reason="do not run if openai is not installed OR requested to skip OR py!=3.10",
) )
def test_update_function(): def test_update_function():
config_list_gpt4 = autogen.config_list_from_json( config_list_gpt4 = autogen.config_list_from_json(

View File

@ -1,6 +1,7 @@
import autogen import autogen
import pytest import pytest
import sys import sys
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try: try:
@ -8,7 +9,7 @@ try:
except ImportError: except ImportError:
skip = True skip = True
else: else:
skip = False skip = False or skip_openai
@pytest.mark.skipif( @pytest.mark.skipif(

View File

@ -6,6 +6,7 @@ from autogen.agentchat.contrib.math_user_proxy_agent import (
_remove_print, _remove_print,
_add_print_to_last_line, _add_print_to_last_line,
) )
from conftest import skip_openai
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
try: try:
@ -13,7 +14,7 @@ try:
except ImportError: except ImportError:
skip = True skip = True
else: else:
skip = False skip = False or skip_openai
@pytest.mark.skipif( @pytest.mark.skipif(

16
test/conftest.py Normal file
View File

@ -0,0 +1,16 @@
import pytest
skip_openai = False
# Registers command-line option '--skip-openai' via pytest hook.
# When this flag is set, it indicates that tests requiring OpenAI should be skipped.
def pytest_addoption(parser):
parser.addoption("--skip-openai", action="store_true", help="Skip all tests that require openai")
# pytest hook implementation extracting the '--skip-openai' command line arg and exposing it globally
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
global skip_openai
skip_openai = config.getoption("--skip-openai", False)

View File

@ -1,18 +1,19 @@
import pytest import pytest
from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
from conftest import skip_openai
TOOL_ENABLED = False TOOL_ENABLED = False
try: try:
from openai import OpenAI from openai import OpenAI
from openai.types.chat.chat_completion import ChatCompletionMessage
except ImportError:
skip = True
else:
skip = False
import openai import openai
if openai.__version__ >= "1.1.0": if openai.__version__ >= "1.1.0":
TOOL_ENABLED = True TOOL_ENABLED = True
from openai.types.chat.chat_completion import ChatCompletionMessage
except ImportError:
skip = True
else:
skip = False or skip_openai
KEY_LOC = "notebook" KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST" OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
@ -35,7 +36,7 @@ def test_aoai_chat_completion():
print(client.extract_text_or_completion_object(response)) print(client.extract_text_or_completion_object(response))
@pytest.mark.skipif(skip and not TOOL_ENABLED, reason="openai>=1.1.0 not installed") @pytest.mark.skipif(skip or not TOOL_ENABLED, reason="openai>=1.1.0 not installed")
def test_oai_tool_calling_extraction(): def test_oai_tool_calling_extraction():
config_list = config_list_from_json( config_list = config_list_from_json(
env_or_file=OAI_CONFIG_LIST, env_or_file=OAI_CONFIG_LIST,

View File

@ -1,12 +1,13 @@
import pytest import pytest
from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai from autogen import OpenAIWrapper, config_list_from_json, config_list_openai_aoai
from conftest import skip_openai
try: try:
from openai import OpenAI from openai import OpenAI
except ImportError: except ImportError:
skip = True skip = True
else: else:
skip = False skip = False or skip_openai
KEY_LOC = "notebook" KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST" OAI_CONFIG_LIST = "OAI_CONFIG_LIST"

View File

@ -1,13 +1,14 @@
import sys import sys
import os import os
import pytest import pytest
from conftest import skip_openai
try: try:
import openai import openai
skip = False
except ImportError: except ImportError:
skip = True skip = True
else:
skip = False or skip_openai
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))

View File

@ -1,6 +1,8 @@
""" """
Unit test for retrieve_utils.py Unit test for retrieve_utils.py
""" """
import pytest
try: try:
import chromadb import chromadb
from autogen.retrieve_utils import ( from autogen.retrieve_utils import (
@ -18,8 +20,6 @@ except ImportError:
else: else:
skip = False skip = False
import os import os
import sys
import pytest
try: try:
from unstructured.partition.auto import partition from unstructured.partition.auto import partition

View File

@ -120,22 +120,20 @@ Tests are automatically run via GitHub actions. There are two workflows:
1. [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml) 1. [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml)
1. [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml) 1. [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml)
The first workflow is required to pass for all PRs (and it doesn't do any OpenAI calls). The second workflow is required for changes that affect the OpenAI tests (and does actually call LLM). The second workflow requires approval to run. When writing tests that require OpenAI calls, please use [`pytest.mark.skipif`](https://github.com/microsoft/autogen/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py#L19) to make them run in one python version only when `openai` package is installed. If additional dependency for this test is required, install the dependency in the corresponding python version in [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml). The first workflow is required to pass for all PRs (and it doesn't do any OpenAI calls). The second workflow is required for changes that affect the OpenAI tests (and does actually call LLM). The second workflow requires approval to run. When writing tests that require OpenAI calls, please use [`pytest.mark.skipif`](https://github.com/microsoft/autogen/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py#L19) to make them run only when `openai` package is installed. If additional dependency for this test is required, install the dependency in the corresponding python version in [openai.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/openai.yml).
#### Run non-OpenAI tests #### Run non-OpenAI tests
To run the subset of the tests not depending on `openai` (and not calling LLMs)): To run the subset of the tests not depending on `openai` (and not calling LLMs)):
- Install `pytest` - Install pytest:
- Remove `openai` library, this is required to skip the tests which check for `openai` presence ([`pytest.mark.skipif`](https://github.com/microsoft/autogen/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py#L19))
- Run the tests from the `test` folder. Make sure they all pass, this is required for [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml) checks to pass
- Reinstall `openai`
``` ```
pip install pytest pip install pytest
pip uninstall -y openai
pytest test
pip install openai
``` ```
- Run the tests from the `test` folder using the `--skip-openai` flag.
```
pytest test --skip-openai
```
- Make sure all tests pass, this is required for [build.yml](https://github.com/microsoft/autogen/blob/main/.github/workflows/build.yml) checks to pass
### Coverage ### Coverage