Mirror of https://github.com/microsoft/autogen.git (synced 2025-10-18 03:18:58 +00:00)
Update minimum openai version to 1.66.5 as import path changed (#5996)
Resolves #5994. OpenAI moved `openai.types.beta.vector_store` to `openai.types.vector_store` in v1.66.0 (https://github.com/openai/openai-python/compare/v1.65.5...v1.66.0). This commit raises the minimum openai version accordingly, fixes the unit tests, and uses a parameterized fixture to run all client scenarios.
commit 69292e6ff4 (parent d83927e22a)
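The migration is mechanical: `vector_stores` moves off the `beta` namespace on both the client and the types package. A minimal before/after sketch, assuming an `AsyncOpenAI` client (illustrative, not code from this commit):

    # openai < 1.66:
    #   from openai.types.beta.vector_store import VectorStore
    #   vector_store = await client.beta.vector_stores.create()

    # openai >= 1.66:
    from openai import AsyncOpenAI
    from openai.types.vector_store import VectorStore

    async def create_store(client: AsyncOpenAI) -> VectorStore:
        # vector_stores now lives directly on the client rather than under client.beta.
        return await client.vector_stores.create()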
@@ -95,7 +95,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -243,7 +243,7 @@
    "        # Upload the file.\n",
    "        await ctx.cancellation_token.link_future(\n",
    "            asyncio.ensure_future(\n",
-   "                self._client.beta.vector_stores.file_batches.upload_and_poll(\n",
+   "                self._client.vector_stores.file_batches.upload_and_poll(\n",
    "                    vector_store_id=message.vector_store_id,\n",
    "                    files=[(file_name, file_content)],\n",
    "                )\n",
@@ -349,7 +349,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -364,7 +364,7 @@
    ")\n",
    "\n",
    "# Create a vector store to be used for file search.\n",
-   "vector_store = openai.beta.vector_stores.create()\n",
+   "vector_store = openai.vector_stores.create()\n",
    "\n",
    "# Create a thread which is used as the memory for the assistant.\n",
    "thread = openai.beta.threads.create(\n",
@@ -820,7 +820,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "autogen_core",
+   "display_name": ".venv",
    "language": "python",
    "name": "python3"
   },
@@ -834,7 +834,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.11.9"
+  "version": "3.12.3"
  }
 },
 "nbformat": 4,

@@ -27,7 +27,7 @@ azure = [
 ]
 docker = ["docker~=7.0", "asyncio_atexit>=1.0.1"]
 ollama = ["ollama>=0.4.7", "tiktoken>=0.8.0"]
-openai = ["openai>=1.52.2", "tiktoken>=0.8.0", "aiofiles"]
+openai = ["openai>=1.66.5", "tiktoken>=0.8.0", "aiofiles"]
 file-surfer = [
     "autogen-agentchat==0.4.9",
     "magika>=0.6.1rc2",

@@ -52,8 +52,8 @@ from openai.types.beta.file_search_tool_param import FileSearchToolParam
 from openai.types.beta.function_tool_param import FunctionToolParam
 from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter
 from openai.types.beta.threads import Message, MessageDeleted, Run
-from openai.types.beta.vector_store import VectorStore
 from openai.types.shared_params.function_definition import FunctionDefinition
+from openai.types.vector_store import VectorStore

 event_logger = logging.getLogger(EVENT_LOGGER_NAME)

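Code that must straddle both openai versions sometimes guards the import instead; this PR simply raises the version floor to 1.66.5. A hypothetical compatibility shim (not used here):

    try:
        from openai.types.vector_store import VectorStore  # openai >= 1.66
    except ImportError:  # openai < 1.66
        from openai.types.beta.vector_store import VectorStore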
@@ -223,7 +223,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
         tools (Optional[Iterable[Union[Literal["code_interpreter", "file_search"], Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]]]]): Tools the assistant can use
         assistant_id (Optional[str]): ID of existing assistant to use
         thread_id (Optional[str]): ID of existing thread to use
-        metadata (Optional[object]): Additional metadata for the assistant
+        metadata (Optional[Dict[str, str]]): Additional metadata for the assistant.
         response_format (Optional[AssistantResponseFormatOptionParam]): Response format settings
         temperature (Optional[float]): Temperature for response generation
         tool_resources (Optional[ToolResources]): Additional tool configuration
@@ -247,7 +247,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
         ] = None,
         assistant_id: Optional[str] = None,
         thread_id: Optional[str] = None,
-        metadata: Optional[object] = None,
+        metadata: Optional[Dict[str, str]] = None,
         response_format: Optional["AssistantResponseFormatOptionParam"] = None,
         temperature: Optional[float] = None,
         tool_resources: Optional["ToolResources"] = None,
@@ -625,7 +625,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
         # Create vector store if not already created
         if self._vector_store_id is None:
             vector_store: VectorStore = await cancellation_token.link_future(
-                asyncio.ensure_future(self._client.beta.vector_stores.create())
+                asyncio.ensure_future(self._client.vector_stores.create())
             )
             self._vector_store_id = vector_store.id

@@ -644,7 +644,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
         # Create file batch with the file IDs
         await cancellation_token.link_future(
             asyncio.ensure_future(
-                self._client.beta.vector_stores.file_batches.create_and_poll(
+                self._client.vector_stores.file_batches.create_and_poll(
                     vector_store_id=self._vector_store_id, file_ids=file_ids
                 )
             )
@@ -678,7 +678,7 @@ class OpenAIAssistantAgent(BaseChatAgent):
         if self._vector_store_id is not None:
             try:
                 await cancellation_token.link_future(
-                    asyncio.ensure_future(self._client.beta.vector_stores.delete(vector_store_id=self._vector_store_id))
+                    asyncio.ensure_future(self._client.vector_stores.delete(vector_store_id=self._vector_store_id))
                 )
                 self._vector_store_id = None
             except Exception as e:

@@ -13,7 +13,7 @@ from autogen_core import CancellationToken
 from autogen_core.tools._base import BaseTool, Tool
 from autogen_ext.agents.openai import OpenAIAssistantAgent
 from azure.identity import DefaultAzureCredential, get_bearer_token_provider
-from openai import AsyncAzureOpenAI
+from openai import AsyncAzureOpenAI, AsyncOpenAI
 from pydantic import BaseModel

@@ -88,9 +88,9 @@ class FakeCursorPage:
         return False


-def create_mock_openai_client() -> AsyncAzureOpenAI:
+def create_mock_openai_client() -> AsyncOpenAI:
     # Create the base client as an AsyncMock.
-    client = AsyncMock(spec=AsyncAzureOpenAI)
+    client = AsyncMock(spec=AsyncOpenAI)

     # Create a "beta" attribute with the required nested structure.
     beta = MagicMock()
@@ -130,12 +130,12 @@ def create_mock_openai_client() -> AsyncAzureOpenAI:
     beta.threads.runs.retrieve = AsyncMock(return_value=MagicMock(id="run-mock", status="completed"))
     beta.threads.runs.submit_tool_outputs = AsyncMock(return_value=MagicMock(id="run-mock", status="completed"))

-    # Setup beta.vector_stores with create, delete, and file_batches.
-    beta.vector_stores = MagicMock()
-    beta.vector_stores.create = AsyncMock(return_value=MagicMock(id="vector-mock"))
-    beta.vector_stores.delete = AsyncMock(return_value=None)
-    beta.vector_stores.file_batches = MagicMock()
-    beta.vector_stores.file_batches.create_and_poll = AsyncMock(return_value=None)
+    # Setup client.vector_stores with create, delete, and file_batches.
+    client.vector_stores = MagicMock()
+    client.vector_stores.create = AsyncMock(return_value=MagicMock(id="vector-mock"))
+    client.vector_stores.delete = AsyncMock(return_value=None)
+    client.vector_stores.file_batches = MagicMock()
+    client.vector_stores.file_batches.create_and_poll = AsyncMock(return_value=None)

     # Setup client.files with create and delete.
     client.files = MagicMock()
@@ -147,22 +147,33 @@ def create_mock_openai_client() -> AsyncAzureOpenAI:

 # Fixture for the mock client.
 @pytest.fixture
-def mock_openai_client() -> AsyncAzureOpenAI:
+def mock_openai_client() -> AsyncOpenAI:
     return create_mock_openai_client()


-@pytest.fixture
-def client() -> AsyncAzureOpenAI:
+@pytest.fixture(params=["openai", "azure", "mock"])
+def client(request: pytest.FixtureRequest) -> AsyncOpenAI:
+    client_type = request.param
+
+    if client_type == "mock":
+        # Return a mock OpenAI client.
+        return create_mock_openai_client()
+
+    if client_type == "openai":
+        # Check for OpenAI credentials in environment variables.
+        openai_api_key = os.getenv("OPENAI_API_KEY")
+        if openai_api_key:
+            return AsyncOpenAI(api_key=openai_api_key)
+        else:
+            pytest.skip("OPENAI_API_KEY not set in environment variables.")
+
+    # Check for Azure OpenAI credentials in environment variables.
     azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
     api_version = os.getenv("AZURE_OPENAI_API_VERSION", "2024-08-01-preview")
     api_key = os.getenv("AZURE_OPENAI_API_KEY")

-    # Return mock client if credentials not available
-    if not azure_endpoint or not api_key:
-        return create_mock_openai_client()
-
-    # Try Azure CLI credentials if API key not provided
-    if not api_key:
+    if azure_endpoint and not api_key:
+        # Try Azure CLI credentials if API key not provided
         try:
             token_provider = get_bearer_token_provider(
                 DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
@@ -171,14 +182,17 @@ def client() -> AsyncAzureOpenAI:
                 azure_endpoint=azure_endpoint, api_version=api_version, azure_ad_token_provider=token_provider
             )
         except Exception:
-            return create_mock_openai_client()
+            pytest.skip("Failed to obtain Azure CLI credentials.")

-    # Fall back to API key auth if provided
-    return AsyncAzureOpenAI(azure_endpoint=azure_endpoint, api_version=api_version, api_key=api_key)
+    if azure_endpoint and api_key:
+        # Use Azure OpenAI with API key authentication.
+        return AsyncAzureOpenAI(azure_endpoint=azure_endpoint, api_version=api_version, api_key=api_key)
+
+    pytest.skip("AZURE_OPENAI_ENDPOINT not set in environment variables.")


 @pytest.fixture
-def agent(client: AsyncAzureOpenAI) -> OpenAIAssistantAgent:
+def agent(client: AsyncOpenAI) -> OpenAIAssistantAgent:
     tools: List[Union[Literal["code_interpreter", "file_search"], Tool]] = [
         "code_interpreter",
         "file_search",
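The new `client` fixture is parameterized over three backends, so every test that requests it runs once per backend, and individual tests can pin a subset via indirect parametrization (as `test_quiz_creation` does below). A self-contained sketch of the mechanism, with hypothetical names rather than this test file's:

    import pytest

    @pytest.fixture(params=["mock", "real"])
    def backend(request: pytest.FixtureRequest) -> str:
        # pytest instantiates the fixture once per value in params.
        return request.param

    def test_all_backends(backend: str) -> None:
        # Collected twice: test_all_backends[mock] and test_all_backends[real].
        assert backend in ("mock", "real")

    # indirect=True routes the parametrize values into the fixture via
    # request.param, so this test runs only against the "mock" backend.
    @pytest.mark.parametrize("backend", ["mock"], indirect=True)
    def test_mock_only(backend: str) -> None:
        assert backend == "mock"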
@@ -266,6 +280,7 @@ async def test_code_interpreter(


 @pytest.mark.asyncio
+@pytest.mark.parametrize("client", ["mock"], indirect=True)
 async def test_quiz_creation(
     agent: OpenAIAssistantAgent, cancellation_token: CancellationToken, monkeypatch: pytest.MonkeyPatch
 ) -> None:
@@ -322,7 +337,7 @@ async def test_quiz_creation(


 @pytest.mark.asyncio
-async def test_on_reset_behavior(client: AsyncAzureOpenAI, cancellation_token: CancellationToken) -> None:
+async def test_on_reset_behavior(client: AsyncOpenAI, cancellation_token: CancellationToken) -> None:
     # Arrange: Use the default behavior for reset.
     thread = await client.beta.threads.create()
     await client.beta.threads.messages.create(
@@ -356,7 +371,7 @@ async def test_on_reset_behavior(client: AsyncAzureOpenAI, cancellation_token: CancellationToken) -> None:


 @pytest.mark.asyncio
-async def test_save_and_load_state(mock_openai_client: AsyncAzureOpenAI) -> None:
+async def test_save_and_load_state(mock_openai_client: AsyncOpenAI) -> None:
     agent = OpenAIAssistantAgent(
         name="assistant",
         description="Dummy assistant for state testing",
python/uv.lock (generated, 8 lines changed)
@@ -751,7 +751,7 @@ requires-dist = [
     { name = "mcp", marker = "extra == 'mcp'", specifier = ">=1.1.3" },
     { name = "nbclient", marker = "extra == 'jupyter-executor'", specifier = ">=0.10.2" },
     { name = "ollama", marker = "extra == 'ollama'", specifier = ">=0.4.7" },
-    { name = "openai", marker = "extra == 'openai'", specifier = ">=1.52.2" },
+    { name = "openai", marker = "extra == 'openai'", specifier = ">=1.66.5" },
     { name = "openai-whisper", marker = "extra == 'video-surfer'" },
     { name = "opencv-python", marker = "extra == 'video-surfer'", specifier = ">=4.5" },
     { name = "pillow", marker = "extra == 'magentic-one'", specifier = ">=11.0.0" },
@@ -4880,7 +4880,7 @@ wheels = [

 [[package]]
 name = "openai"
-version = "1.60.2"
+version = "1.66.5"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "anyio" },
@@ -4892,9 +4892,9 @@ dependencies = [
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/08/ae/8d9706b8ff2363287b4a8807de2dd29cdbdad5424e9d05d345df724320f5/openai-1.60.2.tar.gz", hash = "sha256:a8f843e10f2855713007f491d96afb2694b11b5e02cb97c7d01a0be60bc5bb51", size = 348185 }
+sdist = { url = "https://files.pythonhosted.org/packages/bb/10/b19dc682c806e6735a8387f2003afe2abada9f9e5227318de642c6949524/openai-1.66.5.tar.gz", hash = "sha256:f61b8fac29490ca8fdc6d996aa6926c18dbe5639536f8c40219c40db05511b11", size = 398595 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/e5/5a/d5474ca67a547dde9b87b5bc8a8f90eadf29f523d410f2ba23d63c9b82ec/openai-1.60.2-py3-none-any.whl", hash = "sha256:993bd11b96900b9098179c728026f016b4982ded7ee30dfcf4555eab1171fff9", size = 456107 },
+    { url = "https://files.pythonhosted.org/packages/c7/3b/1ba418920ecd1eae7cc4d4ac8a01711ee0879b1a57dd81d10551e5b9a2ea/openai-1.66.5-py3-none-any.whl", hash = "sha256:74be528175f8389f67675830c51a15bd51e874425c86d3de6153bf70ed6c2884", size = 571144 },
 ]

 [[package]]