[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
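# Hatchling is the PEP 517 build backend; a local build is e.g.: python -m build
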
[project]
name = "autogen-ext"
version = "0.7.4"
license = {file = "LICENSE-CODE"}
description = "AutoGen extensions library"
readme = "README.md"
requires-python = ">=3.10"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
dependencies = [
"autogen-core==0.7.4",
]
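
# Feature-specific dependencies are declared as extras below; install with e.g.:
#   pip install "autogen-ext[openai]"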
[project.optional-dependencies]
anthropic = ["anthropic>=0.48"]
langchain = ["langchain_core~=0.3.3"]
azure = [
"azure-ai-inference>=1.0.0b9",
"azure-ai-projects>=1.0.0b11",
"azure-core",
"azure-identity",
"azure-search-documents>=11.4.0",
]
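# azure-ai-inference and azure-ai-projects are pinned to preview (b*) releases;
# extras can be combined, e.g.: pip install "autogen-ext[azure,openai]"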
docker = ["docker~=7.0", "asyncio_atexit>=1.0.1"]
ollama = ["ollama>=0.4.7", "tiktoken>=0.8.0"]
openai = ["openai>=1.93", "tiktoken>=0.8.0", "aiofiles"]
file-surfer = [
"autogen-agentchat==0.7.4",
"magika>=0.6.1rc2",
"markitdown[all]~=0.1.0a3",
]
llama-cpp = [
"llama-cpp-python>=0.3.8",
]
graphrag = ["graphrag>=2.3.0"]
chromadb = ["chromadb>=1.0.0"]
mem0 = ["mem0ai>=0.1.98"]
mem0-local = [
"mem0ai>=0.1.98",
"neo4j>=5.25.0",
"chromadb>=1.0.0"
]
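# mem0-local layers local storage backends (chromadb, neo4j) on the base mem0ai client.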
web-surfer = [
"autogen-agentchat==0.7.4",
"playwright>=1.48.0",
"pillow>=11.0.0",
"magika>=0.6.1rc2",
"markitdown[all]~=0.1.0a3",
]
magentic-one = [
"autogen-agentchat==0.7.4",
"magika>=0.6.1rc2",
"markitdown[all]~=0.1.0a3",
"playwright>=1.48.0",
"pillow>=11.0.0",
]
video-surfer = [
"autogen-agentchat==0.7.4",
"opencv-python>=4.5",
"ffmpeg-python",
"openai-whisper",
]
diskcache = [
"diskcache>=5.6.3"
]
redis = [
"redis>=5.2.1"
]
grpc = [
"grpcio~=1.70.0",
]
jupyter-executor = [
"ipykernel>=6.29.5",
"nbclient>=0.10.2",
]
docker-jupyter-executor = [
"docker~=7.0",
"asyncio_atexit>=1.0.1",
"websockets>=15.0.1",
"requests>=2.32.3",
"aiohttp>=3.11.16",
]
task-centric-memory = ["chromadb>=1.0.0"]
semantic-kernel-core = [
"semantic-kernel>=1.17.1",
]
gemini = [
"google-genai>=1.0.0",
]
semantic-kernel-google = [
"semantic-kernel[google]>=1.17.1",
]
semantic-kernel-hugging-face = [
"semantic-kernel[hugging_face]>=1.17.1",
]
semantic-kernel-mistralai = [
"semantic-kernel[mistralai]>=1.17.1",
]
semantic-kernel-ollama = [
"semantic-kernel[ollama]>=1.17.1",
]
semantic-kernel-onnx = [
"semantic-kernel[onnx]>=1.17.1",
]
semantic-kernel-anthropic = [
"semantic-kernel[anthropic]>=1.17.1",
]
semantic-kernel-pandas = [
"semantic-kernel[pandas]>=1.17.1",
]
semantic-kernel-aws = [
"semantic-kernel[aws]>=1.17.1",
]
semantic-kernel-dapr = [
"semantic-kernel[dapr]>=1.17.1",
]
http-tool = [
"httpx>=0.27.0",
"json-schema-to-pydantic>=0.2.0"
]
semantic-kernel-all = [
"semantic-kernel[google,hugging_face,mistralai,ollama,onnx,anthropic,usearch,pandas,aws,dapr]>=1.17.1",
]
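# Each semantic-kernel-* extra forwards to the matching upstream semantic-kernel
# extra; semantic-kernel-all pulls the full set, e.g.:
#   pip install "autogen-ext[semantic-kernel-all]"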
rich = ["rich>=13.9.4"]
mcp = ["mcp>=1.11.0"]
canvas = [
"unidiff>=0.7.5",
]
redisvl = ["redisvl>=0.6.0"]

[tool.hatch.build.targets.wheel]
packages = ["src/autogen_ext"]
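# src layout: only the autogen_ext package under src/ is shipped in the wheel.
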
[dependency-groups]
dev = [
"autogen_test_utils",
"langchain-experimental",
"pandas-stubs>=2.2.3.241126",
"httpx>=0.28.1",
"opentelemetry-proto>=1.28.0"
]
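# [dependency-groups] follows PEP 735; assuming the uv-based workspace used in
# this repo, dev tooling installs with e.g.: uv sync --group dev
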
[tool.ruff]
extend = "../../pyproject.toml"
include = ["src/**", "tests/*.py"]
exclude = ["src/autogen_ext/agents/web_surfer/*.js", "src/autogen_ext/runtimes/grpc/protos", "tests/protos", "README.md"]
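# Lint settings extend the repo root config; check locally with e.g.: ruff check src tests
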
[tool.pyright]
extends = "../../pyproject.toml"
include = ["src", "tests"]
exclude = ["src/autogen_ext/runtimes/grpc/protos", "tests/protos"]

[tool.pytest.ini_options]
minversion = "6.0"
testpaths = ["tests"]
markers = [
"grpc",
]
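# The "grpc" marker tags gRPC runtime tests; the test-grpc task below selects them
# via the custom --grpc flag.
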
[tool.poe]
include = "../../shared_tasks.toml"

[tool.poe.tasks]
test.sequence = [
"playwright install",
"pytest -n 1 --cov=src --cov-report=term-missing --cov-report=xml",
]
test.default_item_type = "cmd"
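# Tasks run through poethepoet, e.g. poe test: installs Playwright browsers, then runs pytest with coverage.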
test-grpc = "pytest -n 1 --cov=src --cov-report=term-missing --cov-report=xml --grpc"
test-windows = "pytest -n 1 --cov=src --cov-report=term-missing --cov-report=xml -m 'windows'"
mypy = "mypy --config-file ../../pyproject.toml --exclude src/autogen_ext/runtimes/grpc/protos --exclude tests/protos --ignore-missing-imports src tests"
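
# The docker package ships without type stubs; the override below silences mypy's
# missing-import errors for it.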
[tool.mypy]
[[tool.mypy.overrides]]
module = "docker.*"
ignore_missing_imports = true