[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "autogen-ext"
version = "0.4.9.2"
license = {file = "LICENSE-CODE"}
description = "AutoGen extensions library"
readme = "README.md"
requires-python = ">=3.10"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
dependencies = [
    "autogen-core==0.4.9.2",
]

[project.optional-dependencies]
langchain = ["langchain_core~= 0.3.3"]
azure = [
    "azure-ai-inference>=1.0.0b7",
    "azure-core",
    "azure-identity",
]
docker = ["docker~=7.0", "asyncio_atexit>=1.0.1"]
ollama = ["ollama>=0.4.7", "tiktoken>=0.8.0"]
openai = ["openai>=1.52.2", "tiktoken>=0.8.0", "aiofiles"]
file-surfer = [
    "autogen-agentchat==0.4.9.2",
    "markitdown~=0.0.1",
]

llama-cpp = [
    "llama-cpp-python>=0.3.8",
]

graphrag = ["graphrag>=1.0.1"]
chromadb = ["chromadb"]
web-surfer = [
    "autogen-agentchat==0.4.9.2",
    "playwright>=1.48.0",
    "pillow>=11.0.0",
    "markitdown~=0.0.1",
]
magentic-one = [
    "autogen-agentchat==0.4.9.2",
    "markitdown~=0.0.1",
    "playwright>=1.48.0",
    "pillow>=11.0.0",
]
video-surfer = [
    "autogen-agentchat==0.4.9.2",
    "opencv-python>=4.5",
    "ffmpeg-python",
    "openai-whisper",
]
diskcache = [
    "diskcache>=5.6.3"
]
redis = [
    "redis>=5.2.1"
]

grpc = [
    "grpcio~=1.70.0",
]

jupyter-executor = [
    "ipykernel>=6.29.5",
    "nbclient>=0.10.2",
]

task-centric-memory = ["chromadb>=0.6.3"]

semantic-kernel-core = [
    "semantic-kernel>=1.17.1",
]

gemini = [
    "google-genai>=1.0.0",
]

semantic-kernel-google = [
    "semantic-kernel[google]>=1.17.1",
]

semantic-kernel-hugging-face = [
    "semantic-kernel[hugging_face]>=1.17.1",
]

semantic-kernel-mistralai = [
    "semantic-kernel[mistralai]>=1.17.1",
]

semantic-kernel-ollama = [
    "semantic-kernel[ollama]>=1.17.1",
]

semantic-kernel-onnx = [
    "semantic-kernel[onnx]>=1.17.1",
]

semantic-kernel-anthropic = [
    "semantic-kernel[anthropic]>=1.17.1",
]

semantic-kernel-pandas = [
    "semantic-kernel[pandas]>=1.17.1",
]

semantic-kernel-aws = [
    "semantic-kernel[aws]>=1.17.1",
]

semantic-kernel-dapr = [
    "semantic-kernel[dapr]>=1.17.1",
]

http-tool = [
    "httpx>=0.27.0",
    "json-schema-to-pydantic>=0.2.0"
]

semantic-kernel-all = [
    "semantic-kernel[google,hugging_face,mistralai,ollama,onnx,anthropic,usearch,pandas,aws,dapr]>=1.17.1",
]

rich = ["rich>=13.9.4"]

mcp = [
    "mcp>=1.1.3",
    "json-schema-to-pydantic>=0.2.2"
]

[tool.hatch.build.targets.wheel]
packages = ["src/autogen_ext"]

[dependency-groups]
dev = [
    "autogen_test_utils",
    "langchain-experimental",
    "pandas-stubs>=2.2.3.241126",
    "httpx>=0.28.1",
]

[tool.ruff]
extend = "../../pyproject.toml"
include = ["src/**", "tests/*.py"]
exclude = ["src/autogen_ext/agents/web_surfer/*.js", "src/autogen_ext/runtimes/grpc/protos", "tests/protos", "README.md"]

[tool.pyright]
extends = "../../pyproject.toml"
include = ["src", "tests"]
exclude = ["src/autogen_ext/runtimes/grpc/protos", "tests/protos"]

[tool.pytest.ini_options]
minversion = "6.0"
testpaths = ["tests"]
markers = [
    "grpc",
]

[tool.poe]
include = "../../shared_tasks.toml"

[tool.poe.tasks]
test.sequence = [
    "playwright install",
    "pytest -n 1 --cov=src --cov-report=term-missing --cov-report=xml",
]
test.default_item_type = "cmd"
test-grpc = "pytest -n 1 --cov=src --cov-report=term-missing --cov-report=xml --grpc"
test-windows = "pytest -n 1 --cov=src --cov-report=term-missing --cov-report=xml -m 'windows'"
mypy = "mypy --config-file ../../pyproject.toml --exclude src/autogen_ext/runtimes/grpc/protos --exclude tests/protos src tests"

[tool.mypy]

[[tool.mypy.overrides]]
module = "docker.*"
ignore_missing_imports = true