autogen/setup.py
Audel Rouhi 1b8d65df0a
2447 fix pgvector tests and notebook (#2455)
* Re-added missing notebook

* Test installing postgres

* Error handle the connection.

* Fixed import.

* Fixed import.

* Fixed creation of collection without client.

* PGVector portion working. OpenAI untested.

* Fixed prints.

* Added output.

* Fixed pre-commits.

* Run pgvector notebook

* Improve efficiency of get_collection

* Fix delete_collection

* Fixed issues with pytests and validated functions.

* Validated pytests.

* Fixed pre-commits

* Separated extras_require entries to allow more logic. The retrieve_chat base dependencies are included in the pgvector and qdrant extras.

* Fixed extra newline.

* Added username and password fields.

* URL-encode the connection string parameters to support symbols like % (see the sketch after this commit summary)

* Fixed pre-commits.

* Added pgvector service

* pgvector doesn't have health intervals.

* Switched to colon based key values.

* Run on Ubuntu only; Linux is the only option with container service support.

* Using default credentials instead.

* Fix postgres setup

* Fix postgres setup

* Don't skip tests on win and mac

* Fix command error

* Try apt install postgresql

* Assert table does not exist when deleted.

* Raise a ValueError when an empty list or None value is provided for IDs

* pre-commit

* Add install pgvector

* Add install pgvector

* Reorg test files, create a separate job for test pgvector

* Fix format

* Fix env format

* Simplify job name, enable test_retrieve_config

* Fix test_retrieve_config

* Corrected behavior for get_docs_by_ids with no ids returning all docs.

* Corrected behavior for get_docs_by_ids with no ids returning all docs.

* Fixed pre-commits.

* Added return values for all functions.

* Validated distance search is implemented correctly.

* Validated all pytests

* Removed print.

* Added default clause.

* Make ids optional

* Fix test, make it more robust

* Bump version of openai for the vector_store support

* Added support for choosing the sentence transformer model.

* Added error handling for model name entered.

* Updated model info.

* Added model_name db_config param.

* pre-commit fixes and last link fix.

* Use secrets password.

* fix: link fixed

* updated tests

* Updated config_list.

* pre-commit fix.

* Added chat_result to all output.
Unable to re-run notebooks.

* Pre-commit fix detected this requirement.

* Fix python 3.8 and 3.9 not supported for macos

* Fix python 3.8 and 3.9 not supported for macos

* Fix format

* Reran notebook with MetaLlama3Instruct7BQ4_k_M

* added gpt model.

* Reran notebook

---------

Co-authored-by: Li Jiang <bnujli@gmail.com>
Co-authored-by: Hk669 <hrushi669@gmail.com>
2024-04-28 13:43:02 +00:00
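
A minimal sketch of the URL-encoding fix referenced in the change list above ("URL-encode the connection string parameters to support symbols like %"). The helper and variable names here are illustrative assumptions, not the exact code added by the PR:

from urllib.parse import quote

# Hypothetical credentials containing characters that are unsafe in a URI.
user = "postgres"
password = "p@ss%word"
host = "localhost"
port = 5432
dbname = "vectordb"

# Percent-encode the user-supplied parts before building the connection string,
# so symbols like "%" and "@" survive parsing by psycopg/libpq.
connection_string = (
    f"postgresql://{quote(user, safe='')}:{quote(password, safe='')}"
    f"@{host}:{port}/{dbname}"
)
print(connection_string)  # postgresql://postgres:p%40ss%25word@localhost:5432/vectordb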

95 lines
2.8 KiB
Python

import os

import setuptools

here = os.path.abspath(os.path.dirname(__file__))

with open("README.md", "r", encoding="UTF-8") as fh:
    long_description = fh.read()

# Get the code version
version = {}
with open(os.path.join(here, "autogen/version.py")) as fp:
    exec(fp.read(), version)
__version__ = version["__version__"]

install_requires = [
    "openai>=1.23.3",
    "diskcache",
    "termcolor",
    "flaml",
    # numpy is installed by flaml, but we want to pin the version to below 2.x (see https://github.com/microsoft/autogen/issues/1960)
    "numpy>=1.17.0,<2",
    "python-dotenv",
    "tiktoken",
    # Disallowing 2.6.0 can be removed when this is fixed https://github.com/pydantic/pydantic/issues/8705
    "pydantic>=1.10,<3,!=2.6.0",  # could be both V1 and V2
    "docker",
]

jupyter_executor = [
    "jupyter-kernel-gateway",
    "websocket-client",
    "requests",
    "jupyter-client>=8.6.0",
    "ipykernel>=6.29.0",
]

retrieve_chat = ["chromadb", "sentence_transformers", "pypdf", "ipython", "beautifulsoup4", "markdownify"]

extra_require = {
    "test": [
        "coverage>=5.3",
        "ipykernel",
        "nbconvert",
        "nbformat",
        "pre-commit",
        "pytest-asyncio",
        "pytest>=6.1.1,<8",
        "pandas",
    ],
    "blendsearch": ["flaml[blendsearch]"],
    "mathchat": ["sympy", "pydantic==1.10.9", "wolframalpha"],
    "retrievechat": retrieve_chat,
    "retrievechat-pgvector": [
        *retrieve_chat,
        "pgvector>=0.2.5",
        "psycopg>=3.1.18",
    ],
    "retrievechat-qdrant": [
        *retrieve_chat,
        "qdrant_client[fastembed]",
    ],
    "autobuild": ["chromadb", "sentence-transformers", "huggingface-hub"],
    "teachable": ["chromadb"],
    "lmm": ["replicate", "pillow"],
    "graph": ["networkx", "matplotlib"],
    "gemini": ["google-generativeai>=0.5,<1", "pillow", "pydantic"],
    "websurfer": ["beautifulsoup4", "markdownify", "pdfminer.six", "pathvalidate"],
    "redis": ["redis"],
    "cosmosdb": ["azure-cosmos>=4.2.0"],
    "websockets": ["websockets>=12.0,<13"],
    "jupyter-executor": jupyter_executor,
    "types": ["mypy==1.9.0", "pytest>=6.1.1,<8"] + jupyter_executor,
}

setuptools.setup(
    name="pyautogen",
    version=__version__,
    author="AutoGen",
    author_email="auto-gen@outlook.com",
    description="Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/microsoft/autogen",
    packages=setuptools.find_packages(include=["autogen*"], exclude=["test"]),
    install_requires=install_requires,
    extras_require=extra_require,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.8,<3.13",
)