Mirror of https://github.com/microsoft/autogen.git, synced 2025-07-10 18:41:30 +00:00

* Initial commit.
* Disable LLM response caching.
* Add teachability option to setup.py
* Modify test to use OAI_CONFIG_LIST as suggested in the docs.
* Expand unit test.
* Complete unit test.
* Add filter_dict
* details
* AnalysisAgent
* details
* More documentation and debug output.
* Support retrieval of any number of relevant memos, including zero.
* More robust analysis separator.
* cleanup
* teach_config
* refactoring
* For robustness, allow more flexibility on memo storage and retrieval.
* de-dupe the retrieved memos.
* Simplify AnalysisAgent. The unit tests now pass with gpt-3.5
* comments
* Add a verbosity level to control analyzer messages.
* refactoring
* comments
* Persist memory on disk.
* cleanup
* Use markdown to format retrieved memos.
* Use markdown in TextAnalyzerAgent
* Add another verbosity level.
* clean up logging
* notebook
* minor edits
* cleanup
* linter fixes
* Skip tests that fail to import openai
* Address reviewer feedback.
* lint
* refactoring
* Improve wording
* Improve code coverage.
* lint
* Use llm_config to control caching.
* lowercase notebook name
* Sort out the parameters passed through to ConversableAgent, and supply full docstrings for the others.
* lint
* Allow TextAnalyzerAgent to be given a different llm_config than TeachableAgent.
* documentation
* Modifications to run openai workflow.
* Test on just python 3.10. Replace agent with agent teachable_agent as recommended.
* Test on python 3.9 instead of 3.10.
* Remove space from name -> teachableagent

---------

Co-authored-by: Li Jiang <bnujli@gmail.com>
Co-authored-by: Chi Wang <wang.chi@microsoft.com>
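The changelog above centers on the new TeachableAgent and its TextAnalyzerAgent helper, which persist learned memos on disk, retrieve any number of relevant memos, and expose a verbosity level plus llm_config-driven caching control. Below is a minimal sketch of how those pieces might be wired together, based only on the names mentioned in the commit notes; the import path and the specific teach_config keys are assumptions, not a verified API reference.

# Hedged sketch based on the commit notes above; the import path and the
# teach_config keys are assumptions rather than a verified API reference.
from autogen.agentchat.contrib.teachable_agent import TeachableAgent  # assumed module path

# Per the notes, response caching is controlled through llm_config rather than
# a separate flag; the model entry here is only a placeholder.
llm_config = {"config_list": [{"model": "gpt-3.5-turbo"}]}

teachable_agent = TeachableAgent(
    name="teachableagent",  # space-free name chosen in the changelog
    llm_config=llm_config,
    teach_config={
        "verbosity": 0,  # analyzer-message verbosity level mentioned in the notes
        "path_to_db_dir": "./tmp/teachable_agent_db",  # hypothetical key: on-disk memo store
    },
)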
69 lines · 1.8 KiB · Python
import setuptools
import os

here = os.path.abspath(os.path.dirname(__file__))

with open("README.md", "r", encoding="UTF-8") as fh:
    long_description = fh.read()


# Get the code version
version = {}
with open(os.path.join(here, "autogen/version.py")) as fp:
    exec(fp.read(), version)
__version__ = version["__version__"]

install_requires = [
    "openai",
    "diskcache",
    "termcolor",
    "flaml",
    "python-dotenv",
]


setuptools.setup(
    name="pyautogen",
    version=__version__,
    author="AutoGen",
    author_email="auto-gen@outlook.com",
    description="Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/microsoft/autogen",
    packages=setuptools.find_packages(include=["autogen*"], exclude=["test"]),
    # package_data={
    #     "autogen.default": ["*/*.json"],
    # },
    # include_package_data=True,
    install_requires=install_requires,
    extras_require={
        "test": [
            "chromadb",
            "lancedb",
            "coverage>=5.3",
            "datasets",
            "ipykernel",
            "nbconvert",
            "nbformat",
            "pre-commit",
            "pydantic==1.10.9",
            "pytest-asyncio",
            "pytest>=6.1.1",
            "sympy",
            "tiktoken",
            "wolframalpha",
        ],
        "blendsearch": ["flaml[blendsearch]"],
        "mathchat": ["sympy", "pydantic==1.10.9", "wolframalpha"],
        "retrievechat": ["chromadb", "tiktoken", "sentence_transformers", "pypdf"],
        "teachable": ["chromadb"],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.8",
)
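The extras_require table maps optional features to their extra dependencies: for example, installing with pip install "pyautogen[teachable]" additionally pulls in chromadb for the teachable agent's memo store, while a plain install stays limited to the five packages in install_requires. As a small illustration, the standard-library importlib.metadata module can be used to inspect the distribution this setup.py produces; only the package name pyautogen comes from the file above, and the sketch assumes the package has already been installed locally.

# Hedged sketch: inspect the installed distribution produced by this setup.py.
# Uses only the standard library; assumes pyautogen has been installed,
# otherwise metadata.version() raises PackageNotFoundError.
from importlib import metadata

# The version string resolved from autogen/version.py at build time.
print(metadata.version("pyautogen"))

# Requirement strings; extras appear as environment markers,
# e.g. 'chromadb; extra == "teachable"'.
for requirement in metadata.requires("pyautogen") or []:
    print(requirement)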