simplify getting-started; update news (#2175)

* simplify getting-started; update news

* bug fix
Chi Wang 2024-03-27 20:43:01 -07:00 committed by GitHub
parent 5ef2dfc104
commit e6237d44a1
6 changed files with 38 additions and 37 deletions

View File

@@ -12,23 +12,25 @@
 <img src="https://github.com/microsoft/autogen/blob/main/website/static/img/flaml.svg" width=200>
 <br>
 </p> -->
+:fire: Mar 26: Andrew Ng gave a shoutout to AutoGen in [What's next for AI agentic workflows](https://youtu.be/sal78ACtGTc?si=JduUzN_1kDnMq0vF) at Sequoia Capital's AI Ascent.
 :fire: Mar 3: What's new in AutoGen? 📰[Blog](https://microsoft.github.io/autogen/blog/2024/03/03/AutoGen-Update); 📺[Youtube](https://www.youtube.com/watch?v=j_mtwQiaLGU).
 :fire: Mar 1: the first AutoGen multi-agent experiment on the challenging [GAIA](https://huggingface.co/spaces/gaia-benchmark/leaderboard) benchmark achieved No. 1 accuracy across all three levels.
-:fire: Jan 30: AutoGen is highlighted by Peter Lee in Microsoft Research Forum [Keynote](https://t.co/nUBSjPDjqD).
+:tada: Jan 30: AutoGen is highlighted by Peter Lee in Microsoft Research Forum [Keynote](https://t.co/nUBSjPDjqD).
-:fire: Dec 31: [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155) is selected by [TheSequence: My Five Favorite AI Papers of 2023](https://thesequence.substack.com/p/my-five-favorite-ai-papers-of-2023).
+:tada: Dec 31: [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155) is selected by [TheSequence: My Five Favorite AI Papers of 2023](https://thesequence.substack.com/p/my-five-favorite-ai-papers-of-2023).
 <!-- :fire: Nov 24: pyautogen [v0.2](https://github.com/microsoft/autogen/releases/tag/v0.2.0) is released with many updates and new features compared to v0.1.1. It switches to using openai-python v1. Please read the [migration guide](https://microsoft.github.io/autogen/docs/Installation#python). -->
 <!-- :fire: Nov 11: OpenAI's Assistants are available in AutoGen and interoperable with other AutoGen agents! Check out our [blogpost](https://microsoft.github.io/autogen/blog/2023/11/13/OAI-assistants) for details and examples. -->
-:fire: Nov 8: AutoGen is selected into [Open100: Top 100 Open Source achievements](https://www.benchcouncil.org/evaluation/opencs/annual.html) 35 days after spinoff.
+:tada: Nov 8: AutoGen is selected into [Open100: Top 100 Open Source achievements](https://www.benchcouncil.org/evaluation/opencs/annual.html) 35 days after spinoff.
-:fire: Nov 6: AutoGen is mentioned by Satya Nadella in a [fireside chat](https://youtu.be/0pLBvgYtv6U).
+:tada: Nov 6: AutoGen is mentioned by Satya Nadella in a [fireside chat](https://youtu.be/0pLBvgYtv6U).
-:fire: Nov 1: AutoGen is the top trending repo on GitHub in October 2023.
+:tada: Nov 1: AutoGen is the top trending repo on GitHub in October 2023.
 :tada: Oct 03: AutoGen spins off from FLAML on GitHub and has a major paper update (first version on Aug 16).

View File

@@ -83,8 +83,7 @@ class DockerCommandLineCodeExecutor(CodeExecutor):
         if isinstance(work_dir, str):
             work_dir = Path(work_dir)
-        if not work_dir.exists():
-            raise ValueError(f"Working directory {work_dir} does not exist.")
+        work_dir.mkdir(exist_ok=True)

         client = docker.from_env()
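A note on the line kept as context here: `docker.from_env()` connects to the local Docker daemon, so constructing this executor still fails fast on hosts without Docker. A minimal standalone check of that prerequisite, using only the `docker` SDK (no AutoGen):

```python
import docker

# from_env() builds a client from DOCKER_HOST and related environment variables;
# ping() round-trips to the daemon and returns True when it is reachable.
client = docker.from_env()
print(client.ping())
```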

View File

@@ -71,8 +71,7 @@ $functions"""
         if isinstance(work_dir, str):
             work_dir = Path(work_dir)
-        if not work_dir.exists():
-            raise ValueError(f"Working directory {work_dir} does not exist.")
+        work_dir.mkdir(exist_ok=True)

         self._timeout = timeout
         self._work_dir: Path = work_dir
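The same change lands in both executors: instead of rejecting a missing working directory, they now create it. The behavior comes straight from `pathlib` — `mkdir(exist_ok=True)` creates the final path component but not missing parents, which is why an unusable path now surfaces as `FileNotFoundError` rather than the old `ValueError` (see the test update below). A minimal sketch:

```python
from pathlib import Path

work_dir = Path("coding")
work_dir.mkdir(exist_ok=True)  # created if absent; a no-op if it already exists

# Without parents=True, a missing parent directory is an error:
Path("/invalid/directory").mkdir(exist_ok=True)  # raises FileNotFoundError
```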

View File

@@ -1 +1 @@
-__version__ = "0.2.20"
+__version__ = "0.2.21"

View File

@@ -1,5 +1,6 @@
 from pathlib import Path
+import sys
 import os
 import tempfile
 import uuid
 import pytest
@@ -10,7 +11,8 @@ from autogen.coding.factory import CodeExecutorFactory
 from autogen.coding.docker_commandline_code_executor import DockerCommandLineCodeExecutor
 from autogen.coding.local_commandline_code_executor import LocalCommandLineCodeExecutor
-from conftest import MOCK_OPEN_AI_API_KEY, skip_docker
+sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+from conftest import MOCK_OPEN_AI_API_KEY, skip_docker  # noqa: E402

 if skip_docker or not is_docker_running():
     classes_to_test = [LocalCommandLineCodeExecutor]
@@ -52,7 +54,7 @@ def test_commandline_executor_init(cls) -> None:
     assert executor.timeout == 10 and str(executor.work_dir) == "."

     # Try invalid working directory.
-    with pytest.raises(ValueError, match="Working directory .* does not exist."):
+    with pytest.raises(FileNotFoundError):
         executor = cls(timeout=111, work_dir="/invalid/directory")

View File

@@ -38,30 +38,37 @@ pip install pyautogen
 ```

 <Tabs>
+<TabItem value="nocode" label="No code execution" default>
+```python
+import os
+
+from autogen import AssistantAgent, UserProxyAgent
+
+llm_config = {"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}
+assistant = AssistantAgent("assistant", llm_config=llm_config)
+user_proxy = UserProxyAgent("user_proxy", code_execution_config=False)
+
+# Start the chat
+user_proxy.initiate_chat(
+    assistant,
+    message="Tell me a joke about NVDA and TESLA stock prices.",
+)
+```
+</TabItem>
 <TabItem value="local" label="Local execution" default>

 :::warning
 When asked, be sure to check the generated code before continuing to ensure it is safe to run.
 :::

 ```python
+import autogen
 from autogen import AssistantAgent, UserProxyAgent
-from autogen.coding import LocalCommandLineCodeExecutor
 import os
-from pathlib import Path
-
-llm_config = {
-    "config_list": [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}],
-}
-work_dir = Path("coding")
-work_dir.mkdir(exist_ok=True)
+
+llm_config = {"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}

 assistant = AssistantAgent("assistant", llm_config=llm_config)
-code_executor = LocalCommandLineCodeExecutor(work_dir=work_dir)
 user_proxy = UserProxyAgent(
-    "user_proxy", code_execution_config={"executor": code_executor}
+    "user_proxy", code_execution_config={"executor": autogen.coding.LocalCommandLineCodeExecutor(work_dir="coding")}
 )

 # Start the chat
@@ -75,20 +82,12 @@ user_proxy.initiate_chat(
 <TabItem value="docker" label="Docker execution" default>

 ```python
+import autogen
 from autogen import AssistantAgent, UserProxyAgent
-from autogen.coding import DockerCommandLineCodeExecutor
 import os
-from pathlib import Path
-
-llm_config = {
-    "config_list": [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}],
-}
-work_dir = Path("coding")
-work_dir.mkdir(exist_ok=True)
+
+llm_config = {"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}

-with DockerCommandLineCodeExecutor(work_dir=work_dir) as code_executor:
+with autogen.coding.DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
     assistant = AssistantAgent("assistant", llm_config=llm_config)
     user_proxy = UserProxyAgent(
         "user_proxy", code_execution_config={"executor": code_executor}
@@ -103,7 +102,7 @@ with DockerCommandLineCodeExecutor(work_dir=work_dir) as code_executor:
 Open `coding/plot.png` to see the generated plot.
 </TabItem>
 </Tabs>
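For readers trying the simplified examples: both execution tabs write their output into the `coding` working directory on the host (the Docker executor's `with` block cleans up the container when it exits, while the files it produced persist). A quick check that a run succeeded:

```python
from pathlib import Path

# The chart generated by the execution examples should now be on disk.
plot = Path("coding") / "plot.png"
print(f"{plot} exists: {plot.exists()}")
```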
:::tip