rename GenericAgent -> ResponsiveAgent (#1146)

* rename GenericAgent -> ResponsiveAgent

* always
Chi Wang 2023-07-25 22:22:25 -07:00 committed by GitHub
parent 3e7aac6e8b
commit ecf51f41bb
7 changed files with 46 additions and 22 deletions


@@ -1,7 +1,7 @@
from .agent import Agent
-from .generic_agent import GenericAgent
+from .responsive_agent import ResponsiveAgent
from .assistant_agent import AssistantAgent
from .user_proxy_agent import UserProxyAgent
__all__ = ["Agent", "GenericAgent", "AssistantAgent", "UserProxyAgent"]
__all__ = ["Agent", "ResponsiveAgent", "AssistantAgent", "UserProxyAgent"]


@@ -1,8 +1,9 @@
-from typing import Dict, Union
+from typing import Dict, List, Union
class Agent:
"""(Experimental) An abstract class for AI agent.
An agent can communicate with other agents and perform actions.
Different agents can differ in what actions they perform in the `receive` method.
"""
@@ -31,3 +32,13 @@ class Agent:
def reset(self):
"""(Abstract method) Reset the agent."""
+def generate_reply(self, messages: List[Dict], default_reply: Union[str, Dict] = "") -> Union[str, Dict]:
+"""(Abstract method) Generate a reply based on the received messages.
+Args:
+messages (list[dict]): a list of messages received.
+default_reply (str or dict): the default reply if no other reply is generated.
+Returns:
+str or dict: the generated reply.
+"""


@@ -1,11 +1,11 @@
-from .generic_agent import GenericAgent
+from .responsive_agent import ResponsiveAgent
from typing import Callable, Dict, Optional, Union
-class AssistantAgent(GenericAgent):
+class AssistantAgent(ResponsiveAgent):
"""(Experimental) Assistant agent, designed to solve a task with LLM.
-AssistantAgent is a subclass of GenericAgent configured with a default system message.
+AssistantAgent is a subclass of ResponsiveAgent configured with a default system message.
The default system message is designed to solve a task with LLM,
including suggesting python code blocks and debugging.
`human_input_mode` is default to "NEVER"
@@ -49,7 +49,7 @@ class AssistantAgent(GenericAgent):
default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
The limit only plays a role when human_input_mode is not "ALWAYS".
**kwargs (dict): Please refer to other kwargs in
-[GenericAgent](generic_agent#__init__).
+[ResponsiveAgent](responsive_agent#__init__).
"""
super().__init__(
name,
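Per the updated docstring above, the extra keyword arguments are forwarded to ResponsiveAgent's constructor. A construction sketch; the oai_config value is a placeholder, since its exact schema is not part of this diff:

from flaml.autogen.agent import AssistantAgent

assistant = AssistantAgent(
    name="assistant",
    max_consecutive_auto_reply=5,   # the limit documented above for non-"ALWAYS" input modes
    oai_config={"model": "gpt-4"},  # placeholder LLM config, forwarded to ResponsiveAgent via **kwargs
)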


@@ -279,7 +279,7 @@ class MathUserProxyAgent(UserProxyAgent):
is_success = False
return output, is_success
-def auto_reply(self, messages: List[Dict], default_reply: Union[str, Dict] = "") -> Union[str, Dict]:
+def generate_reply(self, messages: List[Dict], default_reply: Union[str, Dict] = "") -> Union[str, Dict]:
"""Generate an auto reply."""
message = messages[-1]
message = message.get("content", "")
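The rename also changes the method callers use: code that previously invoked auto_reply on a proxy agent must now call generate_reply. A hedged caller-side sketch, with constructor keywords assumed to match the ResponsiveAgent signature shown later in this commit:

from flaml.autogen.agent import UserProxyAgent

proxy = UserProxyAgent(name="user_proxy", human_input_mode="NEVER")
messages = [{"role": "user", "content": "What is 2 + 2?"}]

# Before this commit:
# reply = proxy.auto_reply(messages, default_reply="TERMINATE")

# After this commit:
reply = proxy.generate_reply(messages, default_reply="TERMINATE")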


@@ -6,11 +6,19 @@ from .agent import Agent
from flaml.autogen.code_utils import DEFAULT_MODEL, UNKNOWN, execute_code, extract_code, infer_lang
-class GenericAgent(Agent):
-"""(Experimental) An generic agent which can be configured as assistant or user proxy.
+class ResponsiveAgent(Agent):
+"""(Experimental) A class for generic responsive agents which can be configured as assistant or user proxy.
-For example, AssistantAgent and UserProxyAgent are subclasses of GenericAgent,
+After receiving each message, the agent will send a reply to the sender unless the msg is a termination msg.
+For example, AssistantAgent and UserProxyAgent are subclasses of ResponsiveAgent,
configured with different default settings.
+To modify auto reply, override `generate_reply` method.
+To disable/enable human response in every turn, set `human_input_mode` to "NEVER" or "ALWAYS".
+To modify the way to get human input, override `get_human_input` method.
+To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
+`run_code`, and `execute_function` methods respectively.
+To customize the initial message when a conversation starts, override `generate_init_message` method.
"""
DEFAULT_CONFIG = {
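The rewritten docstring names generate_reply as the first override point. A sketch of a custom subclass that wraps the inherited auto reply; PoliteAgent and its canned prefix are purely illustrative:

from typing import Dict, List, Union

from flaml.autogen.agent import ResponsiveAgent


class PoliteAgent(ResponsiveAgent):
    # Prepends a short acknowledgement to whatever reply the base class produces.
    def generate_reply(self, messages: List[Dict], default_reply: Union[str, Dict] = "") -> Union[str, Dict]:
        reply = super().generate_reply(messages, default_reply=default_reply)
        if isinstance(reply, str):
            return f"Thanks for the message. {reply}"
        return reply  # dict-style (function call) replies are passed through unchanged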
@@ -24,7 +32,7 @@ class GenericAgent(Agent):
system_message: Optional[str] = "You are a helpful AI Assistant.",
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "ALWAYS",
human_input_mode: Optional[str] = "TERMINATE",
function_map: Optional[Dict[str, Callable]] = None,
code_execution_config: Optional[Union[Dict, bool]] = None,
oai_config: Optional[Union[Dict, bool]] = None,
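Note the changed default in this signature: human_input_mode now falls back to "TERMINATE" instead of "ALWAYS". A short sketch that spells the mode out explicitly, using the three values named in the docstrings of this commit:

from flaml.autogen.agent import ResponsiveAgent

default_agent = ResponsiveAgent(name="default_agent")  # new default: "TERMINATE"
interactive_agent = ResponsiveAgent(name="interactive_agent", human_input_mode="ALWAYS")  # old default behavior
auto_agent = ResponsiveAgent(name="auto_agent", human_input_mode="NEVER")  # never prompts a human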
@@ -220,7 +228,7 @@
self._consecutive_auto_reply_counter[sender.name] += 1
if self.human_input_mode != "NEVER":
print("\n>>>>>>>> NO HUMAN INPUT RECEIVED. USING AUTO REPLY FOR THE USER...", flush=True)
-self.send(self.auto_reply(self._oai_conversations[sender.name], default_reply=reply), sender)
+self.send(self.generate_reply(self._oai_conversations[sender.name], default_reply=reply), sender)
def reset(self):
"""Reset the agent."""
@@ -232,7 +240,7 @@
response = oai.ChatCompletion.create(messages=self._oai_system_message + messages, **self.oai_config)
return oai.ChatCompletion.extract_text_or_function_call(response)[0]
-def auto_reply(self, messages: List[Dict], default_reply: Union[str, Dict] = "") -> Union[str, Dict]:
+def generate_reply(self, messages: List[Dict], default_reply: Union[str, Dict] = "") -> Union[str, Dict]:
"""Reply based on the conversation history.
First, execute function or code and return the result.


@@ -1,13 +1,18 @@
-from .generic_agent import GenericAgent
+from .responsive_agent import ResponsiveAgent
from typing import Callable, Dict, Optional, Union
-class UserProxyAgent(GenericAgent):
+class UserProxyAgent(ResponsiveAgent):
"""(Experimental) A proxy agent for the user, that can execute code and provide feedback to the other agents.
-UserProxyAgent is a subclass of GenericAgent configured with `human_input_mode` to ALWAYS
+UserProxyAgent is a subclass of ResponsiveAgent configured with `human_input_mode` to ALWAYS
and `oai_config` to False. By default, the agent will prompt for human input every time a message is received.
Code execution is enabled by default. LLM-based auto reply is disabled by default.
+To modify auto reply, override `generate_reply` method.
+To modify the way to get human input, override `get_human_input` method.
+To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
+`run_code`, and `execute_function` methods respectively.
+To customize the initial message when a conversation starts, override `generate_init_message` method.
"""
def __init__(
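Like the ResponsiveAgent docstring, this one spells out both the defaults (prompt the human every turn, execute code, no LLM auto reply) and the override points. A usage sketch, assuming the constructor accepts the same keyword arguments as the ResponsiveAgent signature above:

from flaml.autogen.agent import UserProxyAgent

# Documented defaults: human input on every turn, code execution on, LLM replies off.
user_proxy = UserProxyAgent(name="user_proxy")

# Unattended variant: never prompt, and stop auto-replying after a few turns.
batch_proxy = UserProxyAgent(
    name="batch_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=3,
)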


@@ -1,12 +1,12 @@
import sys
from io import StringIO
import pytest
-from flaml.autogen.agent import GenericAgent
+from flaml.autogen.agent import ResponsiveAgent
-def test_generic_agent(monkeypatch):
-dummy_agent_1 = GenericAgent(name="dummy_agent_1")
-dummy_agent_2 = GenericAgent(name="dummy_agent_2", human_input_mode="TERMINATE")
+def test_responsive_agent(monkeypatch):
+dummy_agent_1 = ResponsiveAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
+dummy_agent_2 = ResponsiveAgent(name="dummy_agent_2", human_input_mode="TERMINATE")
monkeypatch.setattr(sys, "stdin", StringIO("exit"))
dummy_agent_1.receive("hello", dummy_agent_2) # receive a str
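Both dummy agents are created in human-in-the-loop modes, so receive() ends up asking for human input; the monkeypatched stdin above supplies the answer "exit" so the exchange terminates instead of blocking. A rough equivalent outside pytest (a sketch, not part of the test):

import sys
from io import StringIO

from flaml.autogen.agent import ResponsiveAgent

agent_a = ResponsiveAgent(name="agent_a", human_input_mode="ALWAYS")
agent_b = ResponsiveAgent(name="agent_b", human_input_mode="TERMINATE")

sys.stdin = StringIO("exit")       # pre-supply the human response
agent_a.receive("hello", agent_b)  # agent_a prompts for input, reads "exit", and the exchange ends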
@@ -45,4 +45,4 @@ def test_generic_agent(monkeypatch):
if __name__ == "__main__":
-test_generic_agent(pytest.monkeypatch)
+test_responsive_agent(pytest.monkeypatch)