fix some docstring issues affecting rendering (#1739)

* fix some docstring issues affecting rendering

* Update pydoc-markdown.yml

* undo double backtick

* Update compressible_agent.py
Jack Gerrits 2024-02-20 17:29:39 -05:00 committed by GitHub
parent 2750391f84
commit d8a204a9a3
10 changed files with 130 additions and 117 deletions


@@ -84,10 +84,9 @@ Reply "TERMINATE" in the end when everything is done.
compress_config (dict or True/False): config for compression before oai_reply. Default to False.
It should contain the following keys:
- "mode" (Optional, str, default to "TERMINATE"): Choose from ["COMPRESS", "TERMINATE", "CUSTOMIZED"].
"TERMINATE": terminate the conversation ONLY when token count exceeds the max limit of current model.
`trigger_count` is NOT used in this mode.
"COMPRESS": compress the messages when the token count exceeds the limit.
"CUSTOMIZED": pass in a customized function to compress the messages.
1. `TERMINATE`: terminate the conversation ONLY when token count exceeds the max limit of current model. `trigger_count` is NOT used in this mode.
2. `COMPRESS`: compress the messages when the token count exceeds the limit.
3. `CUSTOMIZED`: pass in a customized function to compress the messages.
- "compress_function" (Optional, callable, default to None): Must be provided when mode is "CUSTOMIZED".
The function should take a list of messages and return a tuple of (is_compress_success: bool, compressed_messages: List[Dict]).
- "trigger_count" (Optional, float, int, default to 0.7): the threshold to trigger compression.


@@ -29,12 +29,12 @@ class QdrantRetrieveUserProxyAgent(RetrieveUserProxyAgent):
name (str): name of the agent.
human_input_mode (str): whether to ask for human inputs every time a message is received.
Possible values are "ALWAYS", "TERMINATE", "NEVER".
(1) When "ALWAYS", the agent prompts for human input every time a message is received.
1. When "ALWAYS", the agent prompts for human input every time a message is received.
Under this mode, the conversation stops when the human input is "exit",
or when is_termination_msg is True and there is no human input.
(2) When "TERMINATE", the agent prompts for human input only when a termination message is received or
2. When "TERMINATE", the agent prompts for human input only when a termination message is received or
the number of auto reply reaches the max_consecutive_auto_reply.
(3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
3. When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
is_termination_msg (function): a function that takes a message in the form of a dictionary
and returns a boolean value indicating if this received message is a termination message.
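A hedged instantiation sketch may make the three modes concrete. The agent name and the termination predicate below are illustrative, and the import path assumes the `autogen.agentchat.contrib` layout this diff is editing:

```python
from autogen.agentchat.contrib.qdrant_retrieve_user_proxy_agent import QdrantRetrieveUserProxyAgent

# "TERMINATE": ask a human only on a termination message or once the
# auto-reply budget is exhausted; the lambda is an illustrative predicate.
ragproxyagent = QdrantRetrieveUserProxyAgent(
    name="qdrant_ragproxy",
    human_input_mode="TERMINATE",
    is_termination_msg=lambda msg: "TERMINATE" in (msg.get("content") or ""),
)
```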


@@ -77,17 +77,17 @@ class RetrieveUserProxyAgent(UserProxyAgent):
retrieve_config: Optional[Dict] = None, # config for the retrieve agent
**kwargs,
):
"""
r"""
Args:
name (str): name of the agent.
human_input_mode (str): whether to ask for human inputs every time a message is received.
Possible values are "ALWAYS", "TERMINATE", "NEVER".
(1) When "ALWAYS", the agent prompts for human input every time a message is received.
1. When "ALWAYS", the agent prompts for human input every time a message is received.
Under this mode, the conversation stops when the human input is "exit",
or when is_termination_msg is True and there is no human input.
(2) When "TERMINATE", the agent prompts for human input only when a termination message is received or
2. When "TERMINATE", the agent prompts for human input only when a termination message is received or
the number of auto reply reaches the max_consecutive_auto_reply.
(3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
3. When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
is_termination_msg (function): a function that takes a message in the form of a dictionary
and returns a boolean value indicating if this received message is a termination message.
@@ -136,10 +136,11 @@ class RetrieveUserProxyAgent(UserProxyAgent):
- custom_text_types (Optional, List[str]): a list of file types to be processed. Default is `autogen.retrieve_utils.TEXT_FORMATS`.
This only applies to files under the directories in `docs_path`. Explicitly included files and urls will be chunked regardless of their types.
- recursive (Optional, bool): whether to search documents recursively in the docs_path. Default is True.
**kwargs (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
`**kwargs` (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
Example of overriding retrieve_docs:
If you have set up a customized vector db, and it's not compatible with chromadb, you can easily plug it in with the code below.
Example:
Example of overriding retrieve_docs - If you have set up a customized vector db, and it's not compatible with chromadb, you can easily plug it in with the code below.
```python
class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
def query_vector_db(
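```

The hunk cuts the example off mid-signature. For orientation, a hedged sketch of the full override pattern: it assumes the `retrieve_docs(problem, n_results, search_string)` signature and the `self._results` attribute that `RetrieveUserProxyAgent` reads retrieved docs from, and the query logic itself is a placeholder.

```python
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
    def query_vector_db(self, query_texts, n_results=10, search_string="", **kwargs):
        # Query your own vector db and return results shaped like chromadb's
        # QueryResult: {"ids": [[...]], "documents": [[...]], ...}
        raise NotImplementedError

    def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = "", **kwargs):
        results = self.query_vector_db(
            query_texts=[problem], n_results=n_results, search_string=search_string, **kwargs
        )
        self._results = results  # downstream replies read the retrieved docs from here
```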


@@ -25,7 +25,7 @@ def consolidate_chat_info(chat_info, uniform_sender=None) -> None:
def gather_usage_summary(agents: List[Agent]) -> Tuple[Dict[str, any], Dict[str, any]]:
"""Gather usage summary from all agents.
r"""Gather usage summary from all agents.
Args:
agents (list): List of agents.
@@ -33,19 +33,24 @@ def gather_usage_summary(agents: List[Agent]) -> Tuple[Dict[str, any], Dict[str,
Returns:
tuple: (total_usage_summary, actual_usage_summary)
Example return:
total_usage_summary = {
'total_cost': 0.0006090000000000001,
'gpt-35-turbo':
{
'cost': 0.0006090000000000001,
'prompt_tokens': 242,
'completion_tokens': 123,
'total_tokens': 365
}
`actual_usage_summary` follows the same format.
If none of the agents incurred any cost (not having a client), then the total_usage_summary and actual_usage_summary will be {'total_cost': 0}.
Example:
```python
total_usage_summary = {
    "total_cost": 0.0006090000000000001,
    "gpt-35-turbo": {
        "cost": 0.0006090000000000001,
        "prompt_tokens": 242,
        "completion_tokens": 123,
        "total_tokens": 365
    }
}
```
Note:
`actual_usage_summary` follows the same format.
If none of the agents incurred any cost (not having a client), then the total_usage_summary and actual_usage_summary will be `{'total_cost': 0}`.
"""
def aggregate_summary(usage_summary: Dict[str, any], agent_summary: Dict[str, any]) -> None:
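For orientation, a hedged usage sketch of `gather_usage_summary`; it assumes the top-level `AssistantAgent`/`UserProxyAgent` exports, an existing `config_list`, and that a chat has already run between the two agents:

```python
from autogen import AssistantAgent, UserProxyAgent
from autogen.agentchat.utils import gather_usage_summary

assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user = UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)
# ... user.initiate_chat(assistant, message="...") ...

total_usage_summary, actual_usage_summary = gather_usage_summary([assistant, user])
print(total_usage_summary["total_cost"])  # 0 when no agent has a client ({'total_cost': 0})
```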


@@ -14,16 +14,6 @@ class Cache:
Attributes:
config (Dict[str, Any]): A dictionary containing cache configuration.
cache: The cache instance created based on the provided configuration.
Methods:
redis(cache_seed=42, redis_url="redis://localhost:6379/0"): Static method to create a Redis cache instance.
disk(cache_seed=42, cache_path_root=".cache"): Static method to create a Disk cache instance.
__init__(self, config): Initializes the Cache with the given configuration.
__enter__(self): Context management entry, returning the cache instance.
__exit__(self, exc_type, exc_value, traceback): Context management exit.
get(self, key, default=None): Retrieves an item from the cache.
set(self, key, value): Sets an item in the cache.
close(self): Closes the cache.
"""
ALLOWED_CONFIG_KEYS = ["cache_seed", "redis_url", "cache_path_root"]
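The removed `Methods:` list rendered poorly, but the API it described is unchanged. A hedged sketch of the context-manager usage it covered; the seed and URL are placeholders, and the import assumes `Cache` is exported from `autogen.cache`:

```python
from autogen.cache import Cache

# Redis-backed cache per the removed docstring; Cache.disk(cache_seed=42,
# cache_path_root=".cache") is the disk-backed equivalent.
with Cache.redis(cache_seed=42, redis_url="redis://localhost:6379/0") as cache:
    cache.set("key", "value")
    assert cache.get("key", default=None) == "value"
# __exit__ closes the underlying cache handle (equivalent to cache.close()).
```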


@@ -28,11 +28,17 @@ class CacheFactory:
and the provided redis_url.
Examples:
Creating a Redis cache
> redis_cache = cache_factory("myseed", "redis://localhost:6379/0")
Creating a Disk cache
> disk_cache = cache_factory("myseed", None)
Creating a Redis cache
```python
redis_cache = cache_factory("myseed", "redis://localhost:6379/0")
```
Creating a Disk cache
```python
disk_cache = cache_factory("myseed", None)
```
"""
if RedisCache is not None and redis_url is not None:
return RedisCache(seed, redis_url)


@@ -225,21 +225,22 @@ def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, de
TypeError: If the function is not annotated
Examples:
```
```python
def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None:
    pass

get_function_schema(f, description="function f")
# {'type': 'function',
#  'function': {'description': 'function f',
#               'name': 'f',
#               'parameters': {'type': 'object',
#                              'properties': {'a': {'type': 'str', 'description': 'Parameter a'},
#                                             'b': {'type': 'int', 'description': 'b'},
#                                             'c': {'type': 'float', 'description': 'Parameter c'}},
#                              'required': ['a']}}}
```
"""
typed_signature = get_typed_signature(f)


@@ -103,7 +103,7 @@ def get_config_list(
list: A list of configs for OpenAI API calls.
Example:
```
```python
# Define a list of API keys
api_keys = ['key1', 'key2', 'key3']
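
# The hunk truncates the example here. A hedged completion follows; it is not
# the file's actual text and assumes the documented defaults for base_urls
# and api_type:
config_list = get_config_list(api_keys, base_urls=None, api_type="openai")
# -> one config dict per key, e.g. [{'api_key': 'key1', 'api_type': 'openai'}, ...]
```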
@@ -292,32 +292,32 @@ def config_list_from_models(
list: A list of configs for OpenAI API calls, each including model information.
Example:
```
```python
# Define the path where the API key files are located
key_file_path = '/path/to/key/files'

# Define the file names for the OpenAI and Azure OpenAI API keys and bases
openai_api_key_file = 'key_openai.txt'
aoai_api_key_file = 'key_aoai.txt'
aoai_api_base_file = 'base_aoai.txt'

# Define the list of models for which to create configurations
model_list = ['gpt-4', 'gpt-3.5-turbo']

# Call the function to get a list of configuration dictionaries
config_list = config_list_from_models(
    key_file_path=key_file_path,
    openai_api_key_file=openai_api_key_file,
    aoai_api_key_file=aoai_api_key_file,
    aoai_api_base_file=aoai_api_base_file,
    model_list=model_list
)

# The `config_list` will contain configurations for the specified models, for example:
# [
#     {'api_key': '...', 'base_url': 'https://api.openai.com', 'model': 'gpt-4'},
#     {'api_key': '...', 'base_url': 'https://api.openai.com', 'model': 'gpt-3.5-turbo'}
# ]
```
"""
config_list = config_list_openai_aoai(
@@ -383,40 +383,39 @@ def filter_config(config_list, filter_dict):
in `filter_dict`.
Example:
```
```python
# Example configuration list with various models and API types
configs = [
    {'model': 'gpt-3.5-turbo'},
    {'model': 'gpt-4'},
    {'model': 'gpt-3.5-turbo', 'api_type': 'azure'},
    {'model': 'gpt-3.5-turbo', 'tags': ['gpt35_turbo', 'gpt-35-turbo']},
]

# Define filter criteria to select configurations for the 'gpt-3.5-turbo' model
# that are also using the 'azure' API type
filter_criteria = {
    'model': ['gpt-3.5-turbo'],  # Only accept configurations for 'gpt-3.5-turbo'
    'api_type': ['azure']  # Only accept configurations for 'azure' API type
}

# Apply the filter to the configuration list
filtered_configs = filter_config(configs, filter_criteria)

# The resulting `filtered_configs` will be:
# [{'model': 'gpt-3.5-turbo', 'api_type': 'azure', ...}]

# Define a filter to select a given tag
filter_criteria = {
    'tags': ['gpt35_turbo'],
}

# Apply the filter to the configuration list
filtered_configs = filter_config(configs, filter_criteria)

# The resulting `filtered_configs` will be:
# [{'model': 'gpt-3.5-turbo', 'tags': ['gpt35_turbo', 'gpt-35-turbo']}]
```
Note:
@@ -467,7 +466,7 @@ def config_list_from_json(
keys representing field names and values being lists or sets of acceptable values for those fields.
Example:
```
```python
# Suppose we have an environment variable 'CONFIG_JSON' with the following content:
# '[{"model": "gpt-3.5-turbo", "api_type": "azure"}, {"model": "gpt-4"}]'
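
# The example breaks off above. A hedged sketch of the call that the comment
# leads up to; filter_dict semantics are those of filter_config, and the
# values are illustrative:
config_list = config_list_from_json(
    env_or_file="CONFIG_JSON",
    filter_dict={"api_type": ["azure"]},
)
```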
@@ -511,7 +510,7 @@ def get_config(
Constructs a configuration dictionary for a single model with the provided API configurations.
Example:
```
```python
config = get_config(
api_key="sk-abcdef1234567890",
base_url="https://api.openai.com",
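    # ... the hunk cuts the call off here. A hedged completion with
    # illustrative values; api_type and api_version are optional:
    api_type="azure",
    api_version="2024-02-01",
)
# -> {'api_key': 'sk-abcdef1234567890', 'base_url': 'https://api.openai.com',
#     'api_type': 'azure', 'api_version': '2024-02-01'}
```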


@@ -276,8 +276,10 @@ def create_vector_db_from_dir(
custom_text_types (Optional, List[str]): a list of file types to be processed. Default is TEXT_FORMATS.
recursive (Optional, bool): whether to search documents recursively in the dir_path. Default is True.
extra_docs (Optional, bool): whether to add more documents in the collection. Default is False
Returns:
API: the chromadb client.
The chromadb client.
"""
if client is None:
client = chromadb.PersistentClient(path=db_path)
@@ -353,13 +355,17 @@ def query_vector_db(
functions, you can pass it here, following the examples in `https://docs.trychroma.com/embeddings`.
Returns:
QueryResult: the query result. The format is:
class QueryResult(TypedDict):
ids: List[IDs]
embeddings: Optional[List[List[Embedding]]]
documents: Optional[List[List[Document]]]
metadatas: Optional[List[List[Metadata]]]
distances: Optional[List[List[float]]]
The query result. The format is:
```python
class QueryResult(TypedDict):
ids: List[IDs]
embeddings: Optional[List[List[Embedding]]]
documents: Optional[List[List[Document]]]
metadatas: Optional[List[List[Metadata]]]
distances: Optional[List[List[float]]]
```
"""
if client is None:
client = chromadb.PersistentClient(path=db_path)
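A hedged end-to-end sketch for the return shape above; the directory, query text, and `db_path` are illustrative, and the calls assume the module-level defaults in `autogen.retrieve_utils`:

```python
from autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db

create_vector_db_from_dir(dir_path="./docs", db_path="/tmp/chromadb")
results = query_vector_db(
    query_texts=["How do I configure the agent?"], n_results=3, db_path="/tmp/chromadb"
)
# results follows the QueryResult TypedDict documented above;
# each field is a list with one entry per query text.
for doc_id, doc in zip(results["ids"][0], results["documents"][0]):
    print(doc_id, doc[:80])
```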


@@ -4,8 +4,7 @@ loaders:
processors:
- type: filter
skip_empty_modules: true
- type: smart
- type: crossref
- type: google
renderer:
type: docusaurus
docs_base_path: docs
@@ -13,4 +12,11 @@ renderer:
relative_sidebar_path: sidebar.json
sidebar_top_level_label: Reference
markdown:
escape_html_in_docstring: true
escape_html_in_docstring: false
descriptive_class_title: false
header_level_by_type:
Module: 1
Class: 2
Method: 3
Function: 3
Variable: 4