docs: Update docstrings for haystack.utils (#7269)

* Update docstrings for haystack.utils

* Update haystack/utils/requests_utils.py

Co-authored-by: Massimiliano Pippi <mpippi@gmail.com>

* Update haystack/utils/requests_utils.py

Co-authored-by: Massimiliano Pippi <mpippi@gmail.com>

* Update haystack/utils/requests_utils.py

Co-authored-by: Massimiliano Pippi <mpippi@gmail.com>

---------

Co-authored-by: Massimiliano Pippi <mpippi@gmail.com>
This commit is contained in:
Vladimir Blagojevic 2024-03-01 17:25:12 +01:00 committed by GitHub
parent 247716f008
commit 3077a08c60
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 33 additions and 13 deletions

View File

@@ -24,6 +24,15 @@ class SecretType(Enum):
class Secret(ABC):
"""
Encapsulates a secret used for authentication.
Usage example:
```python
from haystack.components.generators import OpenAIGenerator
from haystack.utils import Secret
generator = OpenAIGenerator(api_key=Secret.from_token("<here_goes_your_token>"))
...
```
""" """
@staticmethod @staticmethod

View File

@@ -30,6 +30,8 @@ def serialize_hf_model_kwargs(kwargs: Dict[str, Any]):
"""
Recursively serialize HuggingFace specific model keyword arguments
in-place to make them JSON serializable.
:param kwargs: The keyword arguments to serialize
"""
torch_import.check()
@@ -46,6 +48,8 @@ def deserialize_hf_model_kwargs(kwargs: Dict[str, Any]):
"""
Recursively deserialize HuggingFace specific model keyword arguments
in-place to make them JSON serializable.
:param kwargs: The keyword arguments to deserialize
"""
torch_import.check()
@@ -99,7 +103,6 @@ def list_inference_deployed_models(headers: Optional[Dict] = None) -> List[str]:
List all currently deployed models on HF TGI free tier
:param headers: Optional dictionary of headers to include in the request
:type headers: Optional[Dict]
:return: list of all currently deployed models
:raises Exception: If the request to the TGI API fails
@@ -180,7 +183,7 @@ with LazyImport(message="Run 'pip install transformers[torch]'") as torch_and_tr
class StopWordsCriteria(StoppingCriteria):
"""
Stops text generation if any one of the stop words is generated. Stops text generation in HuggingFace generators if any one of the stop words is generated.
Note: When a stop word is encountered, the generation of new text is stopped.
However, if the stop word is in the prompt itself, it can stop generating new text
@@ -226,6 +229,15 @@ with LazyImport(message="Run 'pip install transformers[torch]'") as torch_and_tr
return result
class HFTokenStreamingHandler(TextStreamer):
"""
Streaming handler for HuggingFaceLocalGenerator and HuggingFaceLocalChatGenerator.
Note: This is a helper class for HuggingFaceLocalGenerator & HuggingFaceLocalChatGenerator enabling streaming
of generated text via Haystack Callable[StreamingChunk, None] callbacks.
Do not use this class directly.
"""
def __init__(
self,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],

View File

@@ -16,8 +16,15 @@ def request_with_retry(
All kwargs will be passed to ``requests.request``, so it accepts the same arguments.
Example Usage: :param attempts: Maximum number of attempts to retry the request.
-------------- :param status_codes_to_retry: List of HTTP status codes that will trigger a retry. When param is `None`, HTTP 408, 418, 429 and 503 will be retried.
:param **kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
Usage examples:
```python
from haystack.utils import request_with_retry
# Sending an HTTP request with default retry configs
res = request_with_retry(method="GET", url="https://example.com")
@@ -54,15 +61,7 @@ def request_with_retry(
# Retry all 5xx status codes
res = request_with_retry(method="GET", url="https://example.com", status_codes_to_retry=list(range(500, 600)))
```
:param attempts: Maximum number of attempts to retry the request, defaults to 3
:param status_codes_to_retry: List of HTTP status codes that will trigger a retry, defaults to [408, 418, 429, 503]:
- `408: Request Timeout`
- `418`
- `429: Too Many Requests`
- `503: Service Unavailable`
:param **kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
""" """
if status_codes_to_retry is None: if status_codes_to_retry is None: