Sync Core Integrations API reference (meta_llama) on Docusaurus (#9938)
Co-authored-by: vblagoje <458335+vblagoje@users.noreply.github.com>
parent 6a7e7754cc · commit 54930a4f23
@@ -57,7 +57,7 @@ def __init__(*,
              streaming_callback: Optional[StreamingCallbackT] = None,
              api_base_url: Optional[str] = "https://api.llama.com/compat/v1/",
              generation_kwargs: Optional[Dict[str, Any]] = None,
-             tools: Optional[Union[List[Tool], Toolset]] = None)
+             tools: Optional[ToolsType] = None)
```

Creates an instance of LlamaChatGenerator. Unless specified otherwise in the `model`, this is for Llama's
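
For orientation, a minimal usage sketch against the signature above. The import path follows the anchor ids in this reference; the model name and the `LLAMA_API_KEY` environment variable are assumptions, not part of this diff:

```python
from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.meta_llama import MetaLlamaChatGenerator

# Assumes LLAMA_API_KEY is exported in the environment.
generator = MetaLlamaChatGenerator(
    model="Llama-4-Scout-17B-16E-Instruct-FP8",  # illustrative model name
    generation_kwargs={"temperature": 0.2, "max_tokens": 512},
)

result = generator.run(messages=[ChatMessage.from_user("What is Haystack?")])
print(result["replies"][0].text)
```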
@@ -86,7 +86,8 @@ Some of the supported parameters:
events as they become available, with the stream terminated by a `data: [DONE]` message.
- `safe_prompt`: Whether to inject a safety prompt before all conversations.
- `random_seed`: The seed to use for random sampling.
-- `tools`: A list of tools for which the model can prepare calls.
+- `tools`: A list of Tool and/or Toolset objects, or a single Toolset for which the model can prepare calls.
+  Each tool should have a unique name.

<a id="haystack_integrations.components.generators.meta_llama.chat.chat_generator.MetaLlamaChatGenerator.to_dict"></a>
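
To illustrate the widened `tools` typing introduced above, a sketch continuing the earlier example (the tool name, schema, and function are illustrative; `Tool` and `Toolset` live in `haystack.tools` in recent Haystack releases):

```python
from haystack.tools import Tool, Toolset

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

# Each tool needs a unique name; `parameters` is a JSON Schema.
add_tool = Tool(
    name="add",
    description="Add two integers.",
    parameters={
        "type": "object",
        "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
        "required": ["a", "b"],
    },
    function=add,
)

# Either form now fits `tools: Optional[ToolsType]`.
generator = MetaLlamaChatGenerator(tools=[add_tool])
generator = MetaLlamaChatGenerator(tools=Toolset([add_tool]))
```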

@@ -102,93 +103,3 @@ Serialize this component to a dictionary.

The serialized component as a dictionary.

<a id="haystack_integrations.components.generators.meta_llama.chat.chat_generator.MetaLlamaChatGenerator.from_dict"></a>

#### MetaLlamaChatGenerator.from\_dict

```python
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "OpenAIChatGenerator"
```

Deserialize this component from a dictionary.

**Arguments**:

- `data`: The dictionary representation of this component.

**Returns**:

The deserialized component instance.
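
A round-trip sketch of the serialization contract, continuing the earlier example (the dictionary layout is whatever `to_dict` emits; despite the inherited return annotation, the classmethod rebuilds the class it is called on):

```python
# Serialize the configured component, then rebuild it from the dictionary.
data = generator.to_dict()
restored = MetaLlamaChatGenerator.from_dict(data)
```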
<a id="haystack_integrations.components.generators.meta_llama.chat.chat_generator.MetaLlamaChatGenerator.run"></a>

#### MetaLlamaChatGenerator.run

```python
@component.output_types(replies=list[ChatMessage])
def run(messages: list[ChatMessage],
        streaming_callback: Optional[StreamingCallbackT] = None,
        generation_kwargs: Optional[dict[str, Any]] = None,
        *,
        tools: Optional[ToolsType] = None,
        tools_strict: Optional[bool] = None)
```

Invokes chat completion based on the provided messages and generation parameters.

**Arguments**:

- `messages`: A list of ChatMessage instances representing the input messages.
- `streaming_callback`: A callback function that is called when a new token is received from the stream.
- `generation_kwargs`: Additional keyword arguments for text generation. These parameters will
  override the parameters passed during component initialization.
  For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).
- `tools`: A list of Tool and/or Toolset objects, or a single Toolset for which the model can prepare calls.
  If set, it will override the `tools` parameter provided during initialization.
- `tools_strict`: Whether to enable strict schema adherence for tool calls. If set to `True`, the model will follow exactly
  the schema provided in the `parameters` field of the tool definition, but this may increase latency.
  If set, it will override the `tools_strict` parameter set during component initialization.

**Returns**:

A dictionary with the following key:
- `replies`: A list containing the generated responses as ChatMessage instances.
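
A sketch of a `run` call exercising the per-call overrides documented above (assumes `generator` from the earlier sketch; `print_streaming_chunk` is Haystack's stock console streaming callback):

```python
from haystack.components.generators.utils import print_streaming_chunk
from haystack.dataclasses import ChatMessage

result = generator.run(
    messages=[ChatMessage.from_user("Summarize RAG in one sentence.")],
    streaming_callback=print_streaming_chunk,  # stream tokens as they arrive
    generation_kwargs={"temperature": 0.0},    # overrides the init-time value
)
print(result["replies"][0].text)
```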
<a id="haystack_integrations.components.generators.meta_llama.chat.chat_generator.MetaLlamaChatGenerator.run_async"></a>

#### MetaLlamaChatGenerator.run\_async

```python
@component.output_types(replies=list[ChatMessage])
async def run_async(messages: list[ChatMessage],
                    streaming_callback: Optional[StreamingCallbackT] = None,
                    generation_kwargs: Optional[dict[str, Any]] = None,
                    *,
                    tools: Optional[ToolsType] = None,
                    tools_strict: Optional[bool] = None)
```

Asynchronously invokes chat completion based on the provided messages and generation parameters.

This is the asynchronous version of the `run` method. It has the same parameters and return values
but can be used with `await` in async code.

**Arguments**:

- `messages`: A list of ChatMessage instances representing the input messages.
- `streaming_callback`: A callback function that is called when a new token is received from the stream.
  Must be a coroutine.
- `generation_kwargs`: Additional keyword arguments for text generation. These parameters will
  override the parameters passed during component initialization.
  For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).
- `tools`: A list of Tool and/or Toolset objects, or a single Toolset for which the model can prepare calls.
  If set, it will override the `tools` parameter provided during initialization.
- `tools_strict`: Whether to enable strict schema adherence for tool calls. If set to `True`, the model will follow exactly
  the schema provided in the `parameters` field of the tool definition, but this may increase latency.
  If set, it will override the `tools_strict` parameter set during component initialization.

**Returns**:

A dictionary with the following key:
- `replies`: A list containing the generated responses as ChatMessage instances.
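
And a minimal `run_async` sketch (same assumptions as the earlier sketches; a streaming callback passed here must be a coroutine):

```python
import asyncio

from haystack.dataclasses import ChatMessage

async def main() -> None:
    # Same parameters and return shape as run(), awaited in async code.
    result = await generator.run_async(
        messages=[ChatMessage.from_user("Name three uses of text embeddings.")]
    )
    print(result["replies"][0].text)

asyncio.run(main())
```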