mirror of
https://github.com/deepset-ai/haystack.git
synced 2025-08-12 10:37:58 +00:00

* Enable Support for Meta LLama-2 Models in Amazon Sagemaker * Improve unit test for invocation layers positioning * Small adjustment, add more unit tests * mypy fixes * Improve unit tests * Update test/prompt/invocation_layer/test_sagemaker_meta.py Co-authored-by: Stefano Fiorucci <44616784+anakin87@users.noreply.github.com> * PR feedback * Add pydocs for newly extracted methods * simplify is_proper_chat_* --------- Co-authored-by: Stefano Fiorucci <44616784+anakin87@users.noreply.github.com> Co-authored-by: anakin87 <stefanofiorucci@gmail.com>
20 lines
880 B
Python
20 lines
880 B
Python
import pytest
|
|
|
|
from haystack.nodes.prompt.prompt_model import PromptModelInvocationLayer
|
|
from haystack.nodes.prompt.invocation_layer import HFLocalInvocationLayer, HFInferenceEndpointInvocationLayer
|
|
|
|
|
|
@pytest.mark.unit
def test_invocation_layer_order():
    """
    Checks that the huggingface invocation layer is positioned further down the list of providers
    as they can time out or be slow to respond.
    """
    providers = PromptModelInvocationLayer.invocation_layer_providers
    midpoint = len(providers) / 2
    for hf_layer in (HFLocalInvocationLayer, HFInferenceEndpointInvocationLayer):
        # Each HF layer must be registered at all...
        assert hf_layer in providers
        # ...and must sit in the back half of the provider list.
        # index() is zero-based; +1 converts it to a 1-based position for the midpoint comparison.
        assert providers.index(hf_layer) + 1 > midpoint