haystack/test/prompt/invocation_layer/test_invocation_layers.py
Vladimir Blagojevic 409e3471cb
feat: Enable Support for Meta LLama-2 Models in Amazon Sagemaker (#5437)
* Enable Support for Meta LLama-2 Models in Amazon Sagemaker

* Improve unit test for invocation layers positioning

* Small adjustment, add more unit tests

* mypy fixes

* Improve unit tests

* Update test/prompt/invocation_layer/test_sagemaker_meta.py

Co-authored-by: Stefano Fiorucci <44616784+anakin87@users.noreply.github.com>

* PR feedback

* Add pydocs for newly extracted methods

* simplify is_proper_chat_*

---------

Co-authored-by: Stefano Fiorucci <44616784+anakin87@users.noreply.github.com>
Co-authored-by: anakin87 <stefanofiorucci@gmail.com>
2023-07-26 15:26:39 +02:00

20 lines
880 B
Python

import pytest
from haystack.nodes.prompt.prompt_model import PromptModelInvocationLayer
from haystack.nodes.prompt.invocation_layer import HFLocalInvocationLayer, HFInferenceEndpointInvocationLayer
@pytest.mark.unit
def test_invocation_layer_order():
    """
    Verify that both HuggingFace invocation layers sit in the back half of the
    provider list: they can be slow to respond or time out, so faster providers
    should be tried first.
    """
    providers = PromptModelInvocationLayer.invocation_layer_providers
    for hf_layer in (HFLocalInvocationLayer, HFInferenceEndpointInvocationLayer):
        assert hf_layer in providers
        # 1-based position of this layer within the provider list
        position = providers.index(hf_layer) + 1
        assert position > len(providers) / 2