diff --git a/haystack/nodes/prompt/invocation_layer/hugging_face_inference.py b/haystack/nodes/prompt/invocation_layer/hugging_face_inference.py
index 239f52652..5525ee727 100644
--- a/haystack/nodes/prompt/invocation_layer/hugging_face_inference.py
+++ b/haystack/nodes/prompt/invocation_layer/hugging_face_inference.py
@@ -191,7 +191,7 @@ class HFInferenceEndpointInvocationLayer(PromptModelInvocationLayer):
         :param stream_handler: The handler to invoke on each token.
         :param stop_words: The stop words to ignore.
         """
-        client = sseclient.SSEClient(response)
+        client = sseclient.SSEClient(response)  # type: ignore # requests.Response behaves like a generator but the typing does not reflect it
         tokens: List[str] = []
         try:
             for event in client.events():
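
For context, a minimal standalone sketch of the pattern the patched line relies on, assuming the sseclient-py package and a placeholder endpoint URL (not Haystack's actual request): a streaming requests.Response is iterable over byte chunks, which is what SSEClient consumes at runtime, even though its type annotations do not declare it, hence the added "# type: ignore".

    # Sketch only: endpoint URL and payload below are placeholders.
    import requests
    import sseclient  # sseclient-py

    response = requests.post(
        "https://api-inference.example.com/models/some-model",  # placeholder endpoint
        headers={"Accept": "text/event-stream"},
        json={"inputs": "Hello", "stream": True},
        stream=True,  # keep the connection open so chunks arrive as they are generated
    )

    client = sseclient.SSEClient(response)  # type: ignore # Response yields byte chunks when iterated
    for event in client.events():
        print(event.data)  # each server-sent event carries one chunk of generated text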