diff --git a/haystack/nodes/prompt/invocation_layer/hugging_face.py b/haystack/nodes/prompt/invocation_layer/hugging_face.py
index 8dc847b90..aaeebc889 100644
--- a/haystack/nodes/prompt/invocation_layer/hugging_face.py
+++ b/haystack/nodes/prompt/invocation_layer/hugging_face.py
@@ -280,7 +280,7 @@ class HFLocalInvocationLayer(PromptModelInvocationLayer):
         # We want to exclude it to be consistent with other invocation layers
         for idx, _ in enumerate(generated_texts):
             for stop_word in stop_words:
-                generated_texts[idx] = generated_texts[idx].replace(stop_word, "").strip()
+                generated_texts[idx] = generated_texts[idx].replace(stop_word, "").rstrip()
         return generated_texts

     def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:
diff --git a/releasenotes/notes/fix-stop-words-strip-issue-22ce51306e7b91e4.yaml b/releasenotes/notes/fix-stop-words-strip-issue-22ce51306e7b91e4.yaml
new file mode 100644
index 000000000..3dab779ed
--- /dev/null
+++ b/releasenotes/notes/fix-stop-words-strip-issue-22ce51306e7b91e4.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Ensure the leading whitespace in the generated text is preserved when using `stop_words` in the Hugging Face invocation layer of the PromptNode.