diff --git a/haystack/nodes/prompt/prompt_template.py b/haystack/nodes/prompt/prompt_template.py index 46faa60e2..3e063569a 100644 --- a/haystack/nodes/prompt/prompt_template.py +++ b/haystack/nodes/prompt/prompt_template.py @@ -482,6 +482,13 @@ class PromptTemplate(BasePromptTemplate, ABC): if param in kwargs: params_dict[param] = kwargs[param] + if "documents" in self.prompt_params and "documents" not in params_dict: + params_dict["documents"] = [] + logger.warning( + "Expected prompt parameter 'documents' to be provided but it is missing. " + "Continuing with an empty list of documents." + ) + if not set(self.prompt_params).issubset(params_dict.keys()): available_params = {*params_dict.keys(), *kwargs.keys()} provided = set(self.prompt_params).intersection(available_params) diff --git a/test/prompt/test_prompt_node.py b/test/prompt/test_prompt_node.py index 142a0fae0..9c3e47bbd 100644 --- a/test/prompt/test_prompt_node.py +++ b/test/prompt/test_prompt_node.py @@ -1010,3 +1010,23 @@ def test_chatgpt_direct_prompting_w_messages(chatgpt_prompt_model): result = pn(messages) assert len(result) == 1 and all(w in result[0].casefold() for w in ["arlington", "texas"]) + + +@pytest.mark.unit +@patch("haystack.nodes.prompt.prompt_node.PromptModel") +def test_prompt_node_warns_about_missing_documents(mock_model, caplog): + lfqa_prompt = PromptTemplate( + prompt="""Synthesize a comprehensive answer from the following text for the given question. + Provide a clear and concise response that summarizes the key points and information presented in the text. + Your answer should be in your own words and be no longer than 50 words. + If answer is not in .text. say i dont know.
+ \n\n Related text: {join(documents)} \n\n Question: {query} \n\n Answer:""" + ) + prompt_node = PromptNode(default_prompt_template=lfqa_prompt) + + with caplog.at_level(logging.WARNING): + results, _ = prompt_node.run(query="non-matching query") + assert ( + "Expected prompt parameter 'documents' to be provided but it is missing. " + "Continuing with an empty list of documents." in caplog.text + ) diff --git a/test/prompt/test_prompt_template.py b/test/prompt/test_prompt_template.py index f053e4c51..ff627738b 100644 --- a/test/prompt/test_prompt_template.py +++ b/test/prompt/test_prompt_template.py @@ -211,6 +211,19 @@ def test_prompt_template_deserialization(mock_prompt_model): assert isinstance(loaded_generator.default_prompt_template.output_parser, AnswerParser) +@pytest.mark.unit +def test_prompt_template_fills_in_missing_documents(): + lfqa_prompt = PromptTemplate( + prompt="""Synthesize a comprehensive answer from the following text for the given question. + Provide a clear and concise response that summarizes the key points and information presented in the text. + Your answer should be in your own words and be no longer than 50 words. + If answer is not in .text. say i dont know. + \n\n Related text: {join(documents)} \n\n Question: {query} \n\n Answer:""" + ) + prepared_prompt = next(lfqa_prompt.fill(query="What is the meaning of life?")) # no documents provided but expected + assert "Related text: \n\n Question: What is the meaning of life?" in prepared_prompt + + class TestPromptTemplateSyntax: @pytest.mark.unit @pytest.mark.parametrize(