diff --git a/test/prompt/test_prompt_node.py b/test/prompt/test_prompt_node.py
index b23ec9cd7..a9b8fc6d7 100644
--- a/test/prompt/test_prompt_node.py
+++ b/test/prompt/test_prompt_node.py
@@ -898,7 +898,7 @@ class TestTokenLimit:
         with caplog.at_level(logging.WARNING):
             _ = prompt_node.prompt(tt, documents=["Berlin is an amazing city."])
         assert "The prompt has been truncated from" in caplog.text
-        assert "and answer length (2000 tokens) fits within the max token limit (2048 tokens)." in caplog.text
+        assert "and answer length (2000 tokens) fits within the max token limit (2049 tokens)." in caplog.text


 class TestRunBatch: