Sebastian 7a140c1524
feat: add ensure token limit for direct prompting of ChatGPT (#5166)
* Add support for prompt truncation when using ChatGPT with direct prompting

* Update tests for the token limit in PromptNode

* Update warning message to be correct

* Minor cleanup

* Mark back to integration

* Update count_openai_tokens_messages to reflect changes in tiktoken

* Use mocking to avoid request call

* Fix test to make it comply with unit test requirements

* Move tests to respective invocation layers

* Moved fixture to one spot
2023-06-21 15:41:28 +02:00
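
Conceptually, the feature encodes the prompt, compares its length against the model's remaining context budget, and truncates with a warning when it does not fit. Below is a minimal sketch of that idea using tiktoken; the function name and parameters (ensure_token_limit, max_tokens_limit, answer_tokens) are assumptions for illustration, not Haystack's actual implementation:

    import warnings

    import tiktoken


    def ensure_token_limit(prompt: str, max_tokens_limit: int, answer_tokens: int) -> str:
        # Sketch only: name and signature are hypothetical, not Haystack's API.
        enc = tiktoken.get_encoding("cl100k_base")  # encoding used by gpt-3.5-turbo / gpt-4
        tokens = enc.encode(prompt)
        budget = max_tokens_limit - answer_tokens  # room left for the prompt itself
        if len(tokens) <= budget:
            return prompt
        warnings.warn(
            f"Prompt truncated from {len(tokens)} to {budget} tokens "
            "to fit within the model's maximum context length."
        )
        return enc.decode(tokens[:budget])

Because truncation depends on the tokenizer, the PR's unit tests mock the tokenizer loader so no real model or network call is needed.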

Test fixture (Python):

from unittest.mock import patch, MagicMock

import pytest


@pytest.fixture
def mock_openai_tokenizer():
    with patch("haystack.nodes.prompt.invocation_layer.open_ai.load_openai_tokenizer") as mock_tokenizer_func:
        mock_tokenizer = MagicMock()  # this will be our mock tokenizer
        # Token IDs for: "This is a test for a mock openai tokenizer."
        mock_tokenizer.encode.return_value = [2028, 374, 264, 1296, 369, 264, 8018, 1825, 2192, 47058, 13]
        # Decoding the truncated token list [2028, 374, 264, 1296, 369, 264, 8018, 1825, 2192]
        # yields the shortened prompt below.
        mock_tokenizer.decode.return_value = "This is a test for a mock openai"
        mock_tokenizer_func.return_value = mock_tokenizer
        yield mock_tokenizer_func
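
A hedged usage sketch of the fixture above; the test name and assertions are illustrative, not taken from the PR. Since the fixture yields the patched loader function, its return_value is the mock tokenizer, which returns the canned token list and truncated string regardless of input:

    def test_mock_tokenizer_truncation(mock_openai_tokenizer):
        tokenizer = mock_openai_tokenizer.return_value
        tokens = tokenizer.encode("This is a test for a mock openai tokenizer.")
        assert len(tokens) == 11  # the full mocked prompt is 11 tokens
        # The mock returns the canned truncated text for any decode input,
        # simulating what decoding the first 9 tokens would produce.
        assert tokenizer.decode(tokens[:9]) == "This is a test for a mock openai"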