From acf4cd502fd67c50c3bbbd3eb580738ff041c28f Mon Sep 17 00:00:00 2001
From: Massimiliano Pippi
Date: Fri, 26 Jan 2024 16:00:02 +0100
Subject: [PATCH] refact: Rename helper function (#6831)

* change function name

* add api docs

* release notes
---
 docs/pydoc/config/generator.yml                   |  3 ++-
 haystack/components/generators/utils.py           |  2 +-
 ...name-streaming-callback-f4e6653c0aac8765.yaml  |  7 +++++++
 test/components/generators/chat/test_azure.py     |  6 +++---
 test/components/generators/chat/test_openai.py    | 16 ++++++++--------
 test/components/generators/test_azure.py          |  6 +++---
 test/components/generators/test_openai.py         | 16 ++++++++--------
 test/components/generators/test_utils.py          | 10 +++++-----
 8 files changed, 37 insertions(+), 29 deletions(-)
 create mode 100644 releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml

diff --git a/docs/pydoc/config/generator.yml b/docs/pydoc/config/generator.yml
index 1f0bb79fa..a5a9b5780 100644
--- a/docs/pydoc/config/generator.yml
+++ b/docs/pydoc/config/generator.yml
@@ -10,7 +10,8 @@ loaders:
         "chat/azure",
         "chat/hugging_face_local",
         "chat/hugging_face_tgi",
-        "chat/openai"
+        "chat/openai",
+        "utils",
       ]
     ignore_when_discovered: ["__init__"]
 processors:
diff --git a/haystack/components/generators/utils.py b/haystack/components/generators/utils.py
index a8113d867..8bccc4251 100644
--- a/haystack/components/generators/utils.py
+++ b/haystack/components/generators/utils.py
@@ -6,7 +6,7 @@ from haystack import DeserializationError
 from haystack.dataclasses import StreamingChunk
 
 
-def default_streaming_callback(chunk: StreamingChunk) -> None:
+def print_streaming_chunk(chunk: StreamingChunk) -> None:
     """
     Default callback function for streaming responses.
     Prints the tokens of the first completion to stdout as soon as they are received
diff --git a/releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml b/releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml
new file mode 100644
index 000000000..ccba33952
--- /dev/null
+++ b/releasenotes/notes/rename-streaming-callback-f4e6653c0aac8765.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+enhancements:
+  - |
+    The name `default_streaming_callback` was confusing: this function is the go-to helper for
+    quickly printing generated tokens as they arrive, but it was not actually used as a default.
+    The function has therefore been renamed to `print_streaming_chunk`.
diff --git a/test/components/generators/chat/test_azure.py b/test/components/generators/chat/test_azure.py
index 2a9f900f7..667f50625 100644
--- a/test/components/generators/chat/test_azure.py
+++ b/test/components/generators/chat/test_azure.py
@@ -4,7 +4,7 @@ import pytest
 from openai import OpenAIError
 
 from haystack.components.generators.chat import AzureOpenAIChatGenerator
-from haystack.components.generators.utils import default_streaming_callback
+from haystack.components.generators.utils import print_streaming_chunk
 from haystack.dataclasses import ChatMessage
 
 
@@ -25,12 +25,12 @@ class TestOpenAIChatGenerator:
         component = AzureOpenAIChatGenerator(
             azure_endpoint="some-non-existing-endpoint",
             api_key="test-api-key",
-            streaming_callback=default_streaming_callback,
+            streaming_callback=print_streaming_chunk,
             generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
         )
         assert component.client.api_key == "test-api-key"
         assert component.azure_deployment == "gpt-35-turbo"
-        assert component.streaming_callback is default_streaming_callback
+        assert component.streaming_callback is print_streaming_chunk
         assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
 
     def test_to_dict_default(self):
diff --git a/test/components/generators/chat/test_openai.py b/test/components/generators/chat/test_openai.py
index 914294cdd..bb4dd1d9c 100644
--- a/test/components/generators/chat/test_openai.py
+++ b/test/components/generators/chat/test_openai.py
@@ -4,7 +4,7 @@ import pytest
 from openai import OpenAIError
 
 from haystack.components.generators.chat import OpenAIChatGenerator
-from haystack.components.generators.utils import default_streaming_callback
+from haystack.components.generators.utils import print_streaming_chunk
 from haystack.dataclasses import ChatMessage, StreamingChunk
 
 
@@ -33,13 +33,13 @@ class TestOpenAIChatGenerator:
         component = OpenAIChatGenerator(
             api_key="test-api-key",
             model="gpt-4",
-            streaming_callback=default_streaming_callback,
+            streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
             generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
         )
         assert component.client.api_key == "test-api-key"
         assert component.model == "gpt-4"
-        assert component.streaming_callback is default_streaming_callback
+        assert component.streaming_callback is print_streaming_chunk
         assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
 
     def test_to_dict_default(self):
@@ -60,7 +60,7 @@ class TestOpenAIChatGenerator:
         component = OpenAIChatGenerator(
             api_key="test-api-key",
             model="gpt-4",
-            streaming_callback=default_streaming_callback,
+            streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
             generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
         )
@@ -71,7 +71,7 @@
             "model": "gpt-4",
             "organization": None,
             "api_base_url": "test-base-url",
-            "streaming_callback": "haystack.components.generators.utils.default_streaming_callback",
+            "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
             "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"},
         },
     }
@@ -102,13 +102,13 @@ class TestOpenAIChatGenerator:
             "init_parameters": {
                 "model": "gpt-4",
                 "api_base_url": "test-base-url",
-                "streaming_callback": "haystack.components.generators.utils.default_streaming_callback",
+                "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
                 "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"},
             },
         }
         component = OpenAIChatGenerator.from_dict(data)
         assert component.model == "gpt-4"
-        assert component.streaming_callback is default_streaming_callback
+        assert component.streaming_callback is print_streaming_chunk
         assert component.api_base_url == "test-base-url"
         assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
 
@@ -120,7 +120,7 @@ class TestOpenAIChatGenerator:
             "model": "gpt-4",
             "organization": None,
             "api_base_url": "test-base-url",
-            "streaming_callback": "haystack.components.generators.utils.default_streaming_callback",
+            "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk",
             "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"},
         },
     }
diff --git a/test/components/generators/test_azure.py b/test/components/generators/test_azure.py
index 816afb9a5..9c83deaff 100644
--- a/test/components/generators/test_azure.py
+++ b/test/components/generators/test_azure.py
@@ -4,7 +4,7 @@ import pytest
 from openai import OpenAIError
 
 from haystack.components.generators import AzureOpenAIGenerator
-from haystack.components.generators.utils import default_streaming_callback
+from haystack.components.generators.utils import print_streaming_chunk
 
 
 class TestAzureOpenAIGenerator:
@@ -25,12 +25,12 @@ class TestAzureOpenAIGenerator:
             api_key="test-api-key",
             azure_endpoint="some-non-existing-endpoint",
             azure_deployment="gpt-35-turbo",
-            streaming_callback=default_streaming_callback,
+            streaming_callback=print_streaming_chunk,
             generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
         )
         assert component.client.api_key == "test-api-key"
         assert component.azure_deployment == "gpt-35-turbo"
-        assert component.streaming_callback is default_streaming_callback
+        assert component.streaming_callback is print_streaming_chunk
         assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
 
     def test_to_dict_default(self):
diff --git a/test/components/generators/test_openai.py b/test/components/generators/test_openai.py
index 41abac589..d973407cc 100644
--- a/test/components/generators/test_openai.py
+++ b/test/components/generators/test_openai.py
@@ -5,7 +5,7 @@ import pytest
 from openai import OpenAIError
 
 from haystack.components.generators import OpenAIGenerator
-from haystack.components.generators.utils import default_streaming_callback
+from haystack.components.generators.utils import print_streaming_chunk
 from haystack.dataclasses import StreamingChunk, ChatMessage
 
 
@@ -26,13 +26,13 @@ class TestOpenAIGenerator:
         component = OpenAIGenerator(
             api_key="test-api-key",
             model="gpt-4",
-            streaming_callback=default_streaming_callback,
+            streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
             generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
         )
         assert component.client.api_key == "test-api-key"
         assert component.model == "gpt-4"
-        assert component.streaming_callback is default_streaming_callback
+        assert component.streaming_callback is print_streaming_chunk
         assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
 
     def test_to_dict_default(self):
@@ -53,7 +53,7 @@ class TestOpenAIGenerator:
         component = OpenAIGenerator(
             api_key="test-api-key",
             model="gpt-4",
-            streaming_callback=default_streaming_callback,
+            streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"}, ) @@ -64,7 +64,7 @@ class TestOpenAIGenerator: "model": "gpt-4", "system_prompt": None, "api_base_url": "test-base-url", - "streaming_callback": "haystack.components.generators.utils.default_streaming_callback", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"}, }, } @@ -97,13 +97,13 @@ class TestOpenAIGenerator: "model": "gpt-4", "system_prompt": None, "api_base_url": "test-base-url", - "streaming_callback": "haystack.components.generators.utils.default_streaming_callback", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"}, }, } component = OpenAIGenerator.from_dict(data) assert component.model == "gpt-4" - assert component.streaming_callback is default_streaming_callback + assert component.streaming_callback is print_streaming_chunk assert component.api_base_url == "test-base-url" assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"} @@ -114,7 +114,7 @@ class TestOpenAIGenerator: "init_parameters": { "model": "gpt-4", "api_base_url": "test-base-url", - "streaming_callback": "haystack.components.generators.utils.default_streaming_callback", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"}, }, } diff --git a/test/components/generators/test_utils.py b/test/components/generators/test_utils.py index aebdd8d2b..b3372774e 100644 --- a/test/components/generators/test_utils.py +++ b/test/components/generators/test_utils.py @@ -1,6 +1,6 @@ import pytest -from haystack.components.generators.utils import default_streaming_callback +from haystack.components.generators.utils import print_streaming_chunk from haystack.components.generators.utils import serialize_callback_handler, deserialize_callback_handler @@ -15,8 +15,8 @@ def test_callback_handler_serialization(): def test_callback_handler_serialization_non_local(): - result = serialize_callback_handler(default_streaming_callback) - assert result == "haystack.components.generators.utils.default_streaming_callback" + result = serialize_callback_handler(print_streaming_chunk) + assert result == "haystack.components.generators.utils.print_streaming_chunk" def test_callback_handler_deserialization(): @@ -27,7 +27,7 @@ def test_callback_handler_deserialization(): def test_callback_handler_deserialization_non_local(): - result = serialize_callback_handler(default_streaming_callback) + result = serialize_callback_handler(print_streaming_chunk) fn = deserialize_callback_handler(result) - assert fn is default_streaming_callback + assert fn is print_streaming_chunk