diff --git a/haystack/dataclasses/chat_message.py b/haystack/dataclasses/chat_message.py
index a7e2d318d..4bfbe5582 100644
--- a/haystack/dataclasses/chat_message.py
+++ b/haystack/dataclasses/chat_message.py
@@ -5,7 +5,6 @@
 from dataclasses import asdict, dataclass, field
 from enum import Enum
 from typing import Any, Dict, Optional
-from warnings import warn
 
 
 class ChatRole(str, Enum):
@@ -33,25 +32,6 @@ class ChatMessage:
     name: Optional[str]
     meta: Dict[str, Any] = field(default_factory=dict, hash=False)
 
-    def to_openai_format(self) -> Dict[str, Any]:
-        """
-        Convert the message to the format expected by OpenAI's Chat API.
-
-        See the [API reference](https://platform.openai.com/docs/api-reference/chat/create) for details.
-
-        :returns: A dictionary with the following key:
-            - `role`
-            - `content`
-            - `name` (optional)
-        """
-        warn("The `to_openai_format` method is deprecated and will be removed in Haystack 2.5.0.", DeprecationWarning)
-
-        msg = {"role": self.role.value, "content": self.content}
-        if self.name:
-            msg["name"] = self.name
-
-        return msg
-
     def is_from(self, role: ChatRole) -> bool:
         """
         Check if the message is from a specific role.
diff --git a/releasenotes/notes/deprecate-chatmessage-toopenaiformat-9b1b2987a568d3d7.yaml b/releasenotes/notes/deprecate-chatmessage-toopenaiformat-9b1b2987a568d3d7.yaml
new file mode 100644
index 000000000..8b861068a
--- /dev/null
+++ b/releasenotes/notes/deprecate-chatmessage-toopenaiformat-9b1b2987a568d3d7.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+  - |
+    Remove `ChatMessage.to_openai_format` method. Use `haystack.components.generators.openai_utils._convert_message_to_openai_format` instead.
diff --git a/test/dataclasses/test_chat_message.py b/test/dataclasses/test_chat_message.py
index ab4606d7b..642ff4d55 100644
--- a/test/dataclasses/test_chat_message.py
+++ b/test/dataclasses/test_chat_message.py
@@ -5,6 +5,7 @@ import pytest
 from transformers import AutoTokenizer
 
 from haystack.dataclasses import ChatMessage, ChatRole
+from haystack.components.generators.openai_utils import _convert_message_to_openai_format
 
 
 def test_from_assistant_with_valid_content():
@@ -42,20 +43,24 @@ def test_from_function_with_empty_name():
 
 def test_to_openai_format():
     message = ChatMessage.from_system("You are good assistant")
-    assert message.to_openai_format() == {"role": "system", "content": "You are good assistant"}
+    assert _convert_message_to_openai_format(message) == {"role": "system", "content": "You are good assistant"}
 
     message = ChatMessage.from_user("I have a question")
-    assert message.to_openai_format() == {"role": "user", "content": "I have a question"}
+    assert _convert_message_to_openai_format(message) == {"role": "user", "content": "I have a question"}
 
     message = ChatMessage.from_function("Function call", "function_name")
-    assert message.to_openai_format() == {"role": "function", "content": "Function call", "name": "function_name"}
+    assert _convert_message_to_openai_format(message) == {
+        "role": "function",
+        "content": "Function call",
+        "name": "function_name",
+    }
 
 
 @pytest.mark.integration
 def test_apply_chat_templating_on_chat_message():
     messages = [ChatMessage.from_system("You are good assistant"), ChatMessage.from_user("I have a question")]
     tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
-    formatted_messages = [m.to_openai_format() for m in messages]
+    formatted_messages = [_convert_message_to_openai_format(m) for m in messages]
     tokenized_messages = tokenizer.apply_chat_template(formatted_messages, tokenize=False)
     assert tokenized_messages == "<|system|>\nYou are good assistant\n<|user|>\nI have a question\n"
@@ -76,7 +81,7 @@ def test_apply_custom_chat_templating_on_chat_message():
     messages = [ChatMessage.from_system("You are good assistant"), ChatMessage.from_user("I have a question")]
     # could be any tokenizer, let's use the one we already likely have in cache
     tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
-    formatted_messages = [m.to_openai_format() for m in messages]
+    formatted_messages = [_convert_message_to_openai_format(m) for m in messages]
     tokenized_messages = tokenizer.apply_chat_template(
         formatted_messages, chat_template=anthropic_template, tokenize=False
     )
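
Migration note for downstream code: a minimal sketch of the replacement call, assuming Haystack 2.5.0 or later (the release the deprecation warning named for the removal) and keeping in mind that `_convert_message_to_openai_format` is a private helper (leading underscore), so it may move or change without notice:

    from haystack.dataclasses import ChatMessage
    from haystack.components.generators.openai_utils import _convert_message_to_openai_format

    messages = [
        ChatMessage.from_system("You are good assistant"),
        ChatMessage.from_user("I have a question"),
    ]

    # Before (removed in 2.5.0): payload = [m.to_openai_format() for m in messages]
    payload = [_convert_message_to_openai_format(m) for m in messages]

    # Each entry is a plain dict, e.g. {"role": "user", "content": "I have a question"};
    # per the updated tests, "name" is included only for function messages,
    # matching the output of the removed method.
    print(payload)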