Refactor: Update LLM stream response type to Generator (#9420)

### What problem does this PR solve?

Change return type of _generate_streamly from str to Generator[str,
None, None] to properly type hint streaming responses.

### Type of change

- [x] Refactoring
This commit is contained in:
Liu An 2025-08-12 18:05:52 +08:00 committed by GitHub
parent e845d5f9f8
commit d7b4e84cda
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -17,7 +17,7 @@ import json
import logging
import os
import re
from typing import Any
from typing import Any, Generator
import json_repair
from copy import deepcopy
@ -154,7 +154,7 @@ class LLM(ComponentBase):
return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)
def _generate_streamly(self, msg:list[dict], **kwargs) -> str:
def _generate_streamly(self, msg:list[dict], **kwargs) -> Generator[str, None, None]:
ans = ""
last_idx = 0
endswith_think = False