diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
index 26cf70abb..05a0c949a 100644
--- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
+++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py
@@ -1112,6 +1112,7 @@ class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient, Component[OpenA
             "this is content" becomes "Reviewer said: this is content."
             This can be useful for models that do not support the `name` field in message. Defaults to False.
+        stream_options (optional, dict): Additional options for streaming. Currently only `include_usage` is supported.
 
     Examples:
diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py
index b85e7c22c..62c25c800 100644
--- a/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py
+++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/config/__init__.py
@@ -10,6 +10,10 @@ class ResponseFormat(TypedDict):
     type: Literal["text", "json_object"]
 
 
+class StreamOptions(TypedDict):
+    include_usage: bool
+
+
 class CreateArguments(TypedDict, total=False):
     frequency_penalty: Optional[float]
     logit_bias: Optional[Dict[str, int]]
@@ -22,6 +26,7 @@ class CreateArguments(TypedDict, total=False):
     temperature: Optional[float]
     top_p: Optional[float]
     user: str
+    stream_options: Optional[StreamOptions]
 
 
 AsyncAzureADTokenProvider = Callable[[], Union[str, Awaitable[str]]]
@@ -67,6 +72,7 @@ class CreateArgumentsConfigModel(BaseModel):
     temperature: float | None = None
     top_p: float | None = None
     user: str | None = None
+    stream_options: StreamOptions | None = None
 
 
 class BaseOpenAIClientConfigurationConfigModel(CreateArgumentsConfigModel):
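
For context, here is a minimal sketch of how the new `stream_options` create argument might be used with the client. The model name, the message, and the exact behavior of the final streamed result are assumptions for illustration; they are not part of this diff.

```python
import asyncio

from autogen_core.models import UserMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient


async def main() -> None:
    # Hypothetical usage: pass the new `stream_options` create argument so the
    # provider includes token usage with the streamed response.
    client = OpenAIChatCompletionClient(
        model="gpt-4o",  # assumed model name, for illustration only
        stream_options={"include_usage": True},
    )

    # create_stream yields text chunks and then a final CreateResult; with
    # include_usage enabled, the final result is expected to carry real token
    # counts (behavior assumed from the OpenAI streaming API).
    async for chunk in client.create_stream(
        [UserMessage(content="Hello!", source="user")]
    ):
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)
        else:
            print("\nusage:", chunk.usage)


asyncio.run(main())
```

The `StreamOptions` TypedDict and the `stream_options` field on `CreateArgumentsConfigModel` also mean the option can be supplied through the declarative component config, alongside existing create arguments such as `temperature` and `top_p`.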