# Use OpenAI with key
provider: autogen_ext.models.openai.OpenAIChatCompletionClient
config:
  model: gpt-4o
  api_key: REPLACE_WITH_YOUR_API_KEY

# Use a locally hosted model using Ollama.
# provider: autogen_ext.models.openai.OpenAIChatCompletionClient
# config:
#   model: deepseek-r1:8b
#   base_url: http://localhost:11434/v1
#   api_key: ollama
#   model_info:
#     function_calling: false
#     json_output: false
#     vision: false
#     family: r1

# Use Azure OpenAI with key
# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
# config:
#   model: gpt-4o
#   azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
#   azure_deployment: {your-azure-deployment}
#   api_version: {your-api-version}
#   api_key: REPLACE_WITH_YOUR_API_KEY

# Use Azure OpenAI with AD token provider.
# provider: autogen_ext.models.openai.AzureOpenAIChatCompletionClient
# config:
#   model: gpt-4o
#   azure_endpoint: https://{your-custom-endpoint}.openai.azure.com/
#   azure_deployment: {your-azure-deployment}
#   api_version: {your-api-version}
#   azure_ad_token_provider:
#     provider: autogen_ext.auth.azure.AzureTokenProvider
#     config:
#       provider_kind: DefaultAzureCredential
#       scopes:
#         - https://cognitiveservices.azure.com/.default