feat: add qwen3 support (#6528)

## Why are these changes needed?

Add Ollama Qwen3 support: register model capabilities and token limits for the Qwen3 family, and add `qwen3:0.6b` to the tool-calling tests.
Authored by Miroslav Pokrovskii on 2025-05-14 19:52:13 +03:00; committed by GitHub. Commit aa22b622d0, parent cc2693b409.
2 changed files with 18 additions and 2 deletions

Changes to the Ollama model registry (`_MODEL_INFO` and `_MODEL_TOKEN_LIMITS`):

```diff
@@ -258,6 +258,13 @@ _MODEL_INFO: Dict[str, ModelInfo] = {
         "family": ModelFamily.UNKNOWN,
         "structured_output": True,
     },
+    "qwen3": {
+        "vision": False,
+        "function_calling": True,
+        "json_output": True,
+        "family": ModelFamily.UNKNOWN,
+        "structured_output": True,
+    },
     "snowflake-arctic-embed": {
         "vision": False,
         "function_calling": False,
@@ -351,6 +358,15 @@ _MODEL_TOKEN_LIMITS: Dict[str, int] = {
     "qwen2.5-coder:0.5b": 32768,
     "qwen2.5-coder:1.5b": 32768,
     "qwen2.5-coder:3b": 32768,
+    "qwen3": 40960,
+    "qwen3:0.6b": 40960,
+    "qwen3:1.7b": 40960,
+    "qwen3:4b": 40960,
+    "qwen3:8b": 40960,
+    "qwen3:14b": 40960,
+    "qwen3:30b": 40960,
+    "qwen3:32b": 40960,
+    "qwen3:235b": 40960,
     "snowflake-arctic-embed": 512,
     "starcoder2": 16384,
     "tinyllama": 2048,
```

Changes to the Ollama client tests:

```diff
@@ -570,7 +570,7 @@ async def test_ollama_create_structured_output(model: str, ollama_client: Ollama
 @pytest.mark.asyncio
-@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b"])
+@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b", "qwen3:0.6b"])
 async def test_ollama_create_tools(model: str, ollama_client: OllamaChatCompletionClient) -> None:
     def add(x: int, y: int) -> str:
         return str(x + y)
@@ -653,7 +653,7 @@ async def test_ollama_create_structured_output_with_tools(
 @pytest.mark.asyncio
-@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b"])
+@pytest.mark.parametrize("model", ["qwen2.5:0.5b", "llama3.2:1b", "qwen3:0.6b"])
 async def test_ollama_create_stream_tools(model: str, ollama_client: OllamaChatCompletionClient) -> None:
     def add(x: int, y: int) -> str:
         return str(x + y)
```
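The updated tests add `qwen3:0.6b` to the tool-call and streaming-tool-call parametrizations. A rough sketch of the flow they exercise, assuming a running Ollama server with `qwen3:0.6b` pulled; only the `add` helper comes from the tests, the surrounding script is illustrative:

```python
# Sketch of the tool-call flow covered by the parametrized tests for qwen3:0.6b.
# Assumes a local Ollama server with the model pulled; apart from add(),
# the script is illustrative rather than the test code itself.
import asyncio

from autogen_core.models import UserMessage
from autogen_core.tools import FunctionTool
from autogen_ext.models.ollama import OllamaChatCompletionClient


def add(x: int, y: int) -> str:
    """Add two integers and return the sum as a string."""
    return str(x + y)


async def main() -> None:
    client = OllamaChatCompletionClient(model="qwen3:0.6b")
    add_tool = FunctionTool(add, description="Add two integers.")

    result = await client.create(
        [UserMessage(content="What is 2 + 2?", source="user")],
        tools=[add_tool],
    )
    # With function_calling=True in the new model info, the model is expected
    # to return FunctionCall entries here rather than plain text.
    print(result.content)


asyncio.run(main())
```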