From c59c1b603d6c0a216341bb526cfbb40347de69b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=84=E8=85=BE?= <101850389+hangters@users.noreply.github.com>
Date: Thu, 15 Aug 2024 10:02:36 +0800
Subject: [PATCH] add support for 01.AI (#1951)

### What problem does this PR solve?

#1853 add support for 01.AI

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

---------

Co-authored-by: Zhedong Cen
---
 conf/llm_factories.json                          | 64 ++++++++++++++++++-
 rag/llm/__init__.py                              |  6 +-
 rag/llm/chat_model.py                            |  7 ++
 rag/llm/cv_model.py                              | 17 +++--
 web/src/assets/svg/llm/yi.svg                    |  7 ++
 .../user-setting/setting-model/constant.ts       |  1 +
 6 files changed, 95 insertions(+), 7 deletions(-)
 create mode 100644 web/src/assets/svg/llm/yi.svg

diff --git a/conf/llm_factories.json b/conf/llm_factories.json
index 2e556edb6..4d7ee1df8 100644
--- a/conf/llm_factories.json
+++ b/conf/llm_factories.json
@@ -3051,6 +3051,68 @@
                     "model_type": "rerank"
                 }
             ]
-        }
+        },
+        {
+            "name": "01.AI",
+            "logo": "",
+            "tags": "LLM,IMAGE2TEXT",
+            "status": "1",
+            "llm": [
+                {
+                    "llm_name": "yi-large",
+                    "tags": "LLM,CHAT,32k",
+                    "max_tokens": 32768,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-medium",
+                    "tags": "LLM,CHAT,16k",
+                    "max_tokens": 16384,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-medium-200k",
+                    "tags": "LLM,CHAT,200k",
+                    "max_tokens": 204800,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-spark",
+                    "tags": "LLM,CHAT,16k",
+                    "max_tokens": 16384,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-large-rag",
+                    "tags": "LLM,CHAT,16k",
+                    "max_tokens": 16384,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-large-fc",
+                    "tags": "LLM,CHAT,32k",
+                    "max_tokens": 32768,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-large-turbo",
+                    "tags": "LLM,CHAT,16k",
+                    "max_tokens": 16384,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-large-preview",
+                    "tags": "LLM,CHAT,16k",
+                    "max_tokens": 16384,
+                    "model_type": "chat"
+                },
+                {
+                    "llm_name": "yi-vision",
+                    "tags": "LLM,CHAT,IMAGE2TEXT,16k",
+                    "max_tokens": 16384,
+                    "model_type": "image2text"
+                }
+            ]
+        }
     ]
 }
diff --git a/rag/llm/__init__.py b/rag/llm/__init__.py
index 1756051d3..4f0592918 100644
--- a/rag/llm/__init__.py
+++ b/rag/llm/__init__.py
@@ -61,7 +61,8 @@ CvModel = {
     "LM-Studio": LmStudioCV,
     "StepFun":StepFunCV,
     "OpenAI-API-Compatible": OpenAI_APICV,
-    "TogetherAI": TogetherAICV
+    "TogetherAI": TogetherAICV,
+    "01.AI": YiCV
 }
 
 
@@ -94,7 +95,8 @@ ChatModel = {
     "PerfXCloud": PerfXCloudChat,
     "Upstage":UpstageChat,
     "novita.ai": NovitaAIChat,
-    "SILICONFLOW": SILICONFLOWChat
+    "SILICONFLOW": SILICONFLOWChat,
+    "01.AI": YiChat
 }
 
 
diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py
index 4696499a5..f5cdf5d25 100644
--- a/rag/llm/chat_model.py
+++ b/rag/llm/chat_model.py
@@ -1022,4 +1022,11 @@ class SILICONFLOWChat(Base):
     def __init__(self, key, model_name, base_url="https://api.siliconflow.cn/v1"):
         if not base_url:
             base_url = "https://api.siliconflow.cn/v1"
+        super().__init__(key, model_name, base_url)
+
+
+class YiChat(Base):
+    def __init__(self, key, model_name, base_url="https://api.lingyiwanwu.com/v1"):
+        if not base_url:
+            base_url = "https://api.lingyiwanwu.com/v1"
         super().__init__(key, model_name, base_url)
\ No newline at end of file
diff --git a/rag/llm/cv_model.py b/rag/llm/cv_model.py
index ee3de6bbd..73d2ffbb0 100644
--- a/rag/llm/cv_model.py
+++ b/rag/llm/cv_model.py
@@ -622,6 +622,7 @@ class NvidiaCV(Base):
             }
         ]
 
+
 class StepFunCV(GptV4):
     def __init__(self, key, model_name="step-1v-8k", lang="Chinese", base_url="https://api.stepfun.com/v1"):
         if not base_url: base_url="https://api.stepfun.com/v1"
@@ -629,8 +630,9 @@ class StepFunCV(GptV4):
         self.model_name = model_name
         self.lang = lang
 
+
 class LmStudioCV(GptV4):
-    def __init__(self, key, model_name, base_url, lang="Chinese"):
+    def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
         if base_url.split("/")[-1] != "v1":
@@ -641,7 +643,7 @@ class LmStudioCV(GptV4):
 
 
 class OpenAI_APICV(GptV4):
-    def __init__(self, key, model_name, base_url, lang="Chinese"):
+    def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("url cannot be None")
         if base_url.split("/")[-1] != "v1":
@@ -652,7 +654,14 @@ class OpenAI_APICV(GptV4):
 
 
 class TogetherAICV(GptV4):
-    def __init__(self, key, model_name, base_url="https://api.together.xyz/v1"):
+    def __init__(self, key, model_name, lang="Chinese", base_url="https://api.together.xyz/v1"):
         if not base_url:
             base_url = "https://api.together.xyz/v1"
-        super().__init__(key, model_name, base_url)
\ No newline at end of file
+        super().__init__(key, model_name,lang,base_url)
+
+
+class YiCV(GptV4):
+    def __init__(self, key, model_name, lang="Chinese",base_url="https://api.lingyiwanwu.com/v1",):
+        if not base_url:
+            base_url = "https://api.lingyiwanwu.com/v1"
+        super().__init__(key, model_name,lang,base_url)
\ No newline at end of file
diff --git a/web/src/assets/svg/llm/yi.svg b/web/src/assets/svg/llm/yi.svg
new file mode 100644
index 000000000..83ebd22d9
--- /dev/null
+++ b/web/src/assets/svg/llm/yi.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/web/src/pages/user-setting/setting-model/constant.ts b/web/src/pages/user-setting/setting-model/constant.ts
index c13b22d40..714159dcd 100644
--- a/web/src/pages/user-setting/setting-model/constant.ts
+++ b/web/src/pages/user-setting/setting-model/constant.ts
@@ -30,6 +30,7 @@ export const IconMap = {
   Upstage: 'upstage',
   'novita.ai': 'novita-ai',
   SILICONFLOW: 'siliconflow',
+  "01.AI": 'yi'
 };
 
 export const BedrockRegionList = [