mirror of https://github.com/infiniflow/ragflow.git · commit 890561703b
### What problem does this PR solve?

Issue link: #326

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
196 lines · 7.2 KiB · Python

```python
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from flask import request
from flask_login import login_required, current_user

from api.db import StatusEnum, LLMType
from api.db.db_models import TenantLLM
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request, get_json_result
from rag.llm import EmbeddingModel, ChatModel

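# NOTE: `manager` is intentionally not imported. RAGFlow's app loader
# (api/apps/__init__.py) creates a Flask blueprint per *_app module and
# injects it as `manager` before these route decorators run.


# GET /factories: list all supported model providers. QAnything and FastEmbed
# are hidden here; /list below treats them as always-available built-ins.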
@manager.route('/factories', methods=['GET'])
@login_required
def factories():
    try:
        fac = LLMFactoriesService.get_all()
        return get_json_result(data=[f.to_dict() for f in fac if f.name not in ["QAnything", "FastEmbed"]])
    except Exception as e:
        return server_error_response(e)

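# POST /set_api_key: verify that a provider API key actually works before
# storing it as this tenant's credential for every model under that provider.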
@manager.route('/set_api_key', methods=['POST'])
@login_required
@validate_request("llm_factory", "api_key")
def set_api_key():
    req = request.json
    # test if api key works
    chat_passed = False
    factory = req["llm_factory"]
    msg = ""
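    # Probe the factory's models: every embedding model is exercised with a
    # tiny encode() call, while chat models are tried only until one succeeds
    # (chat_passed short-circuits the remaining ones).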
    for llm in LLMService.query(fid=factory):
        if llm.model_type == LLMType.EMBEDDING.value:
            mdl = EmbeddingModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                arr, tc = mdl.encode(["Test if the api key is available"])
                if len(arr[0]) == 0 or tc == 0:
                    raise Exception("Fail")
            except Exception as e:
                msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
        elif not chat_passed and llm.model_type == LLMType.CHAT.value:
            mdl = ChatModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {"temperature": 0.9})
                if not tc:
                    raise Exception(m)
                chat_passed = True
            except Exception as e:
                msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(e)

    if msg:
        return get_data_error_result(retmsg=msg)

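    # The key works. Build the record to persist; model_type/llm_name are only
    # set when the caller scoped the key to a particular model.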
    llm = {
        "api_key": req["api_key"],
        "api_base": req.get("base_url", "")
    }
    for n in ["model_type", "llm_name"]:
        if n in req:
            llm[n] = req[n]

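    # Update existing credentials for this factory, or, if the tenant has none
    # yet, insert one record per model the factory offers.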
    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm):
        for llm in LLMService.query(fid=factory):
            TenantLLMService.save(
                tenant_id=current_user.id,
                llm_factory=factory,
                llm_name=llm.llm_name,
                model_type=llm.model_type,
                api_key=req["api_key"],
                api_base=req.get("base_url", "")
            )

    return get_json_result(data=True)

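# POST /add_llm: register a model outside the built-in catalog, e.g. a
# self-hosted deployment reachable via api_base (see issue #326); the stored
# api_key is just a placeholder since no real key is involved.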
@manager.route('/add_llm', methods=['POST'])
@login_required
@validate_request("llm_factory", "llm_name", "model_type")
def add_llm():
    req = request.json
    llm = {
        "tenant_id": current_user.id,
        "llm_factory": req["llm_factory"],
        "model_type": req["model_type"],
        "llm_name": req["llm_name"],
        "api_base": req.get("api_base", ""),
        "api_key": "xxxxxxxxxxxxxxx"
    }

    factory = req["llm_factory"]
    msg = ""
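    # Smoke-test the new model just like set_api_key does, but with key=None:
    # the model is reached through api_base rather than an API key.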
    if llm["model_type"] == LLMType.EMBEDDING.value:
        mdl = EmbeddingModel[factory](
            key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0 or tc == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        mdl = ChatModel[factory](
            key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
        try:
            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {"temperature": 0.9})
            if not tc:
                raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    else:
        # TODO: check other types of models
        pass

    if msg:
        return get_data_error_result(retmsg=msg)

    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
        TenantLLMService.save(**llm)

    return get_json_result(data=True)

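# GET /my_llms: the tenant's configured models, grouped by provider and
# annotated with per-model token usage.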
@manager.route('/my_llms', methods=['GET'])
@login_required
def my_llms():
    try:
        res = {}
        for o in TenantLLMService.get_my_llms(current_user.id):
            if o["llm_factory"] not in res:
                res[o["llm_factory"]] = {
                    "tags": o["tags"],
                    "llm": []
                }
            res[o["llm_factory"]]["llm"].append({
                "type": o["model_type"],
                "name": o["llm_name"],
                "used_token": o["used_tokens"]
            })
        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)

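# GET /list: every selectable model grouped by provider, optionally filtered
# by ?model_type=. (The function name shadows the built-in `list`, which is
# harmless here because nothing below uses it.)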
@manager.route('/list', methods=['GET'])
@login_required
def list():
    model_type = request.args.get("model_type")
    try:
        objs = TenantLLMService.query(tenant_id=current_user.id)
        facts = {o.to_dict()["llm_factory"] for o in objs if o.api_key}
        llms = LLMService.get_all()
        llms = [m.to_dict() for m in llms if m.status == StatusEnum.VALID.value]
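        # A model is available once its provider has an API key on file; the
        # bundled local models (flag-embedding, QAnything, FastEmbed) are
        # always available.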
        for m in llms:
            m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in ["QAnything", "FastEmbed"]

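        # Fold in tenant-specific models (added via /add_llm) that are not in
        # the global catalog.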
        llm_set = {m["llm_name"] for m in llms}
        for o in objs:
            if not o.api_key:
                continue
            if o.llm_name in llm_set:
                continue
            llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

        res = {}
        for m in llms:
            if model_type and m["model_type"] != model_type:
                continue
            if m["fid"] not in res:
                res[m["fid"]] = []
            res[m["fid"]].append(m)

        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)
```
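
For orientation, here is a minimal client-side sketch of how these endpoints might be called. It is not part of the file above, and several details are assumptions rather than anything this source confirms: the `/v1/llm` mount point, the `localhost:9380` address, the factory and model names, and a `session` that already holds a login cookie.

```python
# Hypothetical client for the endpoints above. Assumed, not confirmed by this
# file: the blueprint is mounted at /v1/llm, the API listens on localhost:9380,
# and `session` already carries a logged-in cookie.
import requests

BASE = "http://localhost:9380/v1/llm"  # assumed mount point
session = requests.Session()           # assumed to be authenticated already

# Store (and implicitly validate) a provider key for this tenant.
r = session.post(f"{BASE}/set_api_key",
                 json={"llm_factory": "OpenAI", "api_key": "sk-..."})
print(r.json())

# Register a locally served model; no real API key is needed.
r = session.post(f"{BASE}/add_llm",
                 json={"llm_factory": "Ollama",
                       "llm_name": "llama2",
                       "model_type": "chat",
                       "api_base": "http://localhost:11434"})
print(r.json())

# List chat models grouped by provider, with availability flags.
r = session.get(f"{BASE}/list", params={"model_type": "chat"})
print(r.json())
```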