From 7e714b55229b8f92c194393257afa87ce3866de1 Mon Sep 17 00:00:00 2001
From: Tadashi
Date: Wed, 5 Feb 2025 13:43:30 +0700
Subject: [PATCH] feat: add low request mode for local llm

---
 libs/ktem/ktem/index/file/pipelines.py |  3 ++-
 libs/ktem/ktem/pages/chat/__init__.py  | 26 +++++++++++++++++++-------
 libs/ktem/ktem/reasoning/simple.py     | 15 ++++++++++++++-
 3 files changed, 35 insertions(+), 9 deletions(-)

diff --git a/libs/ktem/ktem/index/file/pipelines.py b/libs/ktem/ktem/index/file/pipelines.py
index 4d53e653..6038de88 100644
--- a/libs/ktem/ktem/index/file/pipelines.py
+++ b/libs/ktem/ktem/index/file/pipelines.py
@@ -14,6 +14,7 @@ from pathlib import Path
 from typing import Generator, Optional, Sequence
 
 import tiktoken
+from decouple import config
 from ktem.db.models import engine
 from ktem.embeddings.manager import embedding_models_manager
 from ktem.llms.manager import llms
@@ -270,7 +271,7 @@ class DocumentRetrievalPipeline(BaseFileIndexRetriever):
             },
             "use_llm_reranking": {
                 "name": "Use LLM relevant scoring",
-                "value": True,
+                "value": not config("USE_LOW_LLM_REQUESTS", default=False, cast=bool),
                 "choices": [True, False],
                 "component": "checkbox",
             },
diff --git a/libs/ktem/ktem/pages/chat/__init__.py b/libs/ktem/ktem/pages/chat/__init__.py
index 2f9ab94f..451684d3 100644
--- a/libs/ktem/ktem/pages/chat/__init__.py
+++ b/libs/ktem/ktem/pages/chat/__init__.py
@@ -5,6 +5,7 @@ from copy import deepcopy
 from typing import Optional
 
 import gradio as gr
+from decouple import config
 from ktem.app import BasePage
 from ktem.components import reasonings
 from ktem.db.models import Conversation, engine
@@ -23,6 +24,7 @@ from theflow.utils.modules import import_dotted_string
 
 from kotaemon.base import Document
 from kotaemon.indices.ingests.files import KH_DEFAULT_FILE_EXTRACTORS
+from kotaemon.indices.qa.utils import strip_think_tag
 
 from ...utils import SUPPORTED_LANGUAGE_MAP, get_file_names_regex, get_urls
 from ...utils.commands import WEB_SEARCH_COMMAND
@@ -367,13 +369,22 @@ class ChatPage(BasePage):
                         elem_id="citation-dropdown",
                     )
 
-                    self.use_mindmap = gr.State(value=True)
-                    self.use_mindmap_check = gr.Checkbox(
-                        label="Mindmap (on)",
-                        container=False,
-                        elem_id="use-mindmap-checkbox",
-                        value=True,
-                    )
+                    if not config("USE_LOW_LLM_REQUESTS", default=False, cast=bool):
+                        self.use_mindmap = gr.State(value=True)
+                        self.use_mindmap_check = gr.Checkbox(
+                            label="Mindmap (on)",
+                            container=False,
+                            elem_id="use-mindmap-checkbox",
+                            value=True,
+                        )
+                    else:
+                        self.use_mindmap = gr.State(value=False)
+                        self.use_mindmap_check = gr.Checkbox(
+                            label="Mindmap (off)",
+                            container=False,
+                            elem_id="use-mindmap-checkbox",
+                            value=False,
+                        )
 
                 with gr.Column(
                     scale=INFO_PANEL_SCALES[False], elem_id="chat-info-panel"
@@ -1361,6 +1372,7 @@
         # check if this is a newly created conversation
         if len(chat_history) == 1:
             suggested_name = suggest_pipeline(chat_history).text
+            suggested_name = strip_think_tag(suggested_name)
             suggested_name = suggested_name.replace('"', "").replace("'", "")[:40]
             new_name = gr.update(value=suggested_name)
             renamed = True
diff --git a/libs/ktem/ktem/reasoning/simple.py b/libs/ktem/ktem/reasoning/simple.py
index d11495d1..c40c9659 100644
--- a/libs/ktem/ktem/reasoning/simple.py
+++ b/libs/ktem/ktem/reasoning/simple.py
@@ -3,6 +3,7 @@ import threading
 from textwrap import dedent
 from typing import Generator
 
+from decouple import config
 from ktem.embeddings.manager import embedding_models_manager as embeddings
 from ktem.llms.manager import llms
 from ktem.reasoning.prompt_optimization import (
@@ -29,6 +30,7 @@ from kotaemon.indices.qa.citation_qa import (
 )
 from kotaemon.indices.qa.citation_qa_inline import AnswerWithInlineCitation
 from kotaemon.indices.qa.format_context import PrepareEvidencePipeline
+from kotaemon.indices.qa.utils import replace_think_tag_with_details
 from kotaemon.llms import ChatLLM
 
 from ..utils import SUPPORTED_LANGUAGE_MAP
@@ -313,6 +315,13 @@ class FullQAPipeline(BaseReasoning):
             **kwargs,
         )
 
+        # check tag from reasoning models
+        processed_answer = replace_think_tag_with_details(answer.text)
+        if processed_answer != answer.text:
+            # clear the chat message and render again
+            yield Document(channel="chat", content=None)
+            yield Document(channel="chat", content=processed_answer)
+
         # show the evidence
         if scoring_thread:
             scoring_thread.join()
@@ -410,7 +419,11 @@
             },
             "highlight_citation": {
                 "name": "Citation style",
-                "value": "highlight",
+                "value": (
+                    "highlight"
+                    if not config("USE_LOW_LLM_REQUESTS", default=False, cast=bool)
+                    else "off"
+                ),
                 "component": "radio",
                 "choices": [
                     ("citation: highlight", "highlight"),
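
Notes
-----

All three files read the USE_LOW_LLM_REQUESTS flag through python-decouple's
config() helper, so the switch can be supplied either as a process environment
variable or via a .env file. A minimal, self-contained sketch of the pattern
(the local variable names are illustrative, not from the patch; only the
config() call and the three derived defaults mirror the diff):

    # sketch: how the patch reads USE_LOW_LLM_REQUESTS via python-decouple.
    # decouple checks os.environ first, then falls back to a .env file;
    # with cast=bool it parses true/false, yes/no, on/off, 1/0, and returns
    # the default (False) when the variable is absent.
    from decouple import config

    low_llm_requests = config("USE_LOW_LLM_REQUESTS", default=False, cast=bool)

    # the three defaults the patch derives from the flag:
    use_llm_reranking = not low_llm_requests    # pipelines.py
    use_mindmap = not low_llm_requests          # chat/__init__.py
    citation_style = "highlight" if not low_llm_requests else "off"  # simple.py

    print(low_llm_requests, use_llm_reranking, use_mindmap, citation_style)

Setting USE_LOW_LLM_REQUESTS=true therefore flips the defaults to the cheaper
behavior (no LLM reranking, mindmap off, citation highlighting off), trimming
the extra LLM calls per chat turn that slow local models struggle with.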
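The strip_think_tag and replace_think_tag_with_details helpers imported from
kotaemon.indices.qa.utils handle the <think>...</think> traces that
reasoning-style models prepend to their output. Their implementations are not
part of this patch; a rough sketch consistent with how the diff uses them
(the regex and the "Thoughts" wording are assumptions, not the library code):

    import re

    # assumed behavior of the helpers in kotaemon.indices.qa.utils;
    # the real implementations may differ in detail.
    THINK_RE = re.compile(r"<think>(.*?)</think>", re.DOTALL)

    def strip_think_tag(text: str) -> str:
        # drop the reasoning trace entirely, e.g. before using model
        # output as a conversation title
        return THINK_RE.sub("", text).strip()

    def replace_think_tag_with_details(text: str) -> str:
        # fold the trace into a collapsible HTML block so the chat
        # panel shows only the final answer by default
        return THINK_RE.sub(
            r"<details><summary>Thoughts</summary>\1</details>", text
        )

This also explains the double yield in simple.py: when a think tag was found
and rewritten, the first yield with content=None clears the streamed message
so the re-rendered answer replaces it, per the in-diff comment.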