refine log format (#312)
### What problem does this PR solve?

Issue link: #264

### Type of change

- [x] Documentation Update
- [x] Refactoring
This commit is contained in: parent 0b2808f990, commit f6c7204002
@@ -65,7 +65,7 @@
 - CPU >= 2 cores
 - RAM >= 8 GB
-- Docker >= 24.0.0
+- Docker >= 24.0.0 & Docker Compose >= v2.26.1

 > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

 ### 🚀 Start up the server
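Since the prerequisite now names both a Docker and a Docker Compose version, it can help to confirm what a host actually has installed. The snippet below is only an illustrative check, not part of the repository; it assumes `docker` is on the PATH and that Compose v2 is available as the `docker compose` plugin.

```python
# Illustrative prerequisite check: print the installed Docker and Docker Compose
# versions for comparison against "Docker >= 24.0.0" and "Docker Compose >= v2.26.1".
import subprocess

def cli_version(*cmd: str) -> str:
    try:
        out = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return out.stdout.strip()
    except (OSError, subprocess.CalledProcessError) as exc:
        return f"not available: {exc}"

print(cli_version("docker", "--version"))           # e.g. "Docker version 24.0.7, build afdd53b"
print(cli_version("docker", "compose", "version"))  # e.g. "Docker Compose version v2.26.1"
```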
@@ -65,7 +65,7 @@
 - CPU >= 2 cores
 - RAM >= 8 GB
-- Docker >= 24.0.0
+- Docker >= 24.0.0 & Docker Compose >= v2.26.1

 > ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

 ### 🚀 サーバーを起動
@@ -65,7 +65,7 @@
 - CPU >= 2 核
 - RAM >= 8 GB
-- Docker >= 24.0.0
+- Docker >= 24.0.0 & Docker Compose >= v2.26.1

 > 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。

 ### 🚀 启动服务器
@@ -695,7 +695,7 @@ class Dialog(DataBaseModel):
     language = CharField(
         max_length=32,
         null=True,
-        default="English",
+        default="Chinese",
         help_text="English|Chinese")
     llm_id = CharField(max_length=32, null=False, help_text="default llm ID")
     llm_setting = JSONField(null=False, default={"temperature": 0.1, "top_p": 0.3, "frequency_penalty": 0.7,
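The `default` argument only affects rows created without an explicit `language`. A minimal sketch of that behaviour follows; the in-memory SQLite database and the stripped-down model are stand-ins for the project's `DataBaseModel` setup, not its real configuration.

```python
# Minimal peewee sketch (hypothetical database) showing the effect of the new default:
# a Dialog created without a language is stored as "Chinese" instead of "English".
from peewee import SqliteDatabase, Model, CharField

db = SqliteDatabase(":memory:")

class Dialog(Model):
    language = CharField(max_length=32, null=True, default="Chinese",
                         help_text="English|Chinese")

    class Meta:
        database = db

db.create_tables([Dialog])
dialog = Dialog.create()   # no language supplied
print(dialog.language)     # -> "Chinese"
```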
@@ -13,15 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import logging
-
-from rag.utils import ELASTICSEARCH
-from rag.nlp import search
 import os

 from enum import IntEnum, Enum
-
-from api.utils import get_base_config, decrypt_database_config
 from api.utils.file_utils import get_project_base_directory
 from api.utils.log_utils import LoggerFactory, getLogger
@@ -32,13 +25,16 @@ LoggerFactory.set_directory(
         "logs",
         "api"))
 # {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
-LoggerFactory.LEVEL = 10
+LoggerFactory.LEVEL = 30

 stat_logger = getLogger("stat")
 access_logger = getLogger("access")
 database_logger = getLogger("database")
 chat_logger = getLogger("chat")
-database_logger.setLevel(logging.WARNING)

+from rag.utils import ELASTICSEARCH
+from rag.nlp import search
+from api.utils import get_base_config, decrypt_database_config
+
 API_VERSION = "v1"
 RAG_FLOW_SERVICE_NAME = "ragflow"
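The comment above lists the numeric values behind Python's logging levels; the sketch below uses the standard `logging` module directly (not the project's `LoggerFactory`) to show why raising the threshold from 10 to 30 drops DEBUG and INFO records while keeping WARNING and above.

```python
import logging

# Standard-library level constants referenced by the comment:
# CRITICAL=50, ERROR=40, WARNING=30, INFO=20, DEBUG=10, NOTSET=0.
logging.basicConfig()
logger = logging.getLogger("demo")

logger.setLevel(10)                        # old threshold: DEBUG and above
logger.debug("visible at level 10")

logger.setLevel(30)                        # new threshold: WARNING and above
logger.debug("filtered out at level 30")   # not emitted
logger.info("filtered out at level 30")    # not emitted
logger.warning("still emitted at level 30")
```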
@@ -79,7 +75,7 @@ default_llm = {
         "image2text_model": "glm-4v",
         "asr_model": "",
     },
-    "Local": {
+    "Ollama": {
         "chat_model": "qwen-14B-chat",
         "embedding_model": "flag-embedding",
         "image2text_model": "",
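The rename only changes the key under which locally served models are registered in `default_llm`; the sketch below copies the values from the hunk and adds a hypothetical lookup helper to show what callers now need to ask for.

```python
# The relevant slice of default_llm after the change (other factories omitted).
default_llm = {
    "Ollama": {
        "chat_model": "qwen-14B-chat",
        "embedding_model": "flag-embedding",
        "image2text_model": "",
        "asr_model": "",
    },
}

def models_for(factory: str) -> dict:
    # Hypothetical helper: code that previously looked up "Local" must now use "Ollama".
    return default_llm.get(factory, {})

print(models_for("Ollama")["chat_model"])   # -> qwen-14B-chat
print(models_for("Local"))                  # -> {} (the old key no longer exists)
```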
@@ -26,7 +26,8 @@ from api.utils import file_utils

 class LoggerFactory(object):
     TYPE = "FILE"
-    LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [jobId] [%(process)s:%(thread)s] - [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
+    LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
+    logging.basicConfig(format=LOG_FORMAT)
     LEVEL = logging.DEBUG
     logger_dict = {}
     global_handler_dict = {}
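To see what the slimmer format produces, the refined `LOG_FORMAT` can be fed to a plain `logging.basicConfig` call; this standalone sketch does not reproduce `LoggerFactory`'s per-module file handlers, it only previews the line layout.

```python
import logging

# The refined format string from the hunk above, applied to the root logger.
LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
logging.basicConfig(format=LOG_FORMAT, level=logging.WARNING)

def index_documents():
    logging.getLogger("stat").warning("3 documents indexed")

index_documents()
# With the file saved as example.py this prints something like:
# [WARNING] [2024-03-11 10:15:02,123] [example.index_documents] [line:8]: 3 documents indexed
```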
@@ -110,7 +110,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         [t for t in random_choices([t for t, _ in sections], k=100)])
     if bull >= 0:
         chunks = ["\n".join(ck)
-                  for ck in hierarchical_merge(bull, sections, 3)]
+                  for ck in hierarchical_merge(bull, sections, 5)]
     else:
         sections = [s.split("@") for s, _ in sections]
         sections = [(pr[0], "@" + pr[1]) for pr in sections if len(pr) == 2]
@@ -133,7 +133,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,

     make_colon_as_title(sections)
     bull = bullets_category(sections)
-    chunks = hierarchical_merge(bull, sections, 3)
+    chunks = hierarchical_merge(bull, sections, 5)
     if not chunks:
         callback(0.99, "No chunk parsed out.")

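Both hunks above raise the level passed to `hierarchical_merge` from 3 to 5. The toy function below is not the project's implementation and its heading patterns are invented; it only illustrates the idea that a higher level treats more heading depths as chunk boundaries, so deeply nested headings start their own chunk instead of being folded into the parent one.

```python
import re

# Invented patterns, one per nesting level, deepest last.
LEVEL_PATTERNS = [
    r"^第.+章",            # level 1: chapter
    r"^第.+节",            # level 2: section
    r"^\d+\.\s",           # level 3: "1. "
    r"^\d+\.\d+\s",        # level 4: "1.1 "
    r"^\d+\.\d+\.\d+\s",   # level 5: "1.1.1 "
]

def toy_merge(lines, level):
    """Start a new chunk whenever a line matches one of the first `level` patterns."""
    chunks, current = [], []
    for line in lines:
        if current and any(re.match(p, line) for p in LEVEL_PATTERNS[:level]):
            chunks.append("\n".join(current))
            current = []
        current.append(line)
    if current:
        chunks.append("\n".join(current))
    return chunks

doc = ["第一章 总则", "1. 定义", "1.1 术语", "1.1.1 细则", "正文内容……"]
print(len(toy_merge(doc, 3)))   # 2 chunks: only levels 1-3 split the text
print(len(toy_merge(doc, 5)))   # 4 chunks: levels 4 and 5 now split as well
```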
@@ -98,7 +98,7 @@ class QWenChat(Base):
         tk_count = 0
         if response.status_code == HTTPStatus.OK:
             ans += response.output.choices[0]['message']['content']
-            tk_count += response.usage.output_tokens
+            tk_count += response.usage.total_tokens
             if response.output.choices[0].get("finish_reason", "") == "length":
                 ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                     [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
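For the token accounting above: `total_tokens` covers the prompt plus the completion, while the previous `output_tokens` counted the completion only. The object below is a hand-built stand-in for the Qwen response (the field values are invented), not an actual dashscope call.

```python
from types import SimpleNamespace

# Stand-in mirroring the usage fields consulted in the hunk above.
response = SimpleNamespace(
    usage=SimpleNamespace(input_tokens=120, output_tokens=80, total_tokens=200)
)

tk_count = 0
tk_count += response.usage.total_tokens    # after the change: 200 (prompt + completion)
# tk_count += response.usage.output_tokens # before the change: only 80
print(tk_count)
```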