from __future__ import annotations

from functools import partial

import asyncio
import json
import re
import os

from typing import Any, AsyncIterator
from collections import Counter, defaultdict

from .utils import (
    logger,
    clean_str,
    compute_mdhash_id,
    Tokenizer,
    is_float_regex,
    normalize_extracted_info,
    pack_user_ass_to_openai_messages,
    split_string_by_multi_markers,
    truncate_list_by_token_size,
    process_combine_contexts,
    compute_args_hash,
    handle_cache,
    save_to_cache,
    CacheData,
    get_conversation_turns,
    use_llm_func_with_cache,
)
from .base import (
    BaseGraphStorage,
    BaseKVStorage,
    BaseVectorStorage,
    TextChunkSchema,
    QueryParam,
)
from .prompt import GRAPH_FIELD_SEP, PROMPTS
import time
from dotenv import load_dotenv

# Use the .env file located in the current working directory,
# which allows each LightRAG instance to use its own .env file.
# OS environment variables take precedence over the .env file.
load_dotenv(dotenv_path=".env", override=False)


def chunking_by_token_size(
    tokenizer: Tokenizer,
    content: str,
    split_by_character: str | None = None,
    split_by_character_only: bool = False,
    overlap_token_size: int = 128,
    max_token_size: int = 1024,
) -> list[dict[str, Any]]:
    tokens = tokenizer.encode(content)
    results: list[dict[str, Any]] = []
    if split_by_character:
        raw_chunks = content.split(split_by_character)
        new_chunks = []
        if split_by_character_only:
            for chunk in raw_chunks:
                _tokens = tokenizer.encode(chunk)
                new_chunks.append((len(_tokens), chunk))
        else:
            for chunk in raw_chunks:
                _tokens = tokenizer.encode(chunk)
                if len(_tokens) > max_token_size:
                    for start in range(
                        0, len(_tokens), max_token_size - overlap_token_size
                    ):
                        chunk_content = tokenizer.decode(
                            _tokens[start : start + max_token_size]
                        )
                        new_chunks.append(
                            (min(max_token_size, len(_tokens) - start), chunk_content)
                        )
                else:
                    new_chunks.append((len(_tokens), chunk))
        for index, (_len, chunk) in enumerate(new_chunks):
            results.append(
                {
                    "tokens": _len,
                    "content": chunk.strip(),
                    "chunk_order_index": index,
                }
            )
    else:
        for index, start in enumerate(
            range(0, len(tokens), max_token_size - overlap_token_size)
        ):
            chunk_content = tokenizer.decode(tokens[start : start + max_token_size])
            results.append(
                {
                    "tokens": min(max_token_size, len(tokens) - start),
                    "content": chunk_content.strip(),
                    "chunk_order_index": index,
                }
            )
    return results
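
# Illustrative note: with the defaults (max_token_size=1024, overlap_token_size=128)
# the sliding window above advances by 1024 - 128 = 896 tokens per chunk, so a
# 2048-token document yields chunks starting at token offsets 0, 896 and 1792,
# each up to 1024 tokens long and overlapping its neighbour by 128 tokens.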


async def _handle_entity_relation_summary(
    entity_or_relation_name: str,
    description: str,
    global_config: dict,
    pipeline_status: dict = None,
    pipeline_status_lock=None,
    llm_response_cache: BaseKVStorage | None = None,
) -> str:
    """Handle entity relation summary

    For each entity or relation, the input is the combination of the already
    existing description and the new description. If it is too long, use the
    LLM to summarize it.
    """
    use_llm_func: callable = global_config["llm_model_func"]
    # Apply higher priority (8) to entity/relation summary tasks
    use_llm_func = partial(use_llm_func, _priority=8)

    tokenizer: Tokenizer = global_config["tokenizer"]
    llm_max_tokens = global_config["llm_model_max_token_size"]
    summary_max_tokens = global_config["summary_to_max_tokens"]

    language = global_config["addon_params"].get(
        "language", PROMPTS["DEFAULT_LANGUAGE"]
    )

    tokens = tokenizer.encode(description)

    ### Whether to summarize is no longer decided here (it is now driven by num_fragment in the merge stage)
    # if len(tokens) < summary_max_tokens:  # No need for summary
    #     return description

    prompt_template = PROMPTS["summarize_entity_descriptions"]
    use_description = tokenizer.decode(tokens[:llm_max_tokens])
    context_base = dict(
        entity_name=entity_or_relation_name,
        description_list=use_description.split(GRAPH_FIELD_SEP),
        language=language,
    )
    use_prompt = prompt_template.format(**context_base)
    logger.debug(f"Trigger summary: {entity_or_relation_name}")

    # Use LLM function with cache (higher priority for summary generation)
    summary = await use_llm_func_with_cache(
        use_prompt,
        use_llm_func,
        llm_response_cache=llm_response_cache,
        max_tokens=summary_max_tokens,
        cache_type="extract",
    )
    return summary


async def _handle_single_entity_extraction(
    record_attributes: list[str],
    chunk_key: str,
    file_path: str = "unknown_source",
):
    if len(record_attributes) < 4 or '"entity"' not in record_attributes[0]:
        return None

    # Clean and validate entity name
    entity_name = clean_str(record_attributes[1]).strip()
    if not entity_name:
        logger.warning(
            f"Entity extraction error: empty entity name in: {record_attributes}"
        )
        return None

    # Normalize entity name
    entity_name = normalize_extracted_info(entity_name, is_entity=True)

    # Clean and validate entity type
    entity_type = clean_str(record_attributes[2]).strip('"')
    if not entity_type.strip() or entity_type.startswith('("'):
        logger.warning(
            f"Entity extraction error: invalid entity type in: {record_attributes}"
        )
        return None

    # Clean and validate description
    entity_description = clean_str(record_attributes[3])
    entity_description = normalize_extracted_info(entity_description)

    if not entity_description.strip():
        logger.warning(
            f"Entity extraction error: empty description for entity '{entity_name}' of type '{entity_type}'"
        )
        return None

    return dict(
        entity_name=entity_name,
        entity_type=entity_type,
        description=entity_description,
        source_id=chunk_key,
        file_path=file_path,
    )
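
# Illustrative note: record_attributes is one delimiter-split record from the
# extraction prompt output. For an entity it is expected to look roughly like
# ['"entity"', '<entity name>', '<entity type>', '<description>']; the concrete
# delimiter strings come from PROMPTS (DEFAULT_TUPLE_DELIMITER and friends), so
# the layout shown here is a sketch of the expected shape, not a literal format.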


async def _handle_single_relationship_extraction(
    record_attributes: list[str],
    chunk_key: str,
    file_path: str = "unknown_source",
):
    if len(record_attributes) < 5 or '"relationship"' not in record_attributes[0]:
        return None
    # add this record as edge
    source = clean_str(record_attributes[1])
    target = clean_str(record_attributes[2])

    # Normalize source and target entity names
    source = normalize_extracted_info(source, is_entity=True)
    target = normalize_extracted_info(target, is_entity=True)
    if source == target:
        logger.debug(
            f"Relationship source and target are the same in: {record_attributes}"
        )
        return None

    edge_description = clean_str(record_attributes[3])
    edge_description = normalize_extracted_info(edge_description)

    edge_keywords = clean_str(record_attributes[4]).strip('"').strip("'")
    edge_source_id = chunk_key
    weight = (
        float(record_attributes[-1].strip('"').strip("'"))
        if is_float_regex(record_attributes[-1].strip('"').strip("'"))
        else 1.0
    )
    return dict(
        src_id=source,
        tgt_id=target,
        weight=weight,
        description=edge_description,
        keywords=edge_keywords,
        source_id=edge_source_id,
        file_path=file_path,
    )
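
# Illustrative note: a relationship record is expected to carry at least
# ['"relationship"', '<source>', '<target>', '<description>', '<keywords>', ...]
# with a numeric relationship strength as its last attribute; when that last
# attribute does not parse as a float, the weight falls back to 1.0.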


async def _merge_nodes_then_upsert(
    entity_name: str,
    nodes_data: list[dict],
    knowledge_graph_inst: BaseGraphStorage,
    global_config: dict,
    pipeline_status: dict = None,
    pipeline_status_lock=None,
    llm_response_cache: BaseKVStorage | None = None,
):
    """Get the existing node from the knowledge graph by name; if it exists, merge the new data into it, otherwise create it, then upsert."""
    already_entity_types = []
    already_source_ids = []
    already_description = []
    already_file_paths = []

    already_node = await knowledge_graph_inst.get_node(entity_name)
    if already_node is not None:
        already_entity_types.append(already_node["entity_type"])
        already_source_ids.extend(
            split_string_by_multi_markers(already_node["source_id"], [GRAPH_FIELD_SEP])
        )
        already_file_paths.extend(
            split_string_by_multi_markers(already_node["file_path"], [GRAPH_FIELD_SEP])
        )
        already_description.append(already_node["description"])

    entity_type = sorted(
        Counter(
            [dp["entity_type"] for dp in nodes_data] + already_entity_types
        ).items(),
        key=lambda x: x[1],
        reverse=True,
    )[0][0]
    description = GRAPH_FIELD_SEP.join(
        sorted(set([dp["description"] for dp in nodes_data] + already_description))
    )
    source_id = GRAPH_FIELD_SEP.join(
        set([dp["source_id"] for dp in nodes_data] + already_source_ids)
    )
    file_path = GRAPH_FIELD_SEP.join(
        set([dp["file_path"] for dp in nodes_data] + already_file_paths)
    )

    force_llm_summary_on_merge = global_config["force_llm_summary_on_merge"]

    num_fragment = description.count(GRAPH_FIELD_SEP) + 1
    num_new_fragment = len(set([dp["description"] for dp in nodes_data]))

    if num_fragment > 1:
        if num_fragment >= force_llm_summary_on_merge:
            status_message = f"LLM merge N: {entity_name} | {num_new_fragment}+{num_fragment-num_new_fragment}"
            logger.info(status_message)
            if pipeline_status is not None and pipeline_status_lock is not None:
                async with pipeline_status_lock:
                    pipeline_status["latest_message"] = status_message
                    pipeline_status["history_messages"].append(status_message)
            description = await _handle_entity_relation_summary(
                entity_name,
                description,
                global_config,
                pipeline_status,
                pipeline_status_lock,
                llm_response_cache,
            )
        else:
            status_message = f"Merge N: {entity_name} | {num_new_fragment}+{num_fragment-num_new_fragment}"
            logger.info(status_message)
            if pipeline_status is not None and pipeline_status_lock is not None:
                async with pipeline_status_lock:
                    pipeline_status["latest_message"] = status_message
                    pipeline_status["history_messages"].append(status_message)

    node_data = dict(
        entity_id=entity_name,
        entity_type=entity_type,
        description=description,
        source_id=source_id,
        file_path=file_path,
        created_at=int(time.time()),
    )
    await knowledge_graph_inst.upsert_node(
        entity_name,
        node_data=node_data,
    )
    node_data["entity_name"] = entity_name
    return node_data
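
# Illustrative note: num_fragment counts all GRAPH_FIELD_SEP-joined description
# fragments after the merge, while num_new_fragment counts only the distinct new
# ones. For example, if force_llm_summary_on_merge were configured as 6, an
# entity carrying 4 existing fragments plus 2 new ones (6 in total) would be
# summarized by the LLM, whereas 2 existing plus 1 new fragment would simply be
# concatenated and logged as a plain merge.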


async def _merge_edges_then_upsert(
    src_id: str,
    tgt_id: str,
    edges_data: list[dict],
    knowledge_graph_inst: BaseGraphStorage,
    global_config: dict,
    pipeline_status: dict = None,
    pipeline_status_lock=None,
    llm_response_cache: BaseKVStorage | None = None,
):
    if src_id == tgt_id:
        return None

    already_weights = []
    already_source_ids = []
    already_description = []
    already_keywords = []
    already_file_paths = []

    if await knowledge_graph_inst.has_edge(src_id, tgt_id):
        already_edge = await knowledge_graph_inst.get_edge(src_id, tgt_id)
        # Handle the case where get_edge returns None or missing fields
        if already_edge:
            # Get weight with default 0.0 if missing
            already_weights.append(already_edge.get("weight", 0.0))

            # Get source_id with empty string default if missing or None
            if already_edge.get("source_id") is not None:
                already_source_ids.extend(
                    split_string_by_multi_markers(
                        already_edge["source_id"], [GRAPH_FIELD_SEP]
                    )
                )

            # Get file_path with empty string default if missing or None
            if already_edge.get("file_path") is not None:
                already_file_paths.extend(
                    split_string_by_multi_markers(
                        already_edge["file_path"], [GRAPH_FIELD_SEP]
                    )
                )

            # Get description with empty string default if missing or None
            if already_edge.get("description") is not None:
                already_description.append(already_edge["description"])

            # Get keywords with empty string default if missing or None
            if already_edge.get("keywords") is not None:
                already_keywords.extend(
                    split_string_by_multi_markers(
                        already_edge["keywords"], [GRAPH_FIELD_SEP]
                    )
                )

    # Process edges_data with None checks
    weight = sum([dp["weight"] for dp in edges_data] + already_weights)
    description = GRAPH_FIELD_SEP.join(
        sorted(
            set(
                [dp["description"] for dp in edges_data if dp.get("description")]
                + already_description
            )
        )
    )
    keywords = GRAPH_FIELD_SEP.join(
        sorted(
            set(
                [dp["keywords"] for dp in edges_data if dp.get("keywords")]
                + already_keywords
            )
        )
    )
    source_id = GRAPH_FIELD_SEP.join(
        set(
            [dp["source_id"] for dp in edges_data if dp.get("source_id")]
            + already_source_ids
        )
    )
    file_path = GRAPH_FIELD_SEP.join(
        set(
            [dp["file_path"] for dp in edges_data if dp.get("file_path")]
            + already_file_paths
        )
    )

    for need_insert_id in [src_id, tgt_id]:
        if not (await knowledge_graph_inst.has_node(need_insert_id)):
            # # Discard this edge if the node does not exist
            # if need_insert_id == src_id:
            #     logger.warning(
            #         f"Discard edge: {src_id} - {tgt_id} | Source node missing"
            #     )
            # else:
            #     logger.warning(
            #         f"Discard edge: {src_id} - {tgt_id} | Target node missing"
            #     )
            # return None
            await knowledge_graph_inst.upsert_node(
                need_insert_id,
                node_data={
                    "entity_id": need_insert_id,
                    "source_id": source_id,
                    "description": description,
                    "entity_type": "UNKNOWN",
                    "file_path": file_path,
                    "created_at": int(time.time()),
                },
            )

    force_llm_summary_on_merge = global_config["force_llm_summary_on_merge"]

    num_fragment = description.count(GRAPH_FIELD_SEP) + 1
    num_new_fragment = len(
        set([dp["description"] for dp in edges_data if dp.get("description")])
    )

    if num_fragment > 1:
        if num_fragment >= force_llm_summary_on_merge:
            status_message = f"LLM merge E: {src_id} - {tgt_id} | {num_new_fragment}+{num_fragment-num_new_fragment}"
            logger.info(status_message)
            if pipeline_status is not None and pipeline_status_lock is not None:
                async with pipeline_status_lock:
                    pipeline_status["latest_message"] = status_message
                    pipeline_status["history_messages"].append(status_message)
            description = await _handle_entity_relation_summary(
                f"({src_id}, {tgt_id})",
                description,
                global_config,
                pipeline_status,
                pipeline_status_lock,
                llm_response_cache,
            )
        else:
            status_message = f"Merge E: {src_id} - {tgt_id} | {num_new_fragment}+{num_fragment-num_new_fragment}"
            logger.info(status_message)
            if pipeline_status is not None and pipeline_status_lock is not None:
                async with pipeline_status_lock:
                    pipeline_status["latest_message"] = status_message
                    pipeline_status["history_messages"].append(status_message)

    await knowledge_graph_inst.upsert_edge(
        src_id,
        tgt_id,
        edge_data=dict(
            weight=weight,
            description=description,
            keywords=keywords,
            source_id=source_id,
            file_path=file_path,
            created_at=int(time.time()),
        ),
    )

    edge_data = dict(
        src_id=src_id,
        tgt_id=tgt_id,
        description=description,
        keywords=keywords,
        source_id=source_id,
        file_path=file_path,
    )

    return edge_data
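
# Illustrative note: edge weights are additive across merges: the new weight is
# the sum of all incoming fragment weights plus the stored weight, so two
# extracted fragments weighted 0.8 and 1.0 merged onto an existing edge of
# weight 2.0 would yield 3.8. Descriptions, keywords, source ids and file paths
# are instead deduplicated and joined with GRAPH_FIELD_SEP.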


async def merge_nodes_and_edges(
    chunk_results: list,
    knowledge_graph_inst: BaseGraphStorage,
    entity_vdb: BaseVectorStorage,
    relationships_vdb: BaseVectorStorage,
    global_config: dict[str, str],
    pipeline_status: dict = None,
    pipeline_status_lock=None,
    llm_response_cache: BaseKVStorage | None = None,
    current_file_number: int = 0,
    total_files: int = 0,
    file_path: str = "unknown_source",
) -> None:
    """Merge nodes and edges from extraction results

    Args:
        chunk_results: List of tuples (maybe_nodes, maybe_edges) containing extracted entities and relationships
        knowledge_graph_inst: Knowledge graph storage
        entity_vdb: Entity vector database
        relationships_vdb: Relationship vector database
        global_config: Global configuration
        pipeline_status: Pipeline status dictionary
        pipeline_status_lock: Lock for pipeline status
        llm_response_cache: LLM response cache
        current_file_number: Index of the file currently being processed (for progress logging)
        total_files: Total number of files being processed (for progress logging)
        file_path: File path used for progress logging and citation
    """
    # Get lock manager from shared storage
    from .kg.shared_storage import get_graph_db_lock

    # Collect all nodes and edges from all chunks
    all_nodes = defaultdict(list)
    all_edges = defaultdict(list)

    for maybe_nodes, maybe_edges in chunk_results:
        # Collect nodes
        for entity_name, entities in maybe_nodes.items():
            all_nodes[entity_name].extend(entities)

        # Collect edges with sorted keys for undirected graph
        for edge_key, edges in maybe_edges.items():
            sorted_edge_key = tuple(sorted(edge_key))
            all_edges[sorted_edge_key].extend(edges)

    # Centralized processing of all nodes and edges
    entities_data = []
    relationships_data = []

    # Merge nodes and edges
    # Use graph database lock to ensure atomic merges and updates
    graph_db_lock = get_graph_db_lock(enable_logging=False)
    async with graph_db_lock:
        async with pipeline_status_lock:
            log_message = (
                f"Merging stage {current_file_number}/{total_files}: {file_path}"
            )
            logger.info(log_message)
            pipeline_status["latest_message"] = log_message
            pipeline_status["history_messages"].append(log_message)

        # Process and update all entities at once
        for entity_name, entities in all_nodes.items():
            entity_data = await _merge_nodes_then_upsert(
                entity_name,
                entities,
                knowledge_graph_inst,
                global_config,
                pipeline_status,
                pipeline_status_lock,
                llm_response_cache,
            )
            entities_data.append(entity_data)

        # Process and update all relationships at once
        for edge_key, edges in all_edges.items():
            edge_data = await _merge_edges_then_upsert(
                edge_key[0],
                edge_key[1],
                edges,
                knowledge_graph_inst,
                global_config,
                pipeline_status,
                pipeline_status_lock,
                llm_response_cache,
            )
            if edge_data is not None:
                relationships_data.append(edge_data)

        # Update total counts
        total_entities_count = len(entities_data)
        total_relations_count = len(relationships_data)

        log_message = f"Updating {total_entities_count} entities {current_file_number}/{total_files}: {file_path}"
        logger.info(log_message)
        if pipeline_status is not None:
            async with pipeline_status_lock:
                pipeline_status["latest_message"] = log_message
                pipeline_status["history_messages"].append(log_message)

        # Update vector databases with all collected data
        if entity_vdb is not None and entities_data:
            data_for_vdb = {
                compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
                    "entity_name": dp["entity_name"],
                    "entity_type": dp["entity_type"],
                    "content": f"{dp['entity_name']}\n{dp['description']}",
                    "source_id": dp["source_id"],
                    "file_path": dp.get("file_path", "unknown_source"),
                }
                for dp in entities_data
            }
            await entity_vdb.upsert(data_for_vdb)

        log_message = f"Updating {total_relations_count} relations {current_file_number}/{total_files}: {file_path}"
        logger.info(log_message)
        if pipeline_status is not None:
            async with pipeline_status_lock:
                pipeline_status["latest_message"] = log_message
                pipeline_status["history_messages"].append(log_message)

        if relationships_vdb is not None and relationships_data:
            data_for_vdb = {
                compute_mdhash_id(dp["src_id"] + dp["tgt_id"], prefix="rel-"): {
                    "src_id": dp["src_id"],
                    "tgt_id": dp["tgt_id"],
                    "keywords": dp["keywords"],
                    "content": f"{dp['src_id']}\t{dp['tgt_id']}\n{dp['keywords']}\n{dp['description']}",
                    "source_id": dp["source_id"],
                    "file_path": dp.get("file_path", "unknown_source"),
                }
                for dp in relationships_data
            }
            await relationships_vdb.upsert(data_for_vdb)
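
# Illustrative note: vector-database keys are derived from the merged data via
# compute_mdhash_id, so an entity is stored under "ent-" plus the hash of its
# name, and a relationship under "rel-" plus the hash of the concatenated
# source and target ids; re-merging the same entity therefore upserts into the
# same vector record instead of creating a duplicate.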


async def extract_entities(
    chunks: dict[str, TextChunkSchema],
    global_config: dict[str, str],
    pipeline_status: dict = None,
    pipeline_status_lock=None,
    llm_response_cache: BaseKVStorage | None = None,
) -> list:
    use_llm_func: callable = global_config["llm_model_func"]
    entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]

    ordered_chunks = list(chunks.items())
    # add language and example number params to prompt
    language = global_config["addon_params"].get(
        "language", PROMPTS["DEFAULT_LANGUAGE"]
    )
    entity_types = global_config["addon_params"].get(
        "entity_types", PROMPTS["DEFAULT_ENTITY_TYPES"]
    )
    example_number = global_config["addon_params"].get("example_number", None)
    if example_number and example_number < len(PROMPTS["entity_extraction_examples"]):
        examples = "\n".join(
            PROMPTS["entity_extraction_examples"][: int(example_number)]
        )
    else:
        examples = "\n".join(PROMPTS["entity_extraction_examples"])

    example_context_base = dict(
        tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
        record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
        completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
        entity_types=", ".join(entity_types),
        language=language,
    )
    # add example's format
    examples = examples.format(**example_context_base)

    entity_extract_prompt = PROMPTS["entity_extraction"]
    context_base = dict(
        tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
        record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
        completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
        entity_types=",".join(entity_types),
        examples=examples,
        language=language,
    )

    continue_prompt = PROMPTS["entity_continue_extraction"].format(**context_base)
    if_loop_prompt = PROMPTS["entity_if_loop_extraction"]

    processed_chunks = 0
    total_chunks = len(ordered_chunks)

    async def _process_extraction_result(
        result: str, chunk_key: str, file_path: str = "unknown_source"
    ):
        """Process a single extraction result (either initial or gleaning)

        Args:
            result (str): The extraction result to process
            chunk_key (str): The chunk key for source tracking
            file_path (str): The file path for citation
        Returns:
            tuple: (nodes_dict, edges_dict) containing the extracted entities and relationships
        """
        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)

        records = split_string_by_multi_markers(
            result,
            [context_base["record_delimiter"], context_base["completion_delimiter"]],
        )

        for record in records:
            record = re.search(r"\((.*)\)", record)
            if record is None:
                continue
            record = record.group(1)
            record_attributes = split_string_by_multi_markers(
                record, [context_base["tuple_delimiter"]]
            )

            if_entities = await _handle_single_entity_extraction(
                record_attributes, chunk_key, file_path
            )
            if if_entities is not None:
                maybe_nodes[if_entities["entity_name"]].append(if_entities)
                continue

            if_relation = await _handle_single_relationship_extraction(
                record_attributes, chunk_key, file_path
            )
            if if_relation is not None:
                maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
                    if_relation
                )

        return maybe_nodes, maybe_edges

    async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
        """Process a single chunk

        Args:
            chunk_key_dp (tuple[str, TextChunkSchema]):
                ("chunk-xxxxxx", {"tokens": int, "content": str, "full_doc_id": str, "chunk_order_index": int})
        Returns:
            tuple: (maybe_nodes, maybe_edges) containing extracted entities and relationships
        """
        nonlocal processed_chunks
        chunk_key = chunk_key_dp[0]
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        # Get file path from chunk data or use default
        file_path = chunk_dp.get("file_path", "unknown_source")

        # Get initial extraction
        hint_prompt = entity_extract_prompt.format(
            **{**context_base, "input_text": content}
        )

        final_result = await use_llm_func_with_cache(
            hint_prompt,
            use_llm_func,
            llm_response_cache=llm_response_cache,
            cache_type="extract",
        )
        history = pack_user_ass_to_openai_messages(hint_prompt, final_result)

        # Process initial extraction with file path
        maybe_nodes, maybe_edges = await _process_extraction_result(
            final_result, chunk_key, file_path
        )

        # Process additional gleaning results
        for now_glean_index in range(entity_extract_max_gleaning):
            glean_result = await use_llm_func_with_cache(
                continue_prompt,
                use_llm_func,
                llm_response_cache=llm_response_cache,
                history_messages=history,
                cache_type="extract",
            )

            history += pack_user_ass_to_openai_messages(continue_prompt, glean_result)

            # Process gleaning result separately with file path
            glean_nodes, glean_edges = await _process_extraction_result(
                glean_result, chunk_key, file_path
            )

            # Merge results - only add entities and edges with new names
            for entity_name, entities in glean_nodes.items():
                if (
                    entity_name not in maybe_nodes
                ):  # Only accept entities with new names in the gleaning stage
                    maybe_nodes[entity_name].extend(entities)
            for edge_key, edges in glean_edges.items():
                if (
                    edge_key not in maybe_edges
                ):  # Only accept edges with new keys in the gleaning stage
                    maybe_edges[edge_key].extend(edges)

            if now_glean_index == entity_extract_max_gleaning - 1:
                break

            if_loop_result: str = await use_llm_func_with_cache(
                if_loop_prompt,
                use_llm_func,
                llm_response_cache=llm_response_cache,
                history_messages=history,
                cache_type="extract",
            )
            if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
            if if_loop_result != "yes":
                break

        processed_chunks += 1
        entities_count = len(maybe_nodes)
        relations_count = len(maybe_edges)
        log_message = f"Chunk {processed_chunks} of {total_chunks} extracted {entities_count} Ent + {relations_count} Rel"
        logger.info(log_message)
        if pipeline_status is not None:
            async with pipeline_status_lock:
                pipeline_status["latest_message"] = log_message
                pipeline_status["history_messages"].append(log_message)

        # Return the extracted nodes and edges for centralized processing
        return maybe_nodes, maybe_edges

    # Get max async tasks limit from global_config
    llm_model_max_async = global_config.get("llm_model_max_async", 4)
    semaphore = asyncio.Semaphore(llm_model_max_async)

    async def _process_with_semaphore(chunk):
        async with semaphore:
            return await _process_single_content(chunk)

    tasks = []
    for c in ordered_chunks:
        task = asyncio.create_task(_process_with_semaphore(c))
        tasks.append(task)

    # Wait for tasks to complete or for the first exception to occur
    # This allows us to cancel remaining tasks if any task fails
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)

    # Check if any task raised an exception
    for task in done:
        if task.exception():
            # If a task failed, cancel all pending tasks
            # This prevents unnecessary processing since the parent function will abort anyway
            for pending_task in pending:
                pending_task.cancel()

            # Wait for cancellation to complete
            if pending:
                await asyncio.wait(pending)

            # Re-raise the exception to notify the caller
            raise task.exception()

    # If all tasks completed successfully, collect results
    chunk_results = [task.result() for task in tasks]

    # Return the chunk_results for later processing in merge_nodes_and_edges
    return chunk_results
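
# Illustrative note on the concurrency pattern above: the semaphore caps the
# number of chunks being extracted concurrently at llm_model_max_async, and
# asyncio.wait(..., return_when=asyncio.FIRST_EXCEPTION) lets the function stop
# early: the first failing chunk cancels every still-pending task before the
# exception is re-raised to the caller.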


async def kg_query(
    query: str,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    relationships_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
    system_prompt: str | None = None,
    chunks_vdb: BaseVectorStorage = None,
) -> str | AsyncIterator[str]:
    if query_param.model_func:
        use_model_func = query_param.model_func
    else:
        use_model_func = global_config["llm_model_func"]
        # Apply higher priority (5) to query relation LLM function
        use_model_func = partial(use_model_func, _priority=5)

    # Handle cache
    args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
    cached_response, quantized, min_val, max_val = await handle_cache(
        hashing_kv, args_hash, query, query_param.mode, cache_type="query"
    )
    if cached_response is not None:
        return cached_response

    hl_keywords, ll_keywords = await get_keywords_from_query(
        query, query_param, global_config, hashing_kv
    )

    logger.debug(f"High-level keywords: {hl_keywords}")
    logger.debug(f"Low-level keywords: {ll_keywords}")

    # Handle empty keywords
    if hl_keywords == [] and ll_keywords == []:
        logger.warning("low_level_keywords and high_level_keywords are empty")
        return PROMPTS["fail_response"]
    if ll_keywords == [] and query_param.mode in ["local", "hybrid"]:
        logger.warning(
            "low_level_keywords is empty, switching from %s mode to global mode",
            query_param.mode,
        )
        query_param.mode = "global"
    if hl_keywords == [] and query_param.mode in ["global", "hybrid"]:
        logger.warning(
            "high_level_keywords is empty, switching from %s mode to local mode",
            query_param.mode,
        )
        query_param.mode = "local"

    ll_keywords_str = ", ".join(ll_keywords) if ll_keywords else ""
    hl_keywords_str = ", ".join(hl_keywords) if hl_keywords else ""

    # Build context
    context = await _build_query_context(
        ll_keywords_str,
        hl_keywords_str,
        knowledge_graph_inst,
        entities_vdb,
        relationships_vdb,
        text_chunks_db,
        query_param,
        chunks_vdb,
    )

    if query_param.only_need_context:
        return context
    if context is None:
        return PROMPTS["fail_response"]

    # Process conversation history
    history_context = ""
    if query_param.conversation_history:
        history_context = get_conversation_turns(
            query_param.conversation_history, query_param.history_turns
        )

    sys_prompt_temp = system_prompt if system_prompt else PROMPTS["rag_response"]
    sys_prompt = sys_prompt_temp.format(
        context_data=context,
        response_type=query_param.response_type,
        history=history_context,
    )

    if query_param.only_need_prompt:
        return sys_prompt

    tokenizer: Tokenizer = global_config["tokenizer"]
    len_of_prompts = len(tokenizer.encode(query + sys_prompt))
    logger.debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")

    response = await use_model_func(
        query,
        system_prompt=sys_prompt,
        stream=query_param.stream,
    )
    if isinstance(response, str) and len(response) > len(sys_prompt):
        response = (
            response.replace(sys_prompt, "")
            .replace("user", "")
            .replace("model", "")
            .replace(query, "")
            .replace("<system>", "")
            .replace("</system>", "")
            .strip()
        )

    if hashing_kv.global_config.get("enable_llm_cache"):
        # Save to cache
        await save_to_cache(
            hashing_kv,
            CacheData(
                args_hash=args_hash,
                content=response,
                prompt=query,
                quantized=quantized,
                min_val=min_val,
                max_val=max_val,
                mode=query_param.mode,
                cache_type="query",
            ),
        )

    return response
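
# Illustrative note: kg_query caches per (mode, query) pair. The cache key comes
# from compute_args_hash(query_param.mode, query, cache_type="query"), so the
# same question asked in "local" and "global" mode is cached separately, and a
# cache hit short-circuits keyword extraction, context building and the final
# LLM call entirely.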


async def get_keywords_from_query(
    query: str,
    query_param: QueryParam,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
) -> tuple[list[str], list[str]]:
    """
    Retrieves high-level and low-level keywords for RAG operations.

    This function checks if keywords are already provided in query parameters,
    and if not, extracts them from the query text using LLM.

    Args:
        query: The user's query text
        query_param: Query parameters that may contain pre-defined keywords
        global_config: Global configuration dictionary
        hashing_kv: Optional key-value storage for caching results

    Returns:
        A tuple containing (high_level_keywords, low_level_keywords)
    """
    # Check if pre-defined keywords are already provided
    if query_param.hl_keywords or query_param.ll_keywords:
        return query_param.hl_keywords, query_param.ll_keywords

    # Extract keywords using extract_keywords_only function which already supports conversation history
    hl_keywords, ll_keywords = await extract_keywords_only(
        query, query_param, global_config, hashing_kv
    )
    return hl_keywords, ll_keywords


async def extract_keywords_only(
    text: str,
    param: QueryParam,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
) -> tuple[list[str], list[str]]:
    """
    Extract high-level and low-level keywords from the given 'text' using the LLM.
    This method does NOT build the final RAG context or provide a final answer.
    It ONLY extracts keywords (hl_keywords, ll_keywords).
    """

    # 1. Handle cache if needed - add cache type for keywords
    args_hash = compute_args_hash(param.mode, text, cache_type="keywords")
    cached_response, quantized, min_val, max_val = await handle_cache(
        hashing_kv, args_hash, text, param.mode, cache_type="keywords"
    )
    if cached_response is not None:
        try:
            keywords_data = json.loads(cached_response)
            return keywords_data["high_level_keywords"], keywords_data[
                "low_level_keywords"
            ]
        except (json.JSONDecodeError, KeyError):
            logger.warning(
                "Invalid cache format for keywords, proceeding with extraction"
            )

    # 2. Build the examples
    example_number = global_config["addon_params"].get("example_number", None)
    if example_number and example_number < len(PROMPTS["keywords_extraction_examples"]):
        examples = "\n".join(
            PROMPTS["keywords_extraction_examples"][: int(example_number)]
        )
    else:
        examples = "\n".join(PROMPTS["keywords_extraction_examples"])
    language = global_config["addon_params"].get(
        "language", PROMPTS["DEFAULT_LANGUAGE"]
    )

    # 3. Process conversation history
    history_context = ""
    if param.conversation_history:
        history_context = get_conversation_turns(
            param.conversation_history, param.history_turns
        )

    # 4. Build the keyword-extraction prompt
    kw_prompt = PROMPTS["keywords_extraction"].format(
        query=text, examples=examples, language=language, history=history_context
    )

    tokenizer: Tokenizer = global_config["tokenizer"]
    len_of_prompts = len(tokenizer.encode(kw_prompt))
    logger.debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")

    # 5. Call the LLM for keyword extraction
    if param.model_func:
        use_model_func = param.model_func
    else:
        use_model_func = global_config["llm_model_func"]
        # Apply higher priority (5) to query relation LLM function
        use_model_func = partial(use_model_func, _priority=5)

    result = await use_model_func(kw_prompt, keyword_extraction=True)

    # 6. Parse out JSON from the LLM response
    match = re.search(r"\{.*\}", result, re.DOTALL)
    if not match:
        logger.error("No JSON-like structure found in the LLM response.")
        return [], []
    try:
        keywords_data = json.loads(match.group(0))
    except json.JSONDecodeError as e:
        logger.error(f"JSON parsing error: {e}")
        return [], []

    hl_keywords = keywords_data.get("high_level_keywords", [])
    ll_keywords = keywords_data.get("low_level_keywords", [])

    # 7. Cache only the processed keywords with cache type
    if hl_keywords or ll_keywords:
        cache_data = {
            "high_level_keywords": hl_keywords,
            "low_level_keywords": ll_keywords,
        }
        if hashing_kv.global_config.get("enable_llm_cache"):
            await save_to_cache(
                hashing_kv,
                CacheData(
                    args_hash=args_hash,
                    content=json.dumps(cache_data),
                    prompt=text,
                    quantized=quantized,
                    min_val=min_val,
                    max_val=max_val,
                    mode=param.mode,
                    cache_type="keywords",
                ),
            )

    return hl_keywords, ll_keywords
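
# Illustrative note: both the LLM output parsed above and the cached payload use
# the same JSON shape, roughly
# {"high_level_keywords": ["theme A", "theme B"], "low_level_keywords": ["entity x", "entity y"]};
# the concrete keyword values shown here are made up purely for illustration.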


async def _get_vector_context(
    query: str,
    chunks_vdb: BaseVectorStorage,
    query_param: QueryParam,
    tokenizer: Tokenizer,
) -> tuple[list, list, list] | None:
    """
    Retrieve vector context from the vector database.

    This function performs a vector search to find relevant text chunks for a query
    and formats them with file path and creation time information.

    Args:
        query: The query string to search for
        chunks_vdb: Vector database containing document chunks
        query_param: Query parameters including top_k and ids
        tokenizer: Tokenizer for counting tokens

    Returns:
        Tuple (empty_entities, empty_relations, text_units) for combine_contexts,
        compatible with _get_edge_data and _get_node_data format
    """
    try:
        results = await chunks_vdb.query(
            query, top_k=query_param.top_k, ids=query_param.ids
        )
        if not results:
            return [], [], []

        valid_chunks = []
        for result in results:
            if "content" in result:
                # Directly use content from chunks_vdb.query result
                chunk_with_time = {
                    "content": result["content"],
                    "created_at": result.get("created_at", None),
                    "file_path": result.get("file_path", "unknown_source"),
                }
                valid_chunks.append(chunk_with_time)

        if not valid_chunks:
            return [], [], []

        maybe_trun_chunks = truncate_list_by_token_size(
            valid_chunks,
            key=lambda x: x["content"],
            max_token_size=query_param.max_token_for_text_unit,
            tokenizer=tokenizer,
        )

        logger.debug(
            f"Truncate chunks from {len(valid_chunks)} to {len(maybe_trun_chunks)} (max tokens:{query_param.max_token_for_text_unit})"
        )
        logger.info(
            f"Vector query: {len(maybe_trun_chunks)} chunks, top_k: {query_param.top_k}"
        )

        if not maybe_trun_chunks:
            return [], [], []

        # Create empty entities and relations contexts
        entities_context = []
        relations_context = []

        # Create text_units_context directly as a list of dictionaries
        text_units_context = []
        for i, chunk in enumerate(maybe_trun_chunks):
            text_units_context.append(
                {
                    "id": i + 1,
                    "content": chunk["content"],
                    "file_path": chunk["file_path"],
                }
            )

        return entities_context, relations_context, text_units_context
    except Exception as e:
        logger.error(f"Error in _get_vector_context: {e}")
        return [], [], []
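
# Illustrative note: the triple returned above always has empty entity and
# relation lists, e.g. ([], [], [{"id": 1, "content": "...", "file_path": "doc.pdf"}]),
# so that process_combine_contexts can merge it with the graph-based contexts
# produced by _get_node_data and _get_edge_data; "doc.pdf" is a placeholder value.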
async def _build_query_context(
    ll_keywords: str,
    hl_keywords: str,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    relationships_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
    chunks_vdb: BaseVectorStorage | None = None,  # only used in mix mode
):
    logger.info(f"Process {os.getpid()} building query context...")

    # Handle local and global modes as before
    if query_param.mode == "local":
        entities_context, relations_context, text_units_context = await _get_node_data(
            ll_keywords,
            knowledge_graph_inst,
            entities_vdb,
            text_chunks_db,
            query_param,
        )
    elif query_param.mode == "global":
        entities_context, relations_context, text_units_context = await _get_edge_data(
            hl_keywords,
            knowledge_graph_inst,
            relationships_vdb,
            text_chunks_db,
            query_param,
        )
    else:  # hybrid or mix mode
        ll_data = await _get_node_data(
            ll_keywords,
            knowledge_graph_inst,
            entities_vdb,
            text_chunks_db,
            query_param,
        )
        hl_data = await _get_edge_data(
            hl_keywords,
            knowledge_graph_inst,
            relationships_vdb,
            text_chunks_db,
            query_param,
        )

        (
            ll_entities_context,
            ll_relations_context,
            ll_text_units_context,
        ) = ll_data

        (
            hl_entities_context,
            hl_relations_context,
            hl_text_units_context,
        ) = hl_data

        # Initialize vector data with empty lists
        vector_entities_context, vector_relations_context, vector_text_units_context = (
            [],
            [],
            [],
        )

        # Only get vector data if in mix mode
        if query_param.mode == "mix" and hasattr(query_param, "original_query"):
            # Get tokenizer from text_chunks_db
            tokenizer = text_chunks_db.global_config.get("tokenizer")

            # Get vector context in triple format
            vector_data = await _get_vector_context(
                query_param.original_query,  # We need to pass the original query
                chunks_vdb,
                query_param,
                tokenizer,
            )

            # If vector_data is not None, unpack it
            if vector_data is not None:
                (
                    vector_entities_context,
                    vector_relations_context,
                    vector_text_units_context,
                ) = vector_data

        # Combine and deduplicate the entities, relationships, and sources
        entities_context = process_combine_contexts(
            hl_entities_context, ll_entities_context, vector_entities_context
        )
        relations_context = process_combine_contexts(
            hl_relations_context, ll_relations_context, vector_relations_context
        )
        text_units_context = process_combine_contexts(
            hl_text_units_context, ll_text_units_context, vector_text_units_context
        )
    # not necessary to use LLM to generate a response
    if not entities_context and not relations_context:
        return None

    # Convert contexts to JSON strings
    entities_str = json.dumps(entities_context, ensure_ascii=False)
    relations_str = json.dumps(relations_context, ensure_ascii=False)
    text_units_str = json.dumps(text_units_context, ensure_ascii=False)

    result = f"""-----Entities-----

```json
{entities_str}
```

-----Relationships-----

```json
{relations_str}
```

-----Sources-----

```json
{text_units_str}
```

"""
    return result


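# Illustrative note (sketch): _build_query_context returns a single string whose
# -----Entities-----, -----Relationships----- and -----Sources----- sections each wrap
# a JSON array (for example [{"id": 1, "entity": "...", "rank": 3, ...}]) in a json
# code fence.  Callers such as kg_query_with_keywords interpolate this string directly
# into the "rag_response" prompt as context_data.

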
async def _get_node_data(
    query: str,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
):
    # get similar entities
    logger.info(
        f"Query nodes: {query}, top_k: {query_param.top_k}, cosine: {entities_vdb.cosine_better_than_threshold}"
    )

    results = await entities_vdb.query(
        query, top_k=query_param.top_k, ids=query_param.ids
    )

    if not len(results):
        return "", "", ""

    # Extract all entity IDs from the results list
    node_ids = [r["entity_name"] for r in results]

    # Call the batch node retrieval and degree functions concurrently.
    nodes_dict, degrees_dict = await asyncio.gather(
        knowledge_graph_inst.get_nodes_batch(node_ids),
        knowledge_graph_inst.node_degrees_batch(node_ids),
    )

    # Now, if you need the node data and degree in order:
    node_datas = [nodes_dict.get(nid) for nid in node_ids]
    node_degrees = [degrees_dict.get(nid, 0) for nid in node_ids]

    if not all([n is not None for n in node_datas]):
        logger.warning("Some nodes are missing, maybe the storage is damaged")

    node_datas = [
        {
            **n,
            "entity_name": k["entity_name"],
            "rank": d,
            "created_at": k.get("created_at"),
        }
        for k, n, d in zip(results, node_datas, node_degrees)
        if n is not None
    ]
    # get text chunks and relations related to the entities
    use_text_units = await _find_most_related_text_unit_from_entities(
        node_datas,
        query_param,
        text_chunks_db,
        knowledge_graph_inst,
    )
    use_relations = await _find_most_related_edges_from_entities(
        node_datas,
        query_param,
        knowledge_graph_inst,
    )

    tokenizer: Tokenizer = text_chunks_db.global_config.get("tokenizer")
    len_node_datas = len(node_datas)
    node_datas = truncate_list_by_token_size(
        node_datas,
        key=lambda x: x["description"] if x["description"] is not None else "",
        max_token_size=query_param.max_token_for_local_context,
        tokenizer=tokenizer,
    )
    logger.debug(
        f"Truncate entities from {len_node_datas} to {len(node_datas)} (max tokens:{query_param.max_token_for_local_context})"
    )

    logger.info(
        f"Local query uses {len(node_datas)} entities, {len(use_relations)} relations, {len(use_text_units)} chunks"
    )

    # build prompt
    entities_context = []
    for i, n in enumerate(node_datas):
        created_at = n.get("created_at", "UNKNOWN")
        if isinstance(created_at, (int, float)):
            created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))

        # Get file path from node data
        file_path = n.get("file_path", "unknown_source")

        entities_context.append(
            {
                "id": i + 1,
                "entity": n["entity_name"],
                "type": n.get("entity_type", "UNKNOWN"),
                "description": n.get("description", "UNKNOWN"),
                "rank": n["rank"],
                "created_at": created_at,
                "file_path": file_path,
            }
        )

    relations_context = []
    for i, e in enumerate(use_relations):
        created_at = e.get("created_at", "UNKNOWN")
        # Convert timestamp to readable format
        if isinstance(created_at, (int, float)):
            created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))

        # Get file path from edge data
        file_path = e.get("file_path", "unknown_source")

        relations_context.append(
            {
                "id": i + 1,
                "entity1": e["src_tgt"][0],
                "entity2": e["src_tgt"][1],
                "description": e["description"],
                "keywords": e["keywords"],
                "weight": e["weight"],
                "rank": e["rank"],
                "created_at": created_at,
                "file_path": file_path,
            }
        )

    text_units_context = []
    for i, t in enumerate(use_text_units):
        text_units_context.append(
            {
                "id": i + 1,
                "content": t["content"],
                "file_path": t.get("file_path", "unknown_source"),
            }
        )
    return entities_context, relations_context, text_units_context


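# Illustrative sketch (hypothetical helper, not called by the library): a minimal
# local-mode retrieval built on _get_node_data.  Assumes the storages were created by a
# configured LightRAG instance.
async def _example_local_retrieval(
    keywords: str,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
) -> int:
    entities_context, relations_context, text_units_context = await _get_node_data(
        keywords, knowledge_graph_inst, entities_vdb, text_chunks_db, query_param
    )
    # Each context is a list of dicts ready to be serialised with json.dumps().
    return len(entities_context) + len(relations_context) + len(text_units_context)

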
async def _find_most_related_text_unit_from_entities(
    node_datas: list[dict],
    query_param: QueryParam,
    text_chunks_db: BaseKVStorage,
    knowledge_graph_inst: BaseGraphStorage,
):
    text_units = [
        split_string_by_multi_markers(dp["source_id"], [GRAPH_FIELD_SEP])
        for dp in node_datas
        if dp["source_id"] is not None
    ]

    node_names = [dp["entity_name"] for dp in node_datas]
    batch_edges_dict = await knowledge_graph_inst.get_nodes_edges_batch(node_names)
    # Build the edges list in the same order as node_datas.
    edges = [batch_edges_dict.get(name, []) for name in node_names]

    all_one_hop_nodes = set()
    for this_edges in edges:
        if not this_edges:
            continue
        all_one_hop_nodes.update([e[1] for e in this_edges])

    all_one_hop_nodes = list(all_one_hop_nodes)

    # Batch retrieve one-hop node data using get_nodes_batch
    all_one_hop_nodes_data_dict = await knowledge_graph_inst.get_nodes_batch(
        all_one_hop_nodes
    )
    all_one_hop_nodes_data = [
        all_one_hop_nodes_data_dict.get(e) for e in all_one_hop_nodes
    ]

    # Add null check for node data
    all_one_hop_text_units_lookup = {
        k: set(split_string_by_multi_markers(v["source_id"], [GRAPH_FIELD_SEP]))
        for k, v in zip(all_one_hop_nodes, all_one_hop_nodes_data)
        if v is not None and "source_id" in v  # Add source_id check
    }

    all_text_units_lookup = {}
    tasks = []

    for index, (this_text_units, this_edges) in enumerate(zip(text_units, edges)):
        for c_id in this_text_units:
            if c_id not in all_text_units_lookup:
                all_text_units_lookup[c_id] = index
                tasks.append((c_id, index, this_edges))

    # Process tasks in small batches to avoid overwhelming resources
    batch_size = 5
    results = []

    for i in range(0, len(tasks), batch_size):
        batch_tasks = tasks[i : i + batch_size]
        batch_results = await asyncio.gather(
            *[text_chunks_db.get_by_id(c_id) for c_id, _, _ in batch_tasks]
        )
        results.extend(batch_results)

    for (c_id, index, this_edges), data in zip(tasks, results):
        all_text_units_lookup[c_id] = {
            "data": data,
            "order": index,
            "relation_counts": 0,
        }

        if this_edges:
            for e in this_edges:
                if (
                    e[1] in all_one_hop_text_units_lookup
                    and c_id in all_one_hop_text_units_lookup[e[1]]
                ):
                    all_text_units_lookup[c_id]["relation_counts"] += 1

    # Filter out None values and ensure data has content
    all_text_units = [
        {"id": k, **v}
        for k, v in all_text_units_lookup.items()
        if v is not None and v.get("data") is not None and "content" in v["data"]
    ]

    if not all_text_units:
        logger.warning("No valid text units found")
        return []

    tokenizer: Tokenizer = text_chunks_db.global_config.get("tokenizer")
    all_text_units = sorted(
        all_text_units, key=lambda x: (x["order"], -x["relation_counts"])
    )
    all_text_units = truncate_list_by_token_size(
        all_text_units,
        key=lambda x: x["data"]["content"],
        max_token_size=query_param.max_token_for_text_unit,
        tokenizer=tokenizer,
    )

    logger.debug(
        f"Truncate chunks from {len(all_text_units_lookup)} to {len(all_text_units)} (max tokens:{query_param.max_token_for_text_unit})"
    )

    all_text_units = [t["data"] for t in all_text_units]
    return all_text_units


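# Illustrative sketch of the batching idiom used above (hypothetical helper, not part
# of the public API): awaiting get_by_id in fixed-size groups keeps the number of
# in-flight storage requests bounded instead of firing them all at once.
async def _example_batched_get_by_id(
    kv: BaseKVStorage, ids: list[str], batch_size: int = 5
) -> list[Any]:
    fetched: list[Any] = []
    for start in range(0, len(ids), batch_size):
        group = ids[start : start + batch_size]
        fetched.extend(await asyncio.gather(*[kv.get_by_id(i) for i in group]))
    return fetched

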
async def _find_most_related_edges_from_entities(
    node_datas: list[dict],
    query_param: QueryParam,
    knowledge_graph_inst: BaseGraphStorage,
):
    node_names = [dp["entity_name"] for dp in node_datas]
    batch_edges_dict = await knowledge_graph_inst.get_nodes_edges_batch(node_names)

    all_edges = []
    seen = set()

    for node_name in node_names:
        this_edges = batch_edges_dict.get(node_name, [])
        for e in this_edges:
            sorted_edge = tuple(sorted(e))
            if sorted_edge not in seen:
                seen.add(sorted_edge)
                all_edges.append(sorted_edge)

    # Prepare edge pairs in two forms:
    # For the batch edge properties function, use dicts.
    edge_pairs_dicts = [{"src": e[0], "tgt": e[1]} for e in all_edges]
    # For edge degrees, use tuples.
    edge_pairs_tuples = list(all_edges)  # all_edges is already a list of tuples

    # Call the batched functions concurrently.
    edge_data_dict, edge_degrees_dict = await asyncio.gather(
        knowledge_graph_inst.get_edges_batch(edge_pairs_dicts),
        knowledge_graph_inst.edge_degrees_batch(edge_pairs_tuples),
    )

    # Reconstruct edge_datas list in the same order as the deduplicated results.
    all_edges_data = []
    for pair in all_edges:
        edge_props = edge_data_dict.get(pair)
        if edge_props is not None:
            combined = {
                "src_tgt": pair,
                "rank": edge_degrees_dict.get(pair, 0),
                **edge_props,
            }
            all_edges_data.append(combined)

    tokenizer: Tokenizer = knowledge_graph_inst.global_config.get("tokenizer")
    all_edges_data = sorted(
        all_edges_data, key=lambda x: (x["rank"], x["weight"]), reverse=True
    )
    all_edges_data = truncate_list_by_token_size(
        all_edges_data,
        key=lambda x: x["description"] if x["description"] is not None else "",
        max_token_size=query_param.max_token_for_global_context,
        tokenizer=tokenizer,
    )

    logger.debug(
        f"Truncate relations from {len(all_edges)} to {len(all_edges_data)} (max tokens:{query_param.max_token_for_global_context})"
    )

    return all_edges_data


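# Illustrative note (sketch): edges are treated as undirected here, so (A, B) and
# (B, A) must count as the same relation.  Sorting each pair before adding it to the
# "seen" set is what performs the deduplication above, e.g.
# tuple(sorted(("B", "A"))) == tuple(sorted(("A", "B"))) == ("A", "B").

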
async def _get_edge_data(
    keywords,
    knowledge_graph_inst: BaseGraphStorage,
    relationships_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
):
    logger.info(
        f"Query edges: {keywords}, top_k: {query_param.top_k}, cosine: {relationships_vdb.cosine_better_than_threshold}"
    )

    results = await relationships_vdb.query(
        keywords, top_k=query_param.top_k, ids=query_param.ids
    )

    if not len(results):
        return "", "", ""

    # Prepare edge pairs in two forms:
    # For the batch edge properties function, use dicts.
    edge_pairs_dicts = [{"src": r["src_id"], "tgt": r["tgt_id"]} for r in results]
    # For edge degrees, use tuples.
    edge_pairs_tuples = [(r["src_id"], r["tgt_id"]) for r in results]

    # Call the batched functions concurrently.
    edge_data_dict, edge_degrees_dict = await asyncio.gather(
        knowledge_graph_inst.get_edges_batch(edge_pairs_dicts),
        knowledge_graph_inst.edge_degrees_batch(edge_pairs_tuples),
    )

    # Reconstruct edge_datas list in the same order as results.
    edge_datas = []
    for k in results:
        pair = (k["src_id"], k["tgt_id"])
        edge_props = edge_data_dict.get(pair)
        if edge_props is not None:
            # Use edge degree from the batch as rank.
            combined = {
                "src_id": k["src_id"],
                "tgt_id": k["tgt_id"],
                "rank": edge_degrees_dict.get(pair, k.get("rank", 0)),
                "created_at": k.get("created_at", None),
                **edge_props,
            }
            edge_datas.append(combined)

    tokenizer: Tokenizer = text_chunks_db.global_config.get("tokenizer")
    edge_datas = sorted(
        edge_datas, key=lambda x: (x["rank"], x["weight"]), reverse=True
    )
    edge_datas = truncate_list_by_token_size(
        edge_datas,
        key=lambda x: x["description"] if x["description"] is not None else "",
        max_token_size=query_param.max_token_for_global_context,
        tokenizer=tokenizer,
    )
    use_entities, use_text_units = await asyncio.gather(
        _find_most_related_entities_from_relationships(
            edge_datas,
            query_param,
            knowledge_graph_inst,
        ),
        _find_related_text_unit_from_relationships(
            edge_datas,
            query_param,
            text_chunks_db,
            knowledge_graph_inst,
        ),
    )
    logger.info(
        f"Global query uses {len(use_entities)} entities, {len(edge_datas)} relations, {len(use_text_units)} chunks"
    )

    relations_context = []
    for i, e in enumerate(edge_datas):
        created_at = e.get("created_at", "UNKNOWN")
        # Convert timestamp to readable format
        if isinstance(created_at, (int, float)):
            created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))

        # Get file path from edge data
        file_path = e.get("file_path", "unknown_source")

        relations_context.append(
            {
                "id": i + 1,
                "entity1": e["src_id"],
                "entity2": e["tgt_id"],
                "description": e["description"],
                "keywords": e["keywords"],
                "weight": e["weight"],
                "rank": e["rank"],
                "created_at": created_at,
                "file_path": file_path,
            }
        )

    entities_context = []
    for i, n in enumerate(use_entities):
        created_at = n.get("created_at", "UNKNOWN")
        # Convert timestamp to readable format
        if isinstance(created_at, (int, float)):
            created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))

        # Get file path from node data
        file_path = n.get("file_path", "unknown_source")

        entities_context.append(
            {
                "id": i + 1,
                "entity": n["entity_name"],
                "type": n.get("entity_type", "UNKNOWN"),
                "description": n.get("description", "UNKNOWN"),
                "rank": n["rank"],
                "created_at": created_at,
                "file_path": file_path,
            }
        )

    text_units_context = []
    for i, t in enumerate(use_text_units):
        text_units_context.append(
            {
                "id": i + 1,
                "content": t["content"],
                "file_path": t.get("file_path", "unknown_source"),
            }
        )
    return entities_context, relations_context, text_units_context


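# Illustrative sketch (hypothetical helper, not called by the library): a minimal
# global-mode retrieval built on _get_edge_data, mirroring the local-mode example
# earlier in this module.
async def _example_global_retrieval(
    keywords: str,
    knowledge_graph_inst: BaseGraphStorage,
    relationships_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
) -> None:
    entities_context, relations_context, text_units_context = await _get_edge_data(
        keywords, knowledge_graph_inst, relationships_vdb, text_chunks_db, query_param
    )
    logger.debug(
        f"global retrieval: {len(entities_context)} entities, "
        f"{len(relations_context)} relations, {len(text_units_context)} chunks"
    )

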
async def _find_most_related_entities_from_relationships(
    edge_datas: list[dict],
    query_param: QueryParam,
    knowledge_graph_inst: BaseGraphStorage,
):
    entity_names = []
    seen = set()

    for e in edge_datas:
        if e["src_id"] not in seen:
            entity_names.append(e["src_id"])
            seen.add(e["src_id"])
        if e["tgt_id"] not in seen:
            entity_names.append(e["tgt_id"])
            seen.add(e["tgt_id"])

    # Batch approach: retrieve nodes and their degrees concurrently with one query each.
    nodes_dict, degrees_dict = await asyncio.gather(
        knowledge_graph_inst.get_nodes_batch(entity_names),
        knowledge_graph_inst.node_degrees_batch(entity_names),
    )

    # Rebuild the list in the same order as entity_names
    node_datas = []
    for entity_name in entity_names:
        node = nodes_dict.get(entity_name)
        degree = degrees_dict.get(entity_name, 0)
        if node is None:
            logger.warning(f"Node '{entity_name}' not found in batch retrieval.")
            continue
        # Combine the node data with the entity name and computed degree (as rank)
        combined = {**node, "entity_name": entity_name, "rank": degree}
        node_datas.append(combined)

    tokenizer: Tokenizer = knowledge_graph_inst.global_config.get("tokenizer")
    len_node_datas = len(node_datas)
    node_datas = truncate_list_by_token_size(
        node_datas,
        key=lambda x: x["description"] if x["description"] is not None else "",
        max_token_size=query_param.max_token_for_local_context,
        tokenizer=tokenizer,
    )
    logger.debug(
        f"Truncate entities from {len_node_datas} to {len(node_datas)} (max tokens:{query_param.max_token_for_local_context})"
    )

    return node_datas


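# Illustrative note (sketch): entity_names is built with a list plus a "seen" set
# rather than set(...) so that the original relation order is preserved, e.g. src/tgt
# pairs [("B", "C"), ("A", "B")] yield ["B", "C", "A"] instead of a re-sorted set.

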
async def _find_related_text_unit_from_relationships(
    edge_datas: list[dict],
    query_param: QueryParam,
    text_chunks_db: BaseKVStorage,
    knowledge_graph_inst: BaseGraphStorage,
):
    text_units = [
        split_string_by_multi_markers(dp["source_id"], [GRAPH_FIELD_SEP])
        for dp in edge_datas
        if dp["source_id"] is not None
    ]
    all_text_units_lookup = {}

    async def fetch_chunk_data(c_id, index):
        if c_id not in all_text_units_lookup:
            chunk_data = await text_chunks_db.get_by_id(c_id)
            # Only store valid data
            if chunk_data is not None and "content" in chunk_data:
                all_text_units_lookup[c_id] = {
                    "data": chunk_data,
                    "order": index,
                }

    tasks = []
    for index, unit_list in enumerate(text_units):
        for c_id in unit_list:
            tasks.append(fetch_chunk_data(c_id, index))

    await asyncio.gather(*tasks)

    if not all_text_units_lookup:
        logger.warning("No valid text chunks found")
        return []

    all_text_units = [{"id": k, **v} for k, v in all_text_units_lookup.items()]
    all_text_units = sorted(all_text_units, key=lambda x: x["order"])

    # Ensure all text chunks have content
    valid_text_units = [
        t for t in all_text_units if t["data"] is not None and "content" in t["data"]
    ]

    if not valid_text_units:
        logger.warning("No valid text chunks after filtering")
        return []

    tokenizer: Tokenizer = text_chunks_db.global_config.get("tokenizer")
    truncated_text_units = truncate_list_by_token_size(
        valid_text_units,
        key=lambda x: x["data"]["content"],
        max_token_size=query_param.max_token_for_text_unit,
        tokenizer=tokenizer,
    )

    logger.debug(
        f"Truncate chunks from {len(valid_text_units)} to {len(truncated_text_units)} (max tokens:{query_param.max_token_for_text_unit})"
    )

    all_text_units: list[TextChunkSchema] = [t["data"] for t in truncated_text_units]

    return all_text_units


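# Illustrative note (sketch): each edge's "source_id" field packs one or more chunk ids
# joined with GRAPH_FIELD_SEP, so split_string_by_multi_markers(...) above expands a
# value like "chunk-a" + GRAPH_FIELD_SEP + "chunk-b" into ["chunk-a", "chunk-b"] before
# the individual chunks are fetched from text_chunks_db.

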
async def naive_query(
    query: str,
    chunks_vdb: BaseVectorStorage,
    query_param: QueryParam,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
    system_prompt: str | None = None,
) -> str | AsyncIterator[str]:
    if query_param.model_func:
        use_model_func = query_param.model_func
    else:
        use_model_func = global_config["llm_model_func"]
        # Apply higher priority (5) to the query LLM function
        use_model_func = partial(use_model_func, _priority=5)

    # Handle cache
    args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
    cached_response, quantized, min_val, max_val = await handle_cache(
        hashing_kv, args_hash, query, query_param.mode, cache_type="query"
    )
    if cached_response is not None:
        return cached_response

    tokenizer: Tokenizer = global_config["tokenizer"]

    _, _, text_units_context = await _get_vector_context(
        query, chunks_vdb, query_param, tokenizer
    )

    if text_units_context is None or len(text_units_context) == 0:
        return PROMPTS["fail_response"]

    text_units_str = json.dumps(text_units_context, ensure_ascii=False)
    if query_param.only_need_context:
        return f"""
---Document Chunks---

```json
{text_units_str}
```

"""
    # Process conversation history
    history_context = ""
    if query_param.conversation_history:
        history_context = get_conversation_turns(
            query_param.conversation_history, query_param.history_turns
        )

    sys_prompt_temp = system_prompt if system_prompt else PROMPTS["naive_rag_response"]
    sys_prompt = sys_prompt_temp.format(
        content_data=text_units_str,
        response_type=query_param.response_type,
        history=history_context,
    )

    if query_param.only_need_prompt:
        return sys_prompt

    len_of_prompts = len(tokenizer.encode(query + sys_prompt))
    logger.debug(f"[naive_query]Prompt Tokens: {len_of_prompts}")

    response = await use_model_func(
        query,
        system_prompt=sys_prompt,
        stream=query_param.stream,
    )

    if isinstance(response, str) and len(response) > len(sys_prompt):
        response = (
            response[len(sys_prompt) :]
            .replace(sys_prompt, "")
            .replace("user", "")
            .replace("model", "")
            .replace(query, "")
            .replace("<system>", "")
            .replace("</system>", "")
            .strip()
        )

    if hashing_kv is not None and hashing_kv.global_config.get("enable_llm_cache"):
        # Save to cache
        await save_to_cache(
            hashing_kv,
            CacheData(
                args_hash=args_hash,
                content=response,
                prompt=query,
                quantized=quantized,
                min_val=min_val,
                max_val=max_val,
                mode=query_param.mode,
                cache_type="query",
            ),
        )

    return response


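# Illustrative sketch (hypothetical wrapper, not part of the public API): calling
# naive_query directly with a QueryParam configured for naive mode.  In normal use the
# LightRAG instance supplies the storages and global_config itself.
async def _example_naive_query(
    chunks_vdb: BaseVectorStorage,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
) -> str | AsyncIterator[str]:
    param = QueryParam(mode="naive", top_k=10)
    return await naive_query(
        "What does this corpus say about X?",
        chunks_vdb,
        param,
        global_config,
        hashing_kv=hashing_kv,
    )

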
async def kg_query_with_keywords(
    query: str,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    relationships_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    query_param: QueryParam,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
    ll_keywords: list[str] = [],
    hl_keywords: list[str] = [],
    chunks_vdb: BaseVectorStorage | None = None,
) -> str | AsyncIterator[str]:
    """
    Refactored kg_query that does NOT extract keywords by itself.
    It expects hl_keywords and ll_keywords to be passed in (both default to empty lists),
    then uses them to build context and produce a final LLM response.
    """
    if query_param.model_func:
        use_model_func = query_param.model_func
    else:
        use_model_func = global_config["llm_model_func"]
        # Apply higher priority (5) to the query LLM function
        use_model_func = partial(use_model_func, _priority=5)

    args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
    cached_response, quantized, min_val, max_val = await handle_cache(
        hashing_kv, args_hash, query, query_param.mode, cache_type="query"
    )
    if cached_response is not None:
        return cached_response

    # If neither list has any keywords, handle that here.
    if not hl_keywords and not ll_keywords:
        logger.warning(
            "No keywords provided, could default to global mode or fail."
        )
        return PROMPTS["fail_response"]
    if not ll_keywords and query_param.mode in ["local", "hybrid"]:
        logger.warning("low_level_keywords is empty, switching to global mode.")
        query_param.mode = "global"
    if not hl_keywords and query_param.mode in ["global", "hybrid"]:
        logger.warning("high_level_keywords is empty, switching to local mode.")
        query_param.mode = "local"

    ll_keywords_str = ", ".join(ll_keywords) if ll_keywords else ""
    hl_keywords_str = ", ".join(hl_keywords) if hl_keywords else ""

    context = await _build_query_context(
        ll_keywords_str,
        hl_keywords_str,
        knowledge_graph_inst,
        entities_vdb,
        relationships_vdb,
        text_chunks_db,
        query_param,
        chunks_vdb=chunks_vdb,
    )
    if not context:
        return PROMPTS["fail_response"]

    if query_param.only_need_context:
        return context

    # Process conversation history
    history_context = ""
    if query_param.conversation_history:
        history_context = get_conversation_turns(
            query_param.conversation_history, query_param.history_turns
        )

    sys_prompt_temp = PROMPTS["rag_response"]
    sys_prompt = sys_prompt_temp.format(
        context_data=context,
        response_type=query_param.response_type,
        history=history_context,
    )

    if query_param.only_need_prompt:
        return sys_prompt

    tokenizer: Tokenizer = global_config["tokenizer"]
    len_of_prompts = len(tokenizer.encode(query + sys_prompt))
    logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")

    # Generate response
    response = await use_model_func(
        query,
        system_prompt=sys_prompt,
        stream=query_param.stream,
    )

    # Clean up response content
    if isinstance(response, str) and len(response) > len(sys_prompt):
        response = (
            response.replace(sys_prompt, "")
            .replace("user", "")
            .replace("model", "")
            .replace(query, "")
            .replace("<system>", "")
            .replace("</system>", "")
            .strip()
        )

    if hashing_kv is not None and hashing_kv.global_config.get("enable_llm_cache"):
        await save_to_cache(
            hashing_kv,
            CacheData(
                args_hash=args_hash,
                content=response,
                prompt=query,
                quantized=quantized,
                min_val=min_val,
                max_val=max_val,
                mode=query_param.mode,
                cache_type="query",
            ),
        )

    return response


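# Illustrative note (sketch): kg_query_with_keywords deliberately receives the already
# extracted keyword lists instead of calling the LLM a second time.  When one list is
# empty the mode is downgraded (hybrid -> global or local) so the remaining list still
# drives retrieval; query_with_keywords below shows the usual call pattern.

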
async def query_with_keywords(
    query: str,
    prompt: str,
    param: QueryParam,
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    relationships_vdb: BaseVectorStorage,
    chunks_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
) -> str | AsyncIterator[str]:
    """
    Extract keywords from the query and then use them for retrieving information.

    1. Extracts high-level and low-level keywords from the query
    2. Formats the query with the extracted keywords and prompt
    3. Uses the appropriate query method based on param.mode

    Args:
        query: The user's query
        prompt: Additional prompt to prepend to the query
        param: Query parameters
        knowledge_graph_inst: Knowledge graph storage
        entities_vdb: Entities vector database
        relationships_vdb: Relationships vector database
        chunks_vdb: Document chunks vector database
        text_chunks_db: Text chunks storage
        global_config: Global configuration
        hashing_kv: Cache storage

    Returns:
        Query response or async iterator
    """
    # Extract keywords
    hl_keywords, ll_keywords = await get_keywords_from_query(
        query=query,
        query_param=param,
        global_config=global_config,
        hashing_kv=hashing_kv,
    )

    # Create a new string with the prompt and the keywords
    keywords_str = ", ".join(ll_keywords + hl_keywords)
    formatted_question = (
        f"{prompt}\n\n### Keywords\n\n{keywords_str}\n\n### Query\n\n{query}"
    )

    param.original_query = query

    # Use appropriate query method based on mode
    if param.mode in ["local", "global", "hybrid", "mix"]:
        return await kg_query_with_keywords(
            formatted_question,
            knowledge_graph_inst,
            entities_vdb,
            relationships_vdb,
            text_chunks_db,
            param,
            global_config,
            hashing_kv=hashing_kv,
            hl_keywords=hl_keywords,
            ll_keywords=ll_keywords,
            chunks_vdb=chunks_vdb,
        )
    elif param.mode == "naive":
        return await naive_query(
            formatted_question,
            chunks_vdb,
            param,
            global_config,
            hashing_kv=hashing_kv,
        )
    else:
        raise ValueError(f"Unknown mode {param.mode}")


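# Illustrative sketch (hypothetical driver, not part of the public API): wiring
# query_with_keywords to a set of LightRAG storages.  Parameter names mirror the
# function signature above; in practice the LightRAG instance assembles these objects.
async def _example_query_with_keywords(
    knowledge_graph_inst: BaseGraphStorage,
    entities_vdb: BaseVectorStorage,
    relationships_vdb: BaseVectorStorage,
    chunks_vdb: BaseVectorStorage,
    text_chunks_db: BaseKVStorage,
    global_config: dict[str, str],
    hashing_kv: BaseKVStorage | None = None,
) -> str | AsyncIterator[str]:
    param = QueryParam(mode="mix")
    return await query_with_keywords(
        "Summarise the main findings.",
        "Answer concisely.",
        param,
        knowledge_graph_inst,
        entities_vdb,
        relationships_vdb,
        chunks_vdb,
        text_chunks_db,
        global_config,
        hashing_kv=hashing_kv,
    )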