# -*- coding: utf-8 -*-
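"""Elasticsearch search layer for RAGFlow: hybrid full-text + vector retrieval,
reranking, answer citation insertion, and an ES-SQL passthrough."""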
import json
import re
from copy import deepcopy

from elasticsearch_dsl import Q, Search
from typing import List, Optional, Dict, Union
from dataclasses import dataclass

from rag.settings import es_logger
from rag.utils import rmSpace
from rag.nlp import huqie, query
import numpy as np


def index_name(uid): return f"ragflow_{uid}"


class Dealer:
    def __init__(self, es):
        self.qryr = query.EsQueryer(es)
        # Full-text fields with per-field boosts ("^n"); keywords and titles
        # count for more than body content.
        self.qryr.flds = [
            "title_tks^10",
            "title_sm_tks^5",
            "important_kwd^30",
            "important_tks^20",
            "content_ltks^2",
            "content_sm_ltks"]
        self.es = es

    @dataclass
    class SearchResult:
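        """Result of one search round-trip: hits plus query-side artifacts."""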
        total: int
        ids: List[str]
        query_vector: Optional[List[float]] = None
        field: Optional[Dict] = None
        highlight: Optional[Dict] = None
        aggregation: Union[List, Dict, None] = None
        keywords: Optional[List[str]] = None
        group_docs: Optional[List[List]] = None

    def _vector(self, txt, emb_mdl, sim=0.8, topk=10):
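        """Embed `txt` with `emb_mdl` and build the ES kNN clause; the target
        field is selected by embedding dimension (e.g. q_768_vec)."""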
        qv, c = emb_mdl.encode_queries(txt)  # c (the token count) is unused here
        return {
            "field": "q_%d_vec" % len(qv),
            "k": topk,
            "similarity": sim,
            "num_candidates": topk * 2,
            "query_vector": qv
        }

    def search(self, req, idxnm, emb_mdl=None):
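        """Run the hybrid full-text/kNN query described by `req` against index
        `idxnm`, retrying with a looser match when nothing is found."""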
        qst = req.get("question", "")
        bqry, keywords = self.qryr.question(qst)
        if req.get("kb_ids"):
            bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
        if req.get("doc_ids"):
            bqry.filter.append(Q("terms", doc_id=req["doc_ids"]))
        if "available_int" in req:
            if req["available_int"] == 0:
                bqry.filter.append(Q("range", available_int={"lt": 1}))
            else:
                bqry.filter.append(
                    Q("bool", must_not=Q("range", available_int={"lt": 1})))
        bqry.boost = 0.05

        s = Search()
        pg = int(req.get("page", 1)) - 1
        ps = int(req.get("size", 1000))
        src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id",
                                 "image_id", "doc_id", "q_512_vec", "q_768_vec",
                                 "position_int", "q_1024_vec", "q_1536_vec",
                                 "available_int", "content_with_weight"])

        s = s.query(bqry)[pg * ps:(pg + 1) * ps]
        s = s.highlight("content_ltks")
        s = s.highlight("title_ltks")
        if not qst:
            if not req.get("sort"):
                s = s.sort(
                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
                )
            else:
                s = s.sort(
                    {"page_num_int": {"order": "asc", "unmapped_type": "float", "mode": "avg"}},
                    {"top_int": {"order": "asc", "unmapped_type": "float", "mode": "avg"}},
                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
                )

        if qst:
            s = s.highlight_options(
                fragment_size=120,
                number_of_fragments=5,
                boundary_scanner_locale="zh-CN",
                boundary_scanner="SENTENCE",
                boundary_chars=",./;:\\!(),。?:!……()——、"
            )

        s = s.to_dict()
        q_vec = []
        if req.get("vector"):
            assert emb_mdl, "No embedding model selected"
            s["knn"] = self._vector(
                qst, emb_mdl, req.get("similarity", 0.1), ps)
            s["knn"]["filter"] = bqry.to_dict()
            # Highlighting is dropped when the kNN clause is present.
            if "highlight" in s:
                del s["highlight"]
            q_vec = s["knn"]["query_vector"]
        es_logger.info("【Q】: {}".format(json.dumps(s)))
        res = self.es.search(deepcopy(s), idxnm=idxnm, timeout="600s", src=src)
        es_logger.info("TOTAL: {}".format(self.es.getTotal(res)))
        if self.es.getTotal(res) == 0 and "knn" in s:
            # No hits: retry with a looser text match and a lower kNN cutoff.
            bqry, _ = self.qryr.question(qst, min_match="10%")
            if req.get("kb_ids"):
                bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
            s["query"] = bqry.to_dict()
            s["knn"]["filter"] = bqry.to_dict()
            s["knn"]["similarity"] = 0.17
            res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src)

        # Expand query keywords with their fine-grained tokenization.
        kwds = set()
        for k in keywords:
            kwds.add(k)
            for kk in huqie.qieqie(k).split(" "):
                if len(kk) < 2:
                    continue
                if kk in kwds:
                    continue
                kwds.add(kk)

        aggs = self.getAggregation(res, "docnm_kwd")

        return self.SearchResult(
            total=self.es.getTotal(res),
            ids=self.es.getDocIds(res),
            query_vector=q_vec,
            aggregation=aggs,
            highlight=self.getHighlight(res),
            field=self.getFields(res, src),
            keywords=list(kwds)
        )

    def getAggregation(self, res, g):
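        """Return (key, doc_count) pairs from the "aggs_<g>" aggregation, if any."""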
        if "aggregations" not in res or "aggs_" + g not in res["aggregations"]:
            return
        bkts = res["aggregations"]["aggs_" + g]["buckets"]
        return [(b["key"], b["doc_count"]) for b in bkts]

    def getHighlight(self, res):
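        """Map each hit's _id to its concatenated highlight fragments."""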
        def rmspace(line):
            # Collapse tokenizer spaces, keeping one space between English
            # words. NOTE: currently unused below.
            eng = set(list("qwertyuioplkjhgfdsazxcvbnm"))
            r = []
            for t in line.split(" "):
                if not t:
                    continue
                if len(r) > 0 and len(t) > 0 and r[-1][-1] in eng and t[0] in eng:
                    r.append(" ")
                r.append(t)
            r = "".join(r)
            return r

        ans = {}
        for d in res["hits"]["hits"]:
            hlts = d.get("highlight")
            if not hlts:
                continue
            ans[d["_id"]] = "".join(list(hlts.items())[0][1])
        return ans

    def getFields(self, sres, flds):
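        """Collect the requested stored fields per hit id; list values are
        flattened to tab-joined strings and token fields are de-spaced."""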
        res = {}
        if not flds:
            return {}
        for d in self.es.getSource(sres):
            m = {n: d.get(n) for n in flds if d.get(n) is not None}
            for n, v in m.items():
                if isinstance(v, list):
                    m[n] = "\t".join([str(vv) if not isinstance(vv, list)
                                      else "\t".join([str(vvv) for vvv in vv])
                                      for vv in v])
                    continue
                if not isinstance(v, str):
                    m[n] = str(m[n])
                if n.find("tks") > 0:
                    m[n] = rmSpace(m[n])

            if m:
                res[d["id"]] = m
        return res

    @staticmethod
    def trans2floats(txt):
        # Vectors are stored as tab-separated strings; parse back to floats.
        return [float(t) for t in txt.split("\t")]

    def insert_citations(self, answer, chunks, chunk_v,
                         embd_mdl, tkweight=0.7, vtweight=0.3):
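        """Split `answer` into sentences, score each against the retrieved
        chunks with hybrid term/vector similarity, and append " ##i$$" markers
        citing the best-matching chunk indices."""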
        assert len(chunks) == len(chunk_v)
        # Split on CJK/Latin sentence terminators, keeping the delimiters.
        pieces = re.split(r"([;。?!!\n]|[a-z][.?;!][ \n])", answer)
        for i in range(1, len(pieces)):
            if re.match(r"[a-z][.?;!][ \n]", pieces[i]):
                pieces[i - 1] += pieces[i][0]
                pieces[i] = pieces[i][1:]
        idx = []
        pieces_ = []
        for i, t in enumerate(pieces):
            if len(t) < 5:
                continue
            idx.append(i)
            pieces_.append(t)
        es_logger.info("{} => {}".format(answer, pieces_))
        if not pieces_:
            return answer

        ans_v, _ = embd_mdl.encode(pieces_)
        assert len(ans_v[0]) == len(chunk_v[0]), \
            "The dimension of query and chunk do not match: {} vs. {}".format(
                len(ans_v[0]), len(chunk_v[0]))

        chunks_tks = [huqie.qie(ck).split(" ") for ck in chunks]
        cites = {}
        for i, a in enumerate(pieces_):
            sim, tksim, vtsim = self.qryr.hybrid_similarity(ans_v[i],
                                                            chunk_v,
                                                            huqie.qie(a).split(" "),
                                                            chunks_tks,
                                                            tkweight, vtweight)
            mx = np.max(sim) * 0.99
            if mx < 0.35:
                continue
            # Cite at most four chunks whose similarity is within 1% of the best.
            cites[idx[i]] = list(
                set([str(ii) for ii in range(len(chunk_v)) if sim[ii] > mx]))[:4]

        res = ""
        for i, p in enumerate(pieces):
            res += p
            if i not in idx:
                continue
            if i not in cites:
                continue
            for c in cites[i]:
                assert int(c) < len(chunk_v)
                res += f" ##{c}$$"
        return res

    def rerank(self, sres, query, tkweight=0.3,
               vtweight=0.7, cfield="content_ltks"):
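        """Re-score hits in `sres` against `query` with hybrid similarity;
        returns (combined, term, vector) scores aligned with sres.ids."""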
        ins_embd = [
            Dealer.trans2floats(
                sres.field[i].get("q_%d_vec" % len(sres.query_vector),
                                  "\t".join(["0"] * len(sres.query_vector))))
            for i in sres.ids]
        if not ins_embd:
            return [], [], []
        ins_tw = [sres.field[i][cfield].split(" ")
                  for i in sres.ids]
        sim, tksim, vtsim = self.qryr.hybrid_similarity(sres.query_vector,
                                                        ins_embd,
                                                        huqie.qie(query).split(" "),
                                                        ins_tw, tkweight, vtweight)
        return sim, tksim, vtsim

    def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
        return self.qryr.hybrid_similarity(ans_embd,
                                           ins_embd,
                                           huqie.qie(ans).split(" "),
                                           huqie.qie(inst).split(" "))

    def retrieval(self, question, embd_mdl, tenant_id, kb_ids, page, page_size,
                  similarity_threshold=0.2, vector_similarity_weight=0.3,
                  top=1024, doc_ids=None, aggs=True):
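        """End-to-end retrieval: search, rerank, apply the similarity
        threshold, paginate, and aggregate hit counts per source document."""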
        ranks = {"total": 0, "chunks": [], "doc_aggs": {}}
        if not question:
            return ranks
        req = {"kb_ids": kb_ids, "doc_ids": doc_ids, "size": top,
               "question": question, "vector": True,
               "similarity": similarity_threshold}
        sres = self.search(req, index_name(tenant_id), embd_mdl)

        sim, tsim, vsim = self.rerank(
            sres, question, 1 - vector_similarity_weight, vector_similarity_weight)
        # Rank hits by combined similarity, descending.
        idx = np.argsort(sim * -1)

        dim = len(sres.query_vector)
        start_idx = (page - 1) * page_size
        for i in idx:
            if sim[i] < similarity_threshold:
                break
            ranks["total"] += 1
            start_idx -= 1
            if start_idx >= 0:
                continue
            if len(ranks["chunks"]) == page_size:
                if aggs:
                    continue  # keep counting for doc_aggs past the page
                break
            id = sres.ids[i]
            dnm = sres.field[id]["docnm_kwd"]
            did = sres.field[id]["doc_id"]
            d = {
                "chunk_id": id,
                "content_ltks": sres.field[id]["content_ltks"],
                "content_with_weight": sres.field[id]["content_with_weight"],
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": dnm,
                "kb_id": sres.field[id]["kb_id"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "similarity": sim[i],
                "vector_similarity": vsim[i],
                "term_similarity": tsim[i],
                "vector": self.trans2floats(
                    sres.field[id].get("q_%d_vec" % dim, "\t".join(["0"] * dim))),
                "positions": sres.field[id].get("position_int", "").split("\t")
            }
            # position_int packs positions as flat 5-number tuples.
            if len(d["positions"]) % 5 == 0:
                poss = []
                for j in range(0, len(d["positions"]), 5):
                    poss.append([float(d["positions"][j]), float(d["positions"][j + 1]),
                                 float(d["positions"][j + 2]), float(d["positions"][j + 3]),
                                 float(d["positions"][j + 4])])
                d["positions"] = poss
            ranks["chunks"].append(d)
            if dnm not in ranks["doc_aggs"]:
                ranks["doc_aggs"][dnm] = {"doc_id": did, "count": 0}
            ranks["doc_aggs"][dnm]["count"] += 1
        ranks["doc_aggs"] = [{"doc_name": k, "doc_id": v["doc_id"], "count": v["count"]}
                             for k, v in sorted(ranks["doc_aggs"].items(),
                                                key=lambda x: x[1]["count"] * -1)]

        return ranks

    def sql_retrieval(self, sql, fetch_size=128, format="json"):
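        """Rewrite equality/LIKE predicates on *_tks columns into fuzzy ES
        MATCH() calls, then run the statement via the ES SQL endpoint."""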
        sql = re.sub(r"[ ]+", " ", sql)
        sql = sql.replace("%", "")
        es_logger.info(f"Get es sql: {sql}")
        replaces = []
        for r in re.finditer(r" ([a-z_]+_l?tks)( like | ?= ?)'([^']+)'", sql):
            fld, v = r.group(1), r.group(3)
            # Tokenize the literal and turn the predicate into a fuzzy MATCH().
            match = " MATCH({}, '{}', 'operator=OR;fuzziness=AUTO:1,3;minimum_should_match=30%') ".format(
                fld, huqie.qieqie(huqie.qie(v)))
            replaces.append(
                ("{}{}'{}'".format(r.group(1), r.group(2), r.group(3)), match))

        for p, r in replaces:
            sql = sql.replace(p, r, 1)
        es_logger.info(f"To es: {sql}")

        try:
            tbl = self.es.sql(sql, fetch_size, format)
            return tbl
        except Exception as e:
            # Log and fall through (returns None) on a failed statement.
            es_logger.error(f"SQL failure: {sql} =>" + str(e))
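
# Usage sketch (illustrative only). Assumes an ES connection wrapper exposing
# search/sql/getTotal/getDocIds/getSource and an embedding model exposing
# encode/encode_queries, as used above; the imports below are hypothetical.
#
#   from rag.utils import ELASTICSEARCH   # hypothetical ES wrapper instance
#   from rag.llm import EmbeddingModel    # hypothetical embedding model
#
#   dealer = Dealer(ELASTICSEARCH)
#   ranks = dealer.retrieval("what is a knowledge base?", EmbeddingModel(),
#                            tenant_id="t0", kb_ids=["kb0"], page=1, page_size=10)
#   for ck in ranks["chunks"]:
#       print(ck["docnm_kwd"], ck["similarity"])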