mirror of
https://github.com/OpenSPG/KAG.git
synced 2025-11-25 23:01:34 +00:00
* add path find * fix find path * spg guided relation extraction * fix dict parse with same key * rename graphalgoclient to graphclient * file reader supports http url * add checkpointer class * parser supports checkpoint * add build * remove incorrect logs * remove logs * update examples * update chain checkpointer * vectorizer batch size set to 32 * add a zodb-backed checkpointer * fix zodb based checkpointer * add thread for zodb IO * fix(common): resolve multithread conflict in zodb IO * fix(common): load existing zodb checkpoints * update examples * fix zodb writer * add docstring * fix jieba version mismatch * commit kag_config-tc.yaml: 1. rename type to register_name; 2. give register_name a unique & specific name; 3. rename reader to scanner; 4. rename parser to reader; 5. rename num_parallel to num_parallel_file, rename chain_level_num_paralle to num_parallel_chain_of_file; 6. rename kag_extractor to schema_free_extractor and schema_base_extractor to schema_constraint_extractor; 7. pre-define llm & vectorize_model and refer to them in the yaml file. Issues to be resolved: 1. examples of event extract & spg extract; 2. statistics for the indexer, such as number of nodes & edges extracted and ratio of llm invocations; 3. exceptions such as Debt, account does not exist should be thrown in llm invoke; 4. the solver conf needs to be re-examined.
* 1. fix bug in base_table_splitter * 1. fix bug in default_chain * add solver * add kag * update outline splitter * add main test * add op * code refactor * add tools * fix outline splitter * fix outline prompt * graph api pass * commit with page rank * add search api and graph api * add markdown report * fix vectorizer num batch compute * add retry for vectorize model call * update markdown reader * update pdf reader * raise extractor failure * add default expr * add log * merge jc reader features * rm import
* rm parser * run pipeline * add config option of whether to perform llm config check, default to false * fix * recover pdf reader * several components can be null for default chain * support running the full QA flow * add if * remove unused code * use chunks as a fallback * excluded source relation to choose * add generate * default recall 10 * add local memory * exclude similar edges * add protection * fix concurrency issue * add debug logger * support topk parameterization * support chunk truncation and adjust the spo select prompt * add query request protection * add force_chunk configuration * fix entity linker algorithm * add sub query rewriting * fix md reader dup in test * fix * merge knext to kag parallel * fix package * fix metric regression * scanner update * add doc and update example scripts * fix * add bridge to spg server * add format * fix bridge * update conf for baike * disable ckpt for spg server runner * llm invoke error default raise exceptions * chore(version): bump version to X.Y.Z * update default response generation prompt * add method getSummarizationMetrics * fix(common): fix project conf empty error * fix typo * add reporting information * update main solver * postprocessor support spg server * update solver supported names * fix language * update chunker interface and add openapi * rename vectorizer to vectorize_model in spg server config * generate_random_string start with gen * add knext llm vector checker * remove default values from solver * update yaml and register_name for baike * remove config key check * fix llm module * fix knext project * update yaml and register_name for examples * Revert "udpate yaml and register_name for examples" This reverts commit b3fa5ca9ba749e501133ac67bd8746027ab839d9.
* update register name * fix * support multiple register names * update component * update reader register names (#183) * fix markdown reader * fix llm client for retry * feat(common): add processed chunk id checkpoint (#185) * add processed chunk id checkpoint * feat(example): add example config (#186) * add example config file * add max_workers parameter for getSummarizationMetrics to make it faster * add csqa data generation script generate_data.py * commit generated csqa builder and solver data * add csqa basic project files * adjust split_length and num_threads_per_chain to match lightrag settings * ignore ckpt dirs * add csqa evaluation script eval.py * save evaluation scripts summarization_metrics.py and factual_correctness.py * save LightRAG output csqa_lightrag_answers.json * ignore KAG output csqa_kag_answers.json * add README.md for CSQA * fix(solver): fix solver pipeline conf (#191) * update solver pipeline config * fix project create * update links and file paths * reformat csqa kag_config.yaml * reformat csqa python files * reformat getSummarizationMetrics and compare_summarization_answers * fix(solver): fix solver config (#192) * fix main solver conf * add except * fix typo in csqa README.md * feat(conf): support reinitialize config for call from java side (#199) * support reinitialize config for java call * revert default response generation prompt * update project list * add README.md for the hotpotqa, 2wiki and musique examples * add spo retrieval * turn off kag config dump by default * turn off knext schema dump by default * add .gitignore and fix kag_config.yaml * add README.md for the medicine example * add README.md for the supplychain example * bugfix for risk mining * use exact out * refactor(solver): format solver code (#205) * black format --------- Co-authored-by: peilong <peilong.zpl@antgroup.com> Co-authored-by: 锦呈 <zhangxinhong.zxh@antgroup.com> Co-authored-by: zhengke.gzk <zhengke.gzk@antgroup.com> Co-authored-by: huaidong.xhd <huaidong.xhd@antgroup.com>
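A rough sketch of the kag_config naming convention described in the commit messages above, written as Python dicts purely for illustration (the real file is YAML and its exact layout is not reproduced on this page); all values, and any keys not named in the commit message, are hypothetical:

# Hypothetical illustration only: component entries use register_name instead of
# type, and the llm / vectorize_model settings are defined once and referenced by
# the components that need them. Values below are made up.
old_cfg = {
    "extractor": {"type": "kag_extractor"},  # renamed to schema_free_extractor
    "num_parallel": 4,                       # renamed to num_parallel_file
}
new_cfg = {
    "llm": {"register_name": "chat_llm"},                 # pre-defined once
    "vectorize_model": {"register_name": "bge_vectorizer"},
    "extractor": {
        "register_name": "schema_free_extractor",
        "llm": "llm",                                      # refers to the shared llm above
    },
    "num_parallel_file": 4,
}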
136 lines
4.7 KiB
Python
# -*- coding: utf-8 -*-
# Copyright 2023 OpenSPG Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied.

import unittest

from kag.builder.model.sub_graph import SubGraph
from kag.builder.component.mapping.spo_mapping import SPOMapping


class TestSPOMapping(unittest.TestCase):
    def setUp(self):
        self.mapping_1 = SPOMapping()
        self.mapping_1.add_field_mappings(
            s_id_col="subject_id",
            p_type_col="predicate",
            o_id_col="object_id",
            s_type_col="subject_type",
            o_type_col="object_type",
        )
        self.mapping_1.add_sub_property_mapping(source_name="sub_property")

        self.mapping_2 = SPOMapping()
        self.mapping_2.add_field_mappings(
            s_id_col="subject_id",
            p_type_col="predicate",
            o_id_col="object_id",
            s_type_col="subject_type",
            o_type_col="object_type",
        )
        self.mapping_2.add_sub_property_mapping(
            source_name="sub_property_1", target_name="target_1"
        )
        self.mapping_2.add_sub_property_mapping(
            source_name="sub_property_2", target_name="target_2"
        )

    def test_invoke_with_dict_properties(self):
        # Prepare mock input data
        input_data = {
            "subject_id": "s1",
            "predicate": "P1",
            "object_id": "o1",
            "subject_type": "Person",
            "object_type": "Organization",
            "sub_property": '{"target": "value"}',
        }

        # Call the invoke method
        outputs = self.mapping_1.invoke(input_data)

        # Verify the results
        self.assertIsInstance(outputs, list)
        self.assertEqual(len(outputs), 1)
        sub_graph = outputs[0]
        self.assertIsInstance(sub_graph, SubGraph)
        self.assertEqual(len(sub_graph.nodes), 2)
        self.assertEqual(len(sub_graph.edges), 1)

        # Verify node and edge details
        s_node, o_node = sub_graph.nodes
        self.assertEqual(s_node.id, "s1")
        self.assertEqual(s_node.name, "s1")
        self.assertEqual(s_node.label, "Person")

        self.assertEqual(o_node.id, "o1")
        self.assertEqual(o_node.name, "o1")
        self.assertEqual(o_node.label, "Organization")

        edge = sub_graph.edges[0]
        self.assertEqual(edge.from_id, "s1")
        self.assertEqual(edge.from_type, "Person")
        self.assertEqual(edge.label, "P1")
        self.assertEqual(edge.to_id, "o1")
        self.assertEqual(edge.to_type, "Organization")
        self.assertEqual(edge.properties, {"target": "value"})

    def test_invoke_with_properties(self):
        # Prepare mock input data
        input_data = {
            "subject_id": "s1",
            "predicate": "P1",
            "object_id": "o1",
            "subject_type": "Person",
            "object_type": "Organization",
            "sub_property_1": "value_1",
            "sub_property_2": "value_2",
        }

        # Call the invoke method
        outputs = self.mapping_2.invoke(input_data)

        # Verify the results
        self.assertIsInstance(outputs, list)
        self.assertEqual(len(outputs), 1)
        sub_graph = outputs[0]
        self.assertIsInstance(sub_graph, SubGraph)
        self.assertEqual(len(sub_graph.nodes), 2)
        self.assertEqual(len(sub_graph.edges), 1)

        # Verify node and edge details
        s_node, o_node = sub_graph.nodes
        self.assertEqual(s_node.id, "s1")
        self.assertEqual(s_node.name, "s1")
        self.assertEqual(s_node.label, "Person")

        self.assertEqual(o_node.id, "o1")
        self.assertEqual(o_node.name, "o1")
        self.assertEqual(o_node.label, "Organization")

        edge = sub_graph.edges[0]
        self.assertEqual(edge.from_id, "s1")
        self.assertEqual(edge.from_type, "Person")
        self.assertEqual(edge.label, "P1")
        self.assertEqual(edge.to_id, "o1")
        self.assertEqual(edge.to_type, "Organization")
        self.assertEqual(
            edge.properties, {"target_1": "value_1", "target_2": "value_2"}
        )

    def test_add_sub_property_mapping_raises_value_error(self):
        # Adding another mapping after sub_property_col has already been set should raise
        self.mapping_1.sub_property_col = "sub_property"
        with self.assertRaises(ValueError):
            self.mapping_1.add_sub_property_mapping(source_name="another_property")


if __name__ == "__main__":
    unittest.main()
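For reference, a minimal standalone sketch of the same SPOMapping API exercised by the tests above; the record values are hypothetical and only illustrate the structured rows this component expects:

# Minimal standalone sketch (not part of the test file above); it reuses the
# SPOMapping API shown in the tests. Record values are hypothetical.
from kag.builder.component.mapping.spo_mapping import SPOMapping

mapping = SPOMapping()
mapping.add_field_mappings(
    s_id_col="subject_id",
    p_type_col="predicate",
    o_id_col="object_id",
    s_type_col="subject_type",
    o_type_col="object_type",
)
# A sub-property column holding a JSON string becomes the edge's properties.
mapping.add_sub_property_mapping(source_name="sub_property")

record = {
    "subject_id": "alice",
    "predicate": "worksFor",
    "object_id": "acme",
    "subject_type": "Person",
    "object_type": "Organization",
    "sub_property": '{"since": "2020"}',
}
sub_graph = mapping.invoke(record)[0]
print(len(sub_graph.nodes), len(sub_graph.edges))  # expected: 2 nodes, 1 edge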