graphrag/tests/verbs/test_create_summarized_entities.py

# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

import networkx as nx
import pytest

from graphrag.config.enums import LLMType
from graphrag.index.storage.memory_pipeline_storage import MemoryPipelineStorage
from graphrag.index.workflows.v1.create_summarized_entities import (
    build_steps,
    workflow_name,
)

from .util import (
    get_config_for_workflow,
    get_workflow_output,
    load_expected,
    load_input_tables,
)

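# `LLMType.StaticResponse` makes the pipeline replay the canned responses below
# instead of calling a real LLM, so the summarized description is deterministic.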
MOCK_LLM_RESPONSES = [
    """
    This is a MOCK response for the LLM. It is summarized!
    """.strip()
]

MOCK_LLM_CONFIG = {
    "type": LLMType.StaticResponse,
    "responses": MOCK_LLM_RESPONSES,
}

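# Happy path: run the workflow with the mock LLM and compare the resulting
# entity graph against the stored expected output.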
async def test_create_summarized_entities():
    input_tables = load_input_tables([
        "workflow:create_base_extracted_entities",
    ])
    expected = load_expected(workflow_name)

    storage = MemoryPipelineStorage()

    config = get_config_for_workflow(workflow_name)
    config["summarize_descriptions"]["strategy"]["llm"] = MOCK_LLM_CONFIG

    steps = build_steps(config)

    actual = await get_workflow_output(
        input_tables,
        {
            "steps": steps,
        },
        storage=storage,
    )

    # the serialization of the graph may differ, so we can't assert the dataframes directly
    assert actual.shape == expected.shape, "Graph dataframe shapes differ"

    # let's parse a sample of the raw graphml
    actual_graphml_0 = actual["entity_graph"][:1][0]
    actual_graph_0 = nx.parse_graphml(actual_graphml_0)
    expected_graphml_0 = expected["entity_graph"][:1][0]
    expected_graph_0 = nx.parse_graphml(expected_graphml_0)

    assert (
        actual_graph_0.number_of_nodes() == expected_graph_0.number_of_nodes()
    ), "Graphml node count differs"
    assert (
        actual_graph_0.number_of_edges() == expected_graph_0.number_of_edges()
    ), "Graphml edge count differs"

    # ensure the mock summary was injected into the nodes
    nodes = list(actual_graph_0.nodes(data=True))
    assert (
        nodes[0][1]["description"]
        == "This is a MOCK response for the LLM. It is summarized!"
    )
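    # no snapshot flag is set, so nothing should have been written to storage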
    assert len(storage.keys()) == 0, "Storage should be empty"


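# With the graphml_snapshot flag enabled, the workflow should also persist the
# summarized graph to storage.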
async def test_create_summarized_entities_with_snapshots():
    input_tables = load_input_tables([
        "workflow:create_base_extracted_entities",
    ])
    expected = load_expected(workflow_name)

    storage = MemoryPipelineStorage()

    config = get_config_for_workflow(workflow_name)
    config["summarize_descriptions"]["strategy"]["llm"] = MOCK_LLM_CONFIG
config["graphml_snapshot"] = True
steps = build_steps(config)
actual = await get_workflow_output(
input_tables,
{
"steps": steps,
},
storage=storage,
)
assert actual.shape == expected.shape, "Graph dataframe shapes differ"
assert storage.keys() == [
"summarized_graph.graphml",
], "Graph snapshot keys differ"
async def test_create_summarized_entities_missing_llm_throws():
    input_tables = load_input_tables([
        "workflow:create_base_extracted_entities",
    ])

    config = get_config_for_workflow(workflow_name)
    del config["summarize_descriptions"]["strategy"]["llm"]

    steps = build_steps(config)

    with pytest.raises(ValueError):  # noqa: PT011
        await get_workflow_output(
            input_tables,
            {
                "steps": steps,
            },
        )