graphrag/tests/verbs/test_extract_graph.py

# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

import pytest

from graphrag.config.enums import LLMType
from graphrag.index.run.utils import create_run_context
from graphrag.index.workflows.v1.extract_graph import (
    build_steps,
    workflow_name,
)

from .util import (
    get_config_for_workflow,
    get_workflow_output,
    load_input_tables,
    load_test_table,
)
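
# Canned LLM output for the entity-extraction step: three entities and two
# relationships in the delimited tuple format the extraction verb parses.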
MOCK_LLM_ENTITY_RESPONSES = [
    """
    ("entity"<|>COMPANY_A<|>COMPANY<|>Company_A is a test company)
    ##
    ("entity"<|>COMPANY_B<|>COMPANY<|>Company_B owns Company_A and also shares an address with Company_A)
    ##
    ("entity"<|>PERSON_C<|>PERSON<|>Person_C is director of Company_A)
    ##
    ("relationship"<|>COMPANY_A<|>COMPANY_B<|>Company_A and Company_B are related because Company_A is 100% owned by Company_B and the two companies also share the same address)<|>2)
    ##
    ("relationship"<|>COMPANY_A<|>PERSON_C<|>Company_A and Person_C are related because Person_C is director of Company_A<|>1))
    """.strip()
]

MOCK_LLM_ENTITY_CONFIG = {
    "type": LLMType.StaticResponse,
    "responses": MOCK_LLM_ENTITY_RESPONSES,
}
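
# The summarization mock returns the same static text for any description merge.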
MOCK_LLM_SUMMARIZATION_RESPONSES = [
    """
    This is a MOCK response for the LLM. It is summarized!
    """.strip()
]

MOCK_LLM_SUMMARIZATION_CONFIG = {
    "type": LLMType.StaticResponse,
    "responses": MOCK_LLM_SUMMARIZATION_RESPONSES,
}


async def test_extract_graph():
    input_tables = load_input_tables([
        "workflow:create_base_text_units",
    ])
    nodes_expected = load_test_table("base_entity_nodes")
    edges_expected = load_test_table("base_relationship_edges")

    context = create_run_context(None, None, None)
    await context.runtime_storage.set(
        "base_text_units", input_tables["workflow:create_base_text_units"]
    )
    config = get_config_for_workflow(workflow_name)
    config["entity_extract"]["strategy"]["llm"] = MOCK_LLM_ENTITY_CONFIG
    config["summarize_descriptions"]["strategy"]["llm"] = MOCK_LLM_SUMMARIZATION_CONFIG
    steps = build_steps(config)

    await get_workflow_output(
        input_tables,
        {
            "steps": steps,
        },
        context=context,
    )

    # graph construction creates transient tables for nodes, edges, and communities
    nodes_actual = await context.runtime_storage.get("base_entity_nodes")
    edges_actual = await context.runtime_storage.get("base_relationship_edges")

    assert len(nodes_actual.columns) == len(nodes_expected.columns), (
        "Nodes dataframe columns differ"
    )

    assert len(edges_actual.columns) == len(edges_expected.columns), (
        "Edges dataframe columns differ"
    )

    # TODO: with the combined verb we can't force summarization
    # this is because the mock responses always result in a single description, which is returned verbatim rather than summarized
    # we need to update the mocking to provide somewhat unique graphs so a true merge happens
    # the assertion should grab a node and ensure the description matches the mock description, not the original as we are doing below
assert nodes_actual["description"].to_numpy()[0] == "Company_A is a test company"
assert len(context.storage.keys()) == 0, "Storage should be empty"


async def test_extract_graph_with_snapshots():
    input_tables = load_input_tables([
        "workflow:create_base_text_units",
    ])

    context = create_run_context(None, None, None)
    await context.runtime_storage.set(
        "base_text_units", input_tables["workflow:create_base_text_units"]
    )

    config = get_config_for_workflow(workflow_name)
    config["entity_extract"]["strategy"]["llm"] = MOCK_LLM_ENTITY_CONFIG
    config["summarize_descriptions"]["strategy"]["llm"] = MOCK_LLM_SUMMARIZATION_CONFIG
config["snapshot_graphml"] = True
config["snapshot_transient"] = True
config["embed_graph_enabled"] = True # need this on in order to see the snapshot
steps = build_steps(config)
await get_workflow_output(
input_tables,
{
"steps": steps,
},
context=context,
)

    assert context.storage.keys() == [
        "graph.graphml",
        "base_entity_nodes.parquet",
        "base_relationship_edges.parquet",
    ], "Graph snapshot keys differ"


async def test_extract_graph_missing_llm_throws():
    input_tables = load_input_tables([
        "workflow:create_base_text_units",
    ])

    context = create_run_context(None, None, None)
    await context.runtime_storage.set(
        "base_text_units", input_tables["workflow:create_base_text_units"]
    )

    config = get_config_for_workflow(workflow_name)
    config["entity_extract"]["strategy"]["llm"] = MOCK_LLM_ENTITY_CONFIG
del config["summarize_descriptions"]["strategy"]["llm"]
steps = build_steps(config)
with pytest.raises(ValueError): # noqa PT011
await get_workflow_output(
input_tables,
{
"steps": steps,
},
context=context,
)