# graphrag/tests/verbs/test_create_final_covariates.py

# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
import pytest
from pandas.testing import assert_series_equal

from graphrag.callbacks.noop_verb_callbacks import NoopVerbCallbacks
from graphrag.config.create_graphrag_config import create_graphrag_config
from graphrag.config.enums import LLMType
from graphrag.index.run.derive_from_rows import ParallelizationError
from graphrag.index.workflows.create_final_covariates import (
    run_workflow,
    workflow_name,
)
from graphrag.utils.storage import load_table_from_storage

from .util import (
    create_test_context,
    load_test_table,
)
MOCK_LLM_RESPONSES = [
    """
(COMPANY A<|>GOVERNMENT AGENCY B<|>ANTI-COMPETITIVE PRACTICES<|>TRUE<|>2022-01-10T00:00:00<|>2022-01-10T00:00:00<|>Company A was found to engage in anti-competitive practices because it was fined for bid rigging in multiple public tenders published by Government Agency B according to an article published on 2022/01/10<|>According to an article published on 2022/01/10, Company A was fined for bid rigging while participating in multiple public tenders published by Government Agency B.)
""".strip()
]

MOCK_LLM_CONFIG = {"type": LLMType.StaticResponse, "responses": MOCK_LLM_RESPONSES}
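
# LLMType.StaticResponse replays the canned responses above instead of calling a
# real model, so the workflow runs offline and deterministically.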


async def test_create_final_covariates():
    input = load_test_table("create_base_text_units")
    expected = load_test_table(workflow_name)

    context = await create_test_context(
        storage=["create_base_text_units"],
    )

    config = create_graphrag_config()
    config.claim_extraction.strategy = {
        "type": "graph_intelligence",
        "llm": MOCK_LLM_CONFIG,
        "claim_description": "description",
    }

    await run_workflow(
        config,
        context,
        NoopVerbCallbacks(),
    )

    actual = await load_table_from_storage(workflow_name, context.storage)
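    # (run_workflow writes its output to context.storage keyed by workflow_name,
    # which is why the result is read back here rather than returned directly)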

    assert len(actual.columns) == len(expected.columns)

    # our mock returns exactly one covariate per text unit, so rows map 1:1 to the input text units
    assert len(actual) == len(input)

    # check the columns that covariates copy straight from the input
    assert_series_equal(actual["text_unit_id"], input["id"], check_names=False)

    # make sure the human-readable ids are incrementing
    assert actual["human_readable_id"][0] == 1
    assert actual["human_readable_id"][1] == 2

    # check that the mock data is parsed and inserted into the correct columns
    assert actual["covariate_type"][0] == "claim"
    assert actual["subject_id"][0] == "COMPANY A"
    assert actual["object_id"][0] == "GOVERNMENT AGENCY B"
    assert actual["type"][0] == "ANTI-COMPETITIVE PRACTICES"
    assert actual["status"][0] == "TRUE"
    assert actual["start_date"][0] == "2022-01-10T00:00:00"
    assert actual["end_date"][0] == "2022-01-10T00:00:00"
    assert (
        actual["description"][0]
        == "Company A was found to engage in anti-competitive practices because it was fined for bid rigging in multiple public tenders published by Government Agency B according to an article published on 2022/01/10"
    )
    assert (
        actual["source_text"][0]
        == "According to an article published on 2022/01/10, Company A was fined for bid rigging while participating in multiple public tenders published by Government Agency B."
    )


async def test_create_final_covariates_missing_llm_throws():
    context = await create_test_context(
        storage=["create_base_text_units"],
    )

    config = create_graphrag_config()
    config.claim_extraction.strategy = {
        "type": "graph_intelligence",
        "claim_description": "description",
    }
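
    # With no "llm" entry in the strategy, each row's claim extraction fails;
    # derive_from_rows is expected to surface those per-row failures as a
    # single ParallelizationError.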
    with pytest.raises(ParallelizationError):
        await run_workflow(
            config,
            context,
            NoopVerbCallbacks(),
        )