# graphrag/tests/verbs/util.py


# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

from typing import cast

import pandas as pd
from datashaper import Workflow
from pandas.testing import assert_series_equal

from graphrag.config.create_graphrag_config import create_graphrag_config
from graphrag.index.config.workflow import PipelineWorkflowConfig
from graphrag.index.context import PipelineRunContext
from graphrag.index.create_pipeline_config import create_pipeline_config
from graphrag.index.run.utils import create_run_context

pd.set_option("display.max_columns", None)

def load_input_tables(inputs: list[str]) -> dict[str, pd.DataFrame]:
    """Harvest all the referenced input IDs from the workflow being tested and pass them here."""
    # stick all the inputs in a map - Workflow looks them up by name
    input_tables: dict[str, pd.DataFrame] = {}
    source = pd.read_parquet("tests/verbs/data/source_documents.parquet")
    input_tables["source"] = source
    for input in inputs:
        # remove the workflow: prefix if it exists, because that is not part of the actual table filename
        name = input.replace("workflow:", "")
        input_tables[input] = pd.read_parquet(f"tests/verbs/data/{name}.parquet")
    return input_tables
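

# A minimal usage sketch (illustrative, not part of the original suite); the
# "workflow:" input id below is a hypothetical example:
#
#     tables = load_input_tables(["workflow:create_base_entity_graph"])
#     base_graph = tables["workflow:create_base_entity_graph"]
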
def load_test_table(output: str) -> pd.DataFrame:
    """Load the expected output table (generally named for the workflow that produces it)."""
    return pd.read_parquet(f"tests/verbs/data/{output}.parquet")
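

# Usage sketch (the workflow name is an assumed example):
#
#     expected = load_test_table("create_final_entities")
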
def get_config_for_workflow(name: str) -> PipelineWorkflowConfig:
    """Instantiate the bare minimum config to get a default workflow config for testing."""
    config = create_graphrag_config()
    # this flag needs to be set before creating the pipeline config, or the entire covariate workflow will be excluded
    config.claim_extraction.enabled = True
    pipeline_config = create_pipeline_config(config)
    result = next(conf for conf in pipeline_config.workflows if conf.name == name)
    return cast("PipelineWorkflowConfig", result.config)
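

# Usage sketch (assumes the default pipeline defines a workflow with this
# name; shown only for shape):
#
#     config = get_config_for_workflow("create_final_entities")
#     # config is a plain dict of workflow settings; tests may tweak keys here
#     # before handing it to get_workflow_output
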
async def get_workflow_output(
    input_tables: dict[str, pd.DataFrame],
    schema: dict,
    context: PipelineRunContext | None = None,
) -> pd.DataFrame:
    """Run the workflow described by the schema against the input tables and return its default output."""
    # the bare minimum workflow is the pipeline schema and table context
    workflow = Workflow(
        schema=schema,
        input_tables=input_tables,
    )
    run_context = context or create_run_context(None, None, None)
    await workflow.run(context=run_context)
    # if there's only one output, it is the default here, no name required
    return cast("pd.DataFrame", workflow.output())
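

# Usage sketch inside an async test (the schema shape and names are assumed):
#
#     actual = await get_workflow_output(
#         input_tables,
#         {"steps": config["steps"]},
#     )
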
def compare_outputs(
    actual: pd.DataFrame, expected: pd.DataFrame, columns: list[str] | None = None
) -> None:
    """Compare the actual and expected dataframes, optionally specifying columns to compare.

    This uses assert_series_equal since we are sometimes intentionally omitting columns from the actual output.
    """
    cols = expected.columns if columns is None else columns
    assert len(actual) == len(expected), (
        f"Expected: {len(expected)} rows, Actual: {len(actual)} rows"
    )
    for column in cols:
        assert column in actual.columns, f"Missing column: {column}"
        try:
            # dtypes can differ since the test data is read from parquet and our workflow runs in memory
            assert_series_equal(
                actual[column], expected[column], check_dtype=False, check_index=False
            )
        except AssertionError:
            print("Expected:")
            print(expected[column])
            print("Actual:")
            print(actual[column])
            raise
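

# End-to-end sketch of how these helpers compose in a typical verb test.
# The workflow name and the "steps" schema key are illustrative assumptions:
#
#     async def test_create_final_entities():
#         input_tables = load_input_tables(["workflow:create_base_entity_graph"])
#         expected = load_test_table("create_final_entities")
#         config = get_config_for_workflow("create_final_entities")
#         actual = await get_workflow_output(
#             input_tables,
#             {"steps": config["steps"]},
#         )
#         compare_outputs(actual, expected)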