graphrag/tests/verbs/test_create_community_reports.py
Nathan Evans c02ab0984a
Streamline workflows (#1674)
* Remove create_final_nodes

* Rename final entity output to "entities"

* Remove duplicate code from graph extraction

* Rename create_final_relationships output to "relationships"

* Rename create_final_communities output to "communities"

* Combine compute_communities and create_final_communities

* Rename create_final_covariates output to "covariates"

* Rename create_final_community_reports output to "community_reports"

* Rename create_final_text_units output to "text_units"

* Rename create_final_documents output to "documents"

* Remove transient snapshots config

* Move create_final_entities to finalize_entities operation

* Move create_final_relationships flow to finalize_relationships operation

* Reuse some community report functions

* Collapse most of graph and text unit-based report generation

* Unify schemas files

* Move community reports extractor

* Move NLP report prompt to prompts folder

* Fix a few pandas warnings

* Rename embeddings config to embed_text

* Rename claim_extraction config to extract_claims

* Remove nltk from standard graph extraction

* Fix verb tests

* Fix extract graph config naming

* Fix moved file reference

* Create v1-to-v2 migration notebook

* Semver

* Fix smoke test artifact count

* Raise tpm/rpm on smoke tests

* Update drift settings for smoke tests

* Reuse project directory var in api notebook

* Format

* Format
2025-02-07 11:11:03 -08:00

# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

from graphrag.callbacks.noop_workflow_callbacks import NoopWorkflowCallbacks
from graphrag.config.create_graphrag_config import create_graphrag_config
from graphrag.config.enums import LLMType
from graphrag.index.operations.summarize_communities.community_reports_extractor import (
    CommunityReportResponse,
    FindingModel,
)
from graphrag.index.workflows.create_community_reports import (
    run_workflow,
)
from graphrag.utils.storage import load_table_from_storage

from .util import (
    DEFAULT_MODEL_CONFIG,
    compare_outputs,
    create_test_context,
    load_test_table,
)
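
# Canned extractor output used in place of a real LLM call; the test below wires
# this into a static-response mock model so the workflow runs deterministically.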
MOCK_RESPONSES = [
    CommunityReportResponse(
        title="<report_title>",
        summary="<executive_summary>",
        rating=2,
        rating_explanation="<rating_explanation>",
        findings=[
            FindingModel(
                summary="<insight_1_summary>", explanation="<insight_1_explanation>"
            ),
            FindingModel(
                summary="<insight_2_summary>", explanation="<insight_2_explanation>"
            ),
        ],
    )
]
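

# Exercises the create_community_reports workflow end to end: seed the upstream
# tables, swap the language model for a static-response mock, then verify the
# resulting "community_reports" table.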
async def test_create_community_reports():
    expected = load_test_table("community_reports")

    context = await create_test_context(
        storage=[
            "covariates",
            "relationships",
            "entities",
            "communities",
        ]
    )
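
    # point community report generation at a mock model: copy the configured model
    # settings, then override them to replay MOCK_RESPONSES as parsed JSON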
    config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})
    llm_settings = config.get_language_model_config(
        config.community_reports.model_id
    ).model_dump()
    llm_settings["type"] = LLMType.StaticResponse
    llm_settings["responses"] = MOCK_RESPONSES
    llm_settings["parse_json"] = True
    config.community_reports.strategy = {
        "type": "graph_intelligence",
        "llm": llm_settings,
    }
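
    # run the workflow; it reads the seeded tables from context storage and writes
    # the generated "community_reports" table back to that storage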
    await run_workflow(
        config,
        context,
        NoopWorkflowCallbacks(),
    )
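
    # read the generated table back from context storage and compare to the reference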
    actual = await load_table_from_storage("community_reports", context.storage)

    assert len(actual.columns) == len(expected.columns)

    # only assert a couple of columns that are not mock - most of this table is LLM-generated
    compare_outputs(actual, expected, columns=["community", "level"])

    # assert a handful of mock data items to confirm they get put in the right spot
    assert actual["rank"][:1][0] == 2
    assert actual["rank_explanation"][:1][0] == "<rating_explanation>"