Mirror of https://github.com/microsoft/graphrag.git, synced 2025-07-24 17:31:50 +00:00

* add fnllm; remove llm folder
* remove llm unit tests
* update imports
* update imports
* formatting
* enable autosave
* update mockllm
* update community reports extractor
* move most llm usage to fnllm
* update type issues
* fix unit tests
* type updates
* update dictionary
* semver
* update llm construction, get integration tests working
* load from llmparameters model
* move ruff settings to ruff.toml
* add gitattributes file
* ignore ruff.toml spelling
* update .gitattributes
* update gitignore
* update config construction
* update prompt var usage
* add cache adapter
* use cache adapter in embeddings calls (a generic sketch of this pattern follows the list)
* update embedding strategy
* add fnllm
* add pytest-dotenv
* fix some verb tests
* get verb tests running
* update ruff.toml for vscode
* enable ruff native server in vscode
* update artifact inspecting code
* remove local-test update
* use string.replace instead of string.format in community reports extractor
* bump timeout
* revert ruff.toml, vscode settings for another pr
* revert cspell config
* revert gitignore
* remove json-repair, update fnllm
* use fnllm generic type interfaces
* update load_llm to use target models
* consolidate chat parameters
* add 'extra_attributes' prop to community report response
* formatting
* update fnllm
* formatting
* formatting
* Add defaults to some llm params to avoid null on params hash
* Formatting

---------

Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
Co-authored-by: Josh Bradley <joshbradley@microsoft.com>
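The "add cache adapter" and "use cache adapter in embeddings calls" items above describe routing embedding requests through a cache. A minimal sketch of that general pattern is shown here; the class and method names (InMemoryCache, CachedEmbedder) are assumptions for illustration, not graphrag's or fnllm's actual API.

import hashlib
import json
from typing import Any


class InMemoryCache:
    """Toy cache backend; assumed interface is get/set by string key."""

    def __init__(self) -> None:
        self._store: dict[str, Any] = {}

    def get(self, key: str) -> Any | None:
        return self._store.get(key)

    def set(self, key: str, value: Any) -> None:
        self._store[key] = value


class CachedEmbedder:
    """Wrap an embedding callable so repeated requests hit the cache."""

    def __init__(self, embed_fn, cache: InMemoryCache) -> None:
        self._embed_fn = embed_fn
        self._cache = cache

    async def embed(self, text: str, **params: Any) -> list[float]:
        # Key on the full request payload so different params don't collide.
        key = hashlib.sha256(
            json.dumps({"text": text, **params}, sort_keys=True).encode()
        ).hexdigest()
        cached = self._cache.get(key)
        if cached is not None:
            return cached
        result = await self._embed_fn(text, **params)
        self._cache.set(key, result)
        return result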
106 lines
2.9 KiB
Python
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

import pytest

from datashaper.errors import VerbParallelizationError

from graphrag.config.enums import LLMType
from graphrag.index.graph.extractors.community_reports.community_reports_extractor import (
    CommunityReportResponse,
    FindingModel,
)
from graphrag.index.workflows.v1.create_final_community_reports import (
    build_steps,
    workflow_name,
)

from .util import (
    compare_outputs,
    get_config_for_workflow,
    get_workflow_output,
    load_input_tables,
    load_test_table,
)

# Canned community report returned by the static-response mock LLM.
MOCK_RESPONSES = [
    CommunityReportResponse(
        title="<report_title>",
        summary="<executive_summary>",
        rating=2,
        rating_explanation="<rating_explanation>",
        findings=[
            FindingModel(
                summary="<insight_1_summary>", explanation="<insight_1_explanation>"
            ),
            FindingModel(
                summary="<insight_2_summary>", explanation="<insight_2_explanation>"
            ),
        ],
    )
]

MOCK_LLM_CONFIG = {
    "type": LLMType.StaticResponse,
    "responses": MOCK_RESPONSES,
    "parse_json": True,
}

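
# Illustrative stand-in (an assumption, not graphrag's or fnllm's class) for what
# the LLMType.StaticResponse strategy amounts to: replay the canned responses
# above instead of calling a real model. Shown only to document the mock strategy;
# the tests below use the real strategy selected via MOCK_LLM_CONFIG.
class _StaticResponseLLMSketch:
    def __init__(self, responses: list) -> None:
        self._responses = responses
        self._index = 0

    async def __call__(self, prompt: str, **kwargs) -> object:
        response = self._responses[self._index % len(self._responses)]
        self._index += 1
        return response

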
async def test_create_final_community_reports():
    input_tables = load_input_tables([
        "workflow:create_final_nodes",
        "workflow:create_final_covariates",
        "workflow:create_final_relationships",
        "workflow:create_final_entities",
        "workflow:create_final_communities",
    ])
    expected = load_test_table(workflow_name)

    config = get_config_for_workflow(workflow_name)

    config["create_community_reports"]["strategy"]["llm"] = MOCK_LLM_CONFIG

    steps = build_steps(config)

    actual = await get_workflow_output(
        input_tables,
        {
            "steps": steps,
        },
    )

    assert len(actual.columns) == len(expected.columns)

    # only assert a couple of columns that are not mock - most of this table is LLM-generated
    compare_outputs(actual, expected, columns=["community", "level"])

    # assert a handful of mock data items to confirm they get put in the right spot
    assert actual["rank"][:1][0] == 2
    assert actual["rank_explanation"][:1][0] == "<rating_explanation>"


async def test_create_final_community_reports_missing_llm_throws():
    input_tables = load_input_tables([
        "workflow:create_final_nodes",
        "workflow:create_final_covariates",
        "workflow:create_final_relationships",
        "workflow:create_final_entities",
        "workflow:create_final_communities",
    ])

    config = get_config_for_workflow(workflow_name)

    # deleting the llm config results in a default mock injection in run_graph_intelligence
    del config["create_community_reports"]["strategy"]["llm"]

    steps = build_steps(config)

    with pytest.raises(VerbParallelizationError):
        await get_workflow_output(
            input_tables,
            {
                "steps": steps,
            },
        )
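
A note on the final assertion: datashaper surfaces a failure raised inside a parallelized verb as a VerbParallelizationError rather than the original exception, which is the wrapper the last test expects when the workflow runs without a usable LLM config. A minimal sketch of that general error-aggregation pattern follows; the names here are assumptions for illustration, not datashaper's implementation.

import asyncio
from typing import Any, Coroutine


class ParallelizationError(Exception):
    """Stand-in for an aggregate error like VerbParallelizationError."""


async def run_parallel(tasks: list[Coroutine[Any, Any, Any]]) -> list[Any]:
    # Run all workers, collecting exceptions instead of failing fast.
    results = await asyncio.gather(*tasks, return_exceptions=True)
    errors = [r for r in results if isinstance(r, BaseException)]
    if errors:
        # Re-raise as a single wrapper so callers see one aggregate failure.
        raise ParallelizationError(f"{len(errors)} parallel task(s) failed") from errors[0]
    return results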