# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

import pytest
from datashaper.errors import VerbParallelizationError

from graphrag.config.enums import LLMType
from graphrag.index.graph.extractors.community_reports.community_reports_extractor import (
    CommunityReportResponse,
    FindingModel,
)
from graphrag.index.workflows.v1.create_final_community_reports import (
    build_steps,
    workflow_name,
)

from .util import (
    compare_outputs,
    get_config_for_workflow,
    get_workflow_output,
    load_input_tables,
    load_test_table,
)

# Static response replayed by a mock LLM in place of real model output.
MOCK_RESPONSES = [
    CommunityReportResponse(
        title="",
        summary="",
        rating=2,
        rating_explanation="",
        findings=[
            FindingModel(summary="", explanation=""),
        ],
    )
]


async def test_create_final_community_reports_missing_llm_throws():
    input_tables = load_input_tables([
        "workflow:create_final_nodes",
        "workflow:create_final_covariates",
        "workflow:create_final_relationships",
        "workflow:create_final_entities",
        "workflow:create_final_communities",
    ])

    config = get_config_for_workflow(workflow_name)

    # Deleting the llm config results in a default mock injection in
    # run_graph_intelligence, whose failure surfaces from the parallelized verb.
    del config["create_community_reports"]["strategy"]["llm"]

    steps = build_steps(config)

    with pytest.raises(VerbParallelizationError):
        await get_workflow_output(
            input_tables,
            {
                "steps": steps,
            },
        )
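

# ---------------------------------------------------------------------------
# A minimal happy-path sketch showing how MOCK_RESPONSES is typically wired
# in: the strategy's "llm" entry is replaced with a static-response mock so
# the workflow runs without a real model. The test name, the mock config
# shape ({"type": LLMType.StaticResponse, "responses": ..., "parse_json": ...}),
# and the compared column names are assumptions, not part of the test above;
# adjust them to match the actual fixtures if they differ.
# ---------------------------------------------------------------------------


async def test_create_final_community_reports_with_mock_llm():
    input_tables = load_input_tables([
        "workflow:create_final_nodes",
        "workflow:create_final_covariates",
        "workflow:create_final_relationships",
        "workflow:create_final_entities",
        "workflow:create_final_communities",
    ])
    expected = load_test_table(workflow_name)

    config = get_config_for_workflow(workflow_name)

    # Point the strategy at a mock LLM that replays MOCK_RESPONSES verbatim.
    config["create_community_reports"]["strategy"]["llm"] = {
        "type": LLMType.StaticResponse,
        "responses": MOCK_RESPONSES,
        "parse_json": True,  # assumed flag: responses are structured model objects
    }

    steps = build_steps(config)

    actual = await get_workflow_output(
        input_tables,
        {
            "steps": steps,
        },
    )

    # Most of this table is LLM-generated, so only compare columns that do
    # not come from the (mocked) model output.
    compare_outputs(actual, expected, columns=["community", "level"])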