2024-09-23 16:55:53 -07:00
|
|
|
# Copyright (c) 2024 Microsoft Corporation.
|
|
|
|
# Licensed under the MIT License
|
|
|
|
|
2025-01-03 13:59:26 -08:00
|
|
|
from graphrag.config.create_graphrag_config import create_graphrag_config
|
2025-02-07 11:11:03 -08:00
|
|
|
from graphrag.index.workflows.create_base_text_units import run_workflow
|
2025-01-03 13:59:26 -08:00
|
|
|
from graphrag.utils.storage import load_table_from_storage
|
2024-09-23 16:55:53 -07:00
|
|
|
|
|
|
|
from .util import (
|
2025-01-21 15:52:06 -08:00
|
|
|
DEFAULT_MODEL_CONFIG,
|
2024-09-23 16:55:53 -07:00
|
|
|
compare_outputs,
|
2025-01-03 13:59:26 -08:00
|
|
|
create_test_context,
|
2024-12-05 09:57:26 -08:00
|
|
|
load_test_table,
|
2025-02-13 17:03:51 -08:00
|
|
|
update_document_metadata,
|
2024-09-23 16:55:53 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
async def test_create_base_text_units():
    """Run the base text-unit chunking workflow and compare against the snapshot table."""
    context = await create_test_context()
    config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})

    await run_workflow(config, context)

    actual = await load_table_from_storage("text_units", context.storage)
    expected = load_test_table("text_units")
    # Only compare the columns this base workflow is responsible for producing.
    compare_outputs(actual, expected, columns=["text", "document_ids", "n_tokens"])
|
|
|
|
|
|
|
|
|
|
|
|
async def test_create_base_text_units_metadata():
    """Chunking with document metadata prepended to each chunk's text."""
    context = await create_test_context()

    config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})
    # The snapshot was generated with a 4o model, so the encoding must match
    # for the chunk boundaries to be identical.
    config.chunks.encoding_model = "o200k_base"
    config.chunks.prepend_metadata = True
    config.input.metadata = ["title"]

    await update_document_metadata(config.input.metadata, context)
    await run_workflow(config, context)

    actual = await load_table_from_storage("text_units", context.storage)
    expected = load_test_table("text_units_metadata")
    compare_outputs(actual, expected)
|
|
|
|
|
|
|
|
|
|
|
|
async def test_create_base_text_units_metadata_included_in_chunk():
    """Chunking where the prepended metadata counts toward the chunk size budget."""
    context = await create_test_context()

    config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})
    # The snapshot was generated with a 4o model, so the encoding must match
    # for the chunk boundaries to be identical.
    config.chunks.encoding_model = "o200k_base"
    config.chunks.prepend_metadata = True
    config.chunks.chunk_size_includes_metadata = True
    config.input.metadata = ["title"]

    await update_document_metadata(config.input.metadata, context)
    await run_workflow(config, context)

    actual = await load_table_from_storage("text_units", context.storage)
    expected = load_test_table("text_units_metadata_included_chunk")
    # Only check the columns from the base workflow - our expected table is the final and will have more
    compare_outputs(actual, expected, columns=["text", "document_ids", "n_tokens"])
|