graphrag/tests/verbs/test_generate_text_embeddings.py


# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

from io import BytesIO

import pandas as pd

from graphrag.index.config.embeddings import (
    all_embeddings,
)
from graphrag.index.run.utils import create_run_context
from graphrag.index.workflows.v1.generate_text_embeddings import (
    build_steps,
    workflow_name,
)

from .util import (
    get_config_for_workflow,
    get_workflow_output,
    load_input_tables,
)


async def test_generate_text_embeddings():
    input_tables = load_input_tables(
        inputs=[
            "workflow:create_final_documents",
            "workflow:create_final_relationships",
            "workflow:create_final_text_units",
            "workflow:create_final_entities",
            "workflow:create_final_community_reports",
        ]
    )
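    # the run context provides the storage that the embedding snapshots are written to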
    context = create_run_context(None, None, None)

    config = get_config_for_workflow(workflow_name)
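    # adjust the config for this test: use the mock embedding strategy, snapshot
    # the embedding tables to storage, and embed every available field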
config["text_embed"]["strategy"]["type"] = "mock"
config["snapshot_embeddings"] = True
config["embedded_fields"] = all_embeddings
steps = build_steps(config)
await get_workflow_output(
input_tables,
{
"steps": steps,
},
context,
)
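
    # every embedded field should have been snapshotted to its own parquet file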
    parquet_files = context.storage.keys()

    for field in all_embeddings:
        assert f"embeddings.{field}.parquet" in parquet_files

    # entity description should always be here, let's assert its format
    entity_description_embeddings_buffer = BytesIO(
        await context.storage.get(
            "embeddings.entity.description.parquet", as_bytes=True
        )
    )
    entity_description_embeddings = pd.read_parquet(
        entity_description_embeddings_buffer
    )
    assert len(entity_description_embeddings.columns) == 2
    assert "id" in entity_description_embeddings.columns
    assert "embedding" in entity_description_embeddings.columns

    # every other embedding is optional but we've turned them all on, so check a random one
    document_text_embeddings_buffer = BytesIO(
        await context.storage.get("embeddings.document.text.parquet", as_bytes=True)
    )
    document_text_embeddings = pd.read_parquet(document_text_embeddings_buffer)
    assert len(document_text_embeddings.columns) == 2
    assert "id" in document_text_embeddings.columns
    assert "embedding" in document_text_embeddings.columns