graphrag/tests/verbs/test_generate_text_embeddings.py
gaudyb 17658c5df8
New workflow to generate embeddings in a single workflow (#1296)
* New workflow to generate embeddings in a single workflow

* New workflow to generate embeddings in a single workflow

* version change

* clean tests without any embeddings references

* clean tests without any embeddings references

* remove code

* feedback implemented

* changes in logic

* feedback implemented

* store in table bug fixed

* smoke test for generate_text_embeddings workflow

* smoke test fix

* add generate_text_embeddings to the list of transient workflows

* smoke tests

* fix

* ruff formatting updates

* fix

* smoke test fixed

* smoke test fixed

* fix lancedb import

* smoke test fix

* ignore sorting

* smoke test fixed

* smoke test fixed

* check smoke test

* smoke test fixed

* change config for vector store

* format fix

* vector store changes

* revert debug profile back to empty filepath

* merge conflict solved

* merge conflict solved

* format fixed

* format fixed

* fix return dataframe

* snapshot fix

* format fix

* embeddings param implemented

* validation fixes

* fix map

* fix map

* fix properties

* config updates

* smoke test fixed

* settings change

* Update collection config and rework back-compat

* Replace . with - for embedding store

---------

Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
Co-authored-by: Josh Bradley <joshbradley@microsoft.com>
Co-authored-by: Nathan Evans <github@talkswithnumbers.com>
2024-11-01 15:01:35 -07:00

# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

from io import BytesIO

import pandas as pd

from graphrag.index.config.embeddings import (
    all_embeddings,
)
from graphrag.index.run.utils import create_run_context
from graphrag.index.workflows.v1.generate_text_embeddings import (
    build_steps,
    workflow_name,
)

from .util import (
    get_config_for_workflow,
    get_workflow_output,
    load_input_tables,
)

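# This test exercises the generate_text_embeddings workflow end to end against the
# shared verb-test fixture tables, using a mock embedding strategy so that no real
# embedding model is called.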
async def test_generate_text_embeddings():
    input_tables = load_input_tables(
        inputs=[
            "workflow:create_final_documents",
            "workflow:create_final_relationships",
            "workflow:create_final_text_units",
            "workflow:create_final_entities",
            "workflow:create_final_community_reports",
        ]
    )

    context = create_run_context(None, None, None)

    config = get_config_for_workflow(workflow_name)
    config["text_embed"]["strategy"]["type"] = "mock"
    config["snapshot_embeddings"] = True
    config["embedded_fields"] = all_embeddings

    steps = build_steps(config)

    await get_workflow_output(
        input_tables,
        {
            "steps": steps,
        },
        context,
    )

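    # With snapshot_embeddings enabled, each embedding table is persisted to the
    # run's storage as embeddings.<name>.parquet; verify that every configured
    # embedding produced a snapshot.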
    parquet_files = context.storage.keys()

    for field in all_embeddings:
        assert f"embeddings.{field}.parquet" in parquet_files

    # entity description should always be here, let's assert its format
    entity_description_embeddings_buffer = BytesIO(
        await context.storage.get(
            "embeddings.entity.description.parquet", as_bytes=True
        )
    )
    entity_description_embeddings = pd.read_parquet(
        entity_description_embeddings_buffer
    )
    assert len(entity_description_embeddings.columns) == 2
    assert "id" in entity_description_embeddings.columns
    assert "embedding" in entity_description_embeddings.columns

    # every other embedding is optional, but we've turned them all on, so check a random one
    document_raw_content_embeddings_buffer = BytesIO(
        await context.storage.get(
            "embeddings.document.raw_content.parquet", as_bytes=True
        )
    )
    document_raw_content_embeddings = pd.read_parquet(
        document_raw_content_embeddings_buffer
    )
    assert len(document_raw_content_embeddings.columns) == 2
    assert "id" in document_raw_content_embeddings.columns
    assert "embedding" in document_raw_content_embeddings.columns