diff --git a/lightrag/api/README.md b/lightrag/api/README.md index 915ad7f3..4517a318 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -179,9 +179,9 @@ The command-line `workspace` argument and the `WORKSPACE` environment variable i - **For local file-based databases, data isolation is achieved through workspace subdirectories:** `JsonKVStorage`, `JsonDocStatusStorage`, `NetworkXStorage`, `NanoVectorDBStorage`, `FaissVectorDBStorage`. - **For databases that store data in collections, it's done by adding a workspace prefix to the collection name:** `RedisKVStorage`, `RedisDocStatusStorage`, `MilvusVectorDBStorage`, `QdrantVectorDBStorage`, `MongoKVStorage`, `MongoDocStatusStorage`, `MongoVectorDBStorage`, `MongoGraphStorage`, `PGGraphStorage`. - **For relational databases, data isolation is achieved by adding a `workspace` field to the tables for logical data separation:** `PGKVStorage`, `PGVectorStorage`, `PGDocStatusStorage`. -- **For the Neo4j graph database, logical data isolation is achieved through labels:** `Neo4JStorage` +- **For graph databases, logical data isolation is achieved through labels:** `Neo4JStorage`, `MemgraphStorage` -To maintain compatibility with legacy data, the default workspace for PostgreSQL is `default` and for Neo4j is `base` when no workspace is configured. For all external storages, the system provides dedicated workspace environment variables to override the common `WORKSPACE` environment variable configuration. These storage-specific workspace environment variables are: `REDIS_WORKSPACE`, `MILVUS_WORKSPACE`, `QDRANT_WORKSPACE`, `MONGODB_WORKSPACE`, `POSTGRES_WORKSPACE`, `NEO4J_WORKSPACE`. +To maintain compatibility with legacy data, the default workspace for PostgreSQL is `default` and for Neo4j is `base` when no workspace is configured. For all external storages, the system provides dedicated workspace environment variables to override the common `WORKSPACE` environment variable configuration. 
These storage-specific workspace environment variables are: `REDIS_WORKSPACE`, `MILVUS_WORKSPACE`, `QDRANT_WORKSPACE`, `MONGODB_WORKSPACE`, `POSTGRES_WORKSPACE`, `NEO4J_WORKSPACE`, `MEMGRAPH_WORKSPACE`. ### Multiple workers for Gunicorn + Uvicorn @@ -394,6 +394,7 @@ MongoKVStorage MongoDB NetworkXStorage NetworkX (default) Neo4JStorage Neo4J PGGraphStorage PostgreSQL with AGE plugin +MemgraphStorage Memgraph ``` > Testing has shown that Neo4J delivers superior performance in production environments compared to PostgreSQL with AGE plugin. diff --git a/lightrag/kg/memgraph_impl.py b/lightrag/kg/memgraph_impl.py index 8c6d6574..77e45a06 100644 --- a/lightrag/kg/memgraph_impl.py +++ b/lightrag/kg/memgraph_impl.py @@ -435,7 +435,7 @@ class MemgraphStorage(BaseGraphStorage): async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None: """ - Upsert a node in the Neo4j database. + Upsert a node in the Memgraph database. Args: node_id: The unique identifier for the node (used as label) @@ -448,7 +448,7 @@ class MemgraphStorage(BaseGraphStorage): properties = node_data entity_type = properties["entity_type"] if "entity_id" not in properties: - raise ValueError("Neo4j: node properties must contain an 'entity_id' field") + raise ValueError("Memgraph: node properties must contain an 'entity_id' field") try: async with self._driver.session(database=self._DATABASE) as session: @@ -817,28 +817,34 @@ class MemgraphStorage(BaseGraphStorage): WITH start CALL {{ WITH start - MATCH path = (start)-[*0..{max_depth}]-(node) + MATCH path = (start)-[*BFS 0..{max_depth}]-(node) WITH nodes(path) AS path_nodes, relationships(path) AS path_rels UNWIND path_nodes AS n WITH collect(DISTINCT n) AS all_nodes, collect(DISTINCT path_rels) AS all_rel_lists WITH all_nodes, reduce(r = [], x IN all_rel_lists | r + x) AS all_rels RETURN all_nodes, all_rels }} - WITH all_nodes AS nodes, all_rels AS relationships, size(all_nodes) AS total_nodes + WITH all_nodes AS nodes, all_rels AS 
relationships, size(all_nodes) AS total_nodes_found WITH - CASE - WHEN total_nodes <= {max_nodes} THEN nodes - ELSE nodes[0..{max_nodes}] - END AS limited_nodes, - relationships, - total_nodes, - total_nodes > {max_nodes} AS is_truncated + CASE + WHEN total_nodes_found <= {max_nodes} THEN nodes + ELSE nodes[0..{max_nodes}] + END AS limited_nodes, + relationships, + total_nodes_found, + total_nodes_found > {max_nodes} AS is_truncated + + UNWIND relationships AS rel + WITH limited_nodes, rel, total_nodes_found, is_truncated + WHERE startNode(rel) IN limited_nodes AND endNode(rel) IN limited_nodes + WITH limited_nodes, collect(DISTINCT rel) AS limited_relationships, total_nodes_found, is_truncated RETURN - [node IN limited_nodes | {{node: node}}] AS node_info, - relationships, - total_nodes, - is_truncated + [node IN limited_nodes | {{node: node}}] AS node_info, + limited_relationships AS relationships, + total_nodes_found, + is_truncated """ + result_set = None try: result_set = await session.run(