import os
from dataclasses import dataclass
from typing import final

from lightrag.types import KnowledgeGraph, KnowledgeGraphNode, KnowledgeGraphEdge
from lightrag.utils import logger
from lightrag.base import BaseGraphStorage
from lightrag.constants import GRAPH_FIELD_SEP

import networkx as nx

from .shared_storage import (
    get_storage_lock,
    get_update_flag,
    set_all_update_flags,
)

from dotenv import load_dotenv

# Use the .env file inside the current folder, which allows a different .env
# file per LightRAG instance; OS environment variables take precedence over
# values from the .env file.
load_dotenv(dotenv_path=".env", override=False)

@final
@dataclass
class NetworkXStorage(BaseGraphStorage):
    @staticmethod
    def load_nx_graph(file_name) -> nx.Graph | None:
        if os.path.exists(file_name):
            return nx.read_graphml(file_name)
        return None

    @staticmethod
    def write_nx_graph(graph: nx.Graph, file_name, workspace="_"):
        logger.info(
            f"[{workspace}] Writing graph with {graph.number_of_nodes()} nodes, {graph.number_of_edges()} edges"
        )
        nx.write_graphml(graph, file_name)
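
    # Round-trip sketch for the two helpers above (illustrative only, not part
    # of the storage API); GraphML preserves node/edge attributes across the
    # write/load cycle:
    #
    #     g = nx.Graph()
    #     g.add_edge("Alice", "Bob", relation="knows")
    #     NetworkXStorage.write_nx_graph(g, "demo.graphml")
    #     g2 = NetworkXStorage.load_nx_graph("demo.graphml")
    #     assert g2.edges["Alice", "Bob"]["relation"] == "knows"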

    def __post_init__(self):
        working_dir = self.global_config["working_dir"]
        if self.workspace:
            # Include workspace in the file path for data isolation
            workspace_dir = os.path.join(working_dir, self.workspace)
            self.final_namespace = f"{self.workspace}_{self.namespace}"
        else:
            # Default behavior when workspace is empty
            self.final_namespace = self.namespace
            workspace_dir = working_dir
            self.workspace = "_"
        os.makedirs(workspace_dir, exist_ok=True)
        self._graphml_xml_file = os.path.join(
            workspace_dir, f"graph_{self.namespace}.graphml"
        )
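        # With workspace "w1" and namespace "kg" (hypothetical values), the
        # file resolves to <working_dir>/w1/graph_kg.graphml; without a
        # workspace it resolves to <working_dir>/graph_kg.graphml.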
        self._storage_lock = None
        self.storage_updated = None
        self._graph = None

        # Load initial graph
        preloaded_graph = NetworkXStorage.load_nx_graph(self._graphml_xml_file)
        if preloaded_graph is not None:
            logger.info(
                f"[{self.workspace}] Loaded graph from {self._graphml_xml_file} with {preloaded_graph.number_of_nodes()} nodes, {preloaded_graph.number_of_edges()} edges"
            )
        else:
            logger.info(
                f"[{self.workspace}] Created new empty graph file: {self._graphml_xml_file}"
            )
        self._graph = preloaded_graph or nx.Graph()

    async def initialize(self):
        """Initialize storage data"""
        # Get the update flag for cross-process update notification
        self.storage_updated = await get_update_flag(self.final_namespace)
        # Get the storage lock for use in other methods
        self._storage_lock = get_storage_lock()

    async def _get_graph(self):
        """Return the in-memory graph, reloading it first if another process has updated the storage"""
        # Acquire lock to prevent concurrent read and write
        async with self._storage_lock:
            # Check if data needs to be reloaded
            if self.storage_updated.value:
                logger.info(
                    f"[{self.workspace}] Process {os.getpid()} reloading graph {self._graphml_xml_file} due to modifications by another process"
                )
                # Reload data
                self._graph = (
                    NetworkXStorage.load_nx_graph(self._graphml_xml_file) or nx.Graph()
                )
                # Reset update flag
                self.storage_updated.value = False

            return self._graph
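
    # How the pieces fit together: after a writer persists the graph it calls
    # set_all_update_flags(final_namespace), which marks storage_updated in the
    # other processes sharing that namespace; each reader's next _get_graph()
    # then reloads the GraphML file and clears its own flag, so readers never
    # serve a stale in-memory copy.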

    async def has_node(self, node_id: str) -> bool:
        graph = await self._get_graph()
        return graph.has_node(node_id)

    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
        graph = await self._get_graph()
        return graph.has_edge(source_node_id, target_node_id)

    async def get_node(self, node_id: str) -> dict[str, str] | None:
        graph = await self._get_graph()
        return graph.nodes.get(node_id)

    async def node_degree(self, node_id: str) -> int:
        graph = await self._get_graph()
        return graph.degree(node_id)

    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
        graph = await self._get_graph()
        src_degree = graph.degree(src_id) if graph.has_node(src_id) else 0
        tgt_degree = graph.degree(tgt_id) if graph.has_node(tgt_id) else 0
        return src_degree + tgt_degree

    async def get_edge(
        self, source_node_id: str, target_node_id: str
    ) -> dict[str, str] | None:
        graph = await self._get_graph()
        return graph.edges.get((source_node_id, target_node_id))

    async def get_node_edges(self, source_node_id: str) -> list[tuple[str, str]] | None:
        graph = await self._get_graph()
        if graph.has_node(source_node_id):
            return list(graph.edges(source_node_id))
        return None

    async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None:
        """
        Important notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before index_done_callback;
           KG-storage-log should be used to avoid data corruption
        """
        graph = await self._get_graph()
        graph.add_node(node_id, **node_data)

    async def upsert_edge(
        self, source_node_id: str, target_node_id: str, edge_data: dict[str, str]
    ) -> None:
        """
        Important notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before index_done_callback;
           KG-storage-log should be used to avoid data corruption
        """
        graph = await self._get_graph()
        graph.add_edge(source_node_id, target_node_id, **edge_data)
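
    # Upsert semantics come from networkx itself: add_node/add_edge on an
    # existing node or edge updates its attribute dict in place, so e.g.
    # add_node("n1", role="a") followed by add_node("n1", role="b") leaves a
    # single node "n1" whose role is "b" (hypothetical attribute names).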

    async def delete_node(self, node_id: str) -> None:
        """
        Important notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before index_done_callback;
           KG-storage-log should be used to avoid data corruption
        """
        graph = await self._get_graph()
        if graph.has_node(node_id):
            graph.remove_node(node_id)
            logger.debug(f"[{self.workspace}] Node {node_id} deleted from the graph")
        else:
            logger.warning(
                f"[{self.workspace}] Node {node_id} not found in the graph for deletion"
            )

    async def remove_nodes(self, nodes: list[str]):
        """Delete multiple nodes

        Important notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before index_done_callback;
           KG-storage-log should be used to avoid data corruption

        Args:
            nodes: List of node IDs to be deleted
        """
        graph = await self._get_graph()
        for node in nodes:
            if graph.has_node(node):
                graph.remove_node(node)

    async def remove_edges(self, edges: list[tuple[str, str]]):
        """Delete multiple edges

        Important notes:
        1. Changes will be persisted to disk during the next index_done_callback
        2. Only one process should update the storage at a time before index_done_callback;
           KG-storage-log should be used to avoid data corruption

        Args:
            edges: List of edges to be deleted, each edge is a (source, target) tuple
        """
        graph = await self._get_graph()
        for source, target in edges:
            if graph.has_edge(source, target):
                graph.remove_edge(source, target)

    async def get_all_labels(self) -> list[str]:
        """
        Get all node labels in the graph

        Returns:
            [label1, label2, ...]  # Alphabetically sorted label list
        """
        graph = await self._get_graph()
        labels = set()
        for node in graph.nodes():
            labels.add(str(node))  # Add node id as a label

        # Return sorted list
        return sorted(list(labels))

    async def get_knowledge_graph(
        self,
        node_label: str,
        max_depth: int = 3,
        max_nodes: int | None = None,
    ) -> KnowledgeGraph:
        """
        Retrieve a connected subgraph of nodes where the label includes the specified `node_label`.

        Args:
            node_label: Label of the starting node; "*" means all nodes
            max_depth: Maximum depth of the subgraph, defaults to 3
            max_nodes: Maximum number of nodes returned by the BFS, defaults to 1000

        Returns:
            KnowledgeGraph object containing nodes and edges, with an is_truncated flag
            indicating whether the graph was truncated due to the max_nodes limit
        """
        # Get max_nodes from global_config if not provided
        if max_nodes is None:
            max_nodes = self.global_config.get("max_graph_nodes", 1000)
        else:
            # Limit max_nodes to not exceed global_config max_graph_nodes
            max_nodes = min(max_nodes, self.global_config.get("max_graph_nodes", 1000))
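        # e.g. a caller passing max_nodes=5000 while global_config carries
        # max_graph_nodes=1000 (hypothetical values) is clamped down to 1000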

        graph = await self._get_graph()

        result = KnowledgeGraph()

        # Handle special case for "*" label
        if node_label == "*":
            # Get degrees of all nodes
            degrees = dict(graph.degree())
            # Sort nodes by degree in descending order and take top max_nodes
            sorted_nodes = sorted(degrees.items(), key=lambda x: x[1], reverse=True)

            # Check if graph is truncated
            if len(sorted_nodes) > max_nodes:
                result.is_truncated = True
                logger.info(
                    f"[{self.workspace}] Graph truncated: {len(sorted_nodes)} nodes found, limited to {max_nodes}"
                )

            limited_nodes = [node for node, _ in sorted_nodes[:max_nodes]]
            # Create subgraph with the highest degree nodes
            subgraph = graph.subgraph(limited_nodes)
        else:
            # Check if node exists
            if node_label not in graph:
                logger.warning(
                    f"[{self.workspace}] Node {node_label} not found in the graph"
                )
                return KnowledgeGraph()  # Return empty graph

            # Use modified BFS to get nodes, prioritizing high-degree nodes at the same depth
            bfs_nodes = []
            visited = set()
            # Store (node, depth, degree) in the queue
            queue = [(node_label, 0, graph.degree(node_label))]

            # Modified breadth-first search with degree-based prioritization
            while queue and len(bfs_nodes) < max_nodes:
                # Get the current depth from the first node in queue
                current_depth = queue[0][1]

                # Collect all nodes at the current depth
                current_level_nodes = []
                while queue and queue[0][1] == current_depth:
                    current_level_nodes.append(queue.pop(0))

                # Sort nodes at current depth by degree (highest first)
                current_level_nodes.sort(key=lambda x: x[2], reverse=True)

                # Process all nodes at current depth in order of degree
                for current_node, depth, degree in current_level_nodes:
                    if current_node not in visited:
                        visited.add(current_node)
                        bfs_nodes.append(current_node)

                        # Only explore neighbors if we haven't reached max_depth
                        if depth < max_depth:
                            # Add unvisited neighbor nodes to the queue with incremented depth
                            neighbors = list(graph.neighbors(current_node))
                            unvisited_neighbors = [
                                n for n in neighbors if n not in visited
                            ]
                            for neighbor in unvisited_neighbors:
                                neighbor_degree = graph.degree(neighbor)
                                queue.append((neighbor, depth + 1, neighbor_degree))

                        # Check if we've reached max_nodes
                        if len(bfs_nodes) >= max_nodes:
                            break

            # Check if graph is truncated - if we still have nodes in the queue
            # and we've reached max_nodes, then the graph is truncated
            if queue and len(bfs_nodes) >= max_nodes:
                result.is_truncated = True
                logger.info(
                    f"[{self.workspace}] Graph truncated: breadth-first search limited to {max_nodes} nodes"
                )

            # Create subgraph with BFS discovered nodes
            subgraph = graph.subgraph(bfs_nodes)
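            # Worked micro-example (hypothetical degrees): with max_nodes=3 and
            # depth-1 neighbors A(deg 5), B(deg 2), C(deg 4), the level is
            # visited as A, C, B, so truncation keeps the well-connected A and
            # C rather than the first two nodes discovered.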

        # Add nodes to result
        seen_nodes = set()
        seen_edges = set()
        for node in subgraph.nodes():
            if str(node) in seen_nodes:
                continue

            node_data = dict(subgraph.nodes[node])
            # Get entity_type as labels
            labels = []
            if "entity_type" in node_data:
                if isinstance(node_data["entity_type"], list):
                    labels.extend(node_data["entity_type"])
                else:
                    labels.append(node_data["entity_type"])

            # Create node with properties
            node_properties = {k: v for k, v in node_data.items()}
            result.nodes.append(
                KnowledgeGraphNode(
                    id=str(node), labels=[str(node)], properties=node_properties
                )
            )
            seen_nodes.add(str(node))

        # Add edges to result
        for edge in subgraph.edges():
            source, target = edge
            # Ensure a unique edge_id for the undirected graph
            if str(source) > str(target):
                source, target = target, source

            edge_id = f"{source}-{target}"
            if edge_id in seen_edges:
                continue

            edge_data = dict(subgraph.edges[edge])
            # Create edge with complete information
            result.edges.append(
                KnowledgeGraphEdge(
                    id=edge_id,
                    type="DIRECTED",
                    source=str(source),
                    target=str(target),
                    properties=edge_data,
                )
            )
            seen_edges.add(edge_id)

        logger.info(
            f"[{self.workspace}] Subgraph query successful | Node count: {len(result.nodes)} | Edge count: {len(result.edges)}"
        )
        return result

    async def get_nodes_by_chunk_ids(self, chunk_ids: list[str]) -> list[dict]:
        chunk_ids_set = set(chunk_ids)
        graph = await self._get_graph()
        matching_nodes = []
        for node_id, node_data in graph.nodes(data=True):
            if "source_id" in node_data:
                node_source_ids = set(node_data["source_id"].split(GRAPH_FIELD_SEP))
                if not node_source_ids.isdisjoint(chunk_ids_set):
                    node_data_with_id = node_data.copy()
                    node_data_with_id["id"] = node_id
                    matching_nodes.append(node_data_with_id)
        return matching_nodes

    async def get_edges_by_chunk_ids(self, chunk_ids: list[str]) -> list[dict]:
        chunk_ids_set = set(chunk_ids)
        graph = await self._get_graph()
        matching_edges = []
        for u, v, edge_data in graph.edges(data=True):
            if "source_id" in edge_data:
                edge_source_ids = set(edge_data["source_id"].split(GRAPH_FIELD_SEP))
                if not edge_source_ids.isdisjoint(chunk_ids_set):
                    edge_data_with_nodes = edge_data.copy()
                    edge_data_with_nodes["source"] = u
                    edge_data_with_nodes["target"] = v
                    matching_edges.append(edge_data_with_nodes)
        return matching_edges
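
    # Matching sketch: if GRAPH_FIELD_SEP were "<SEP>" (illustrative value), a
    # node whose source_id is "chunk-1<SEP>chunk-7" matches chunk_ids
    # ["chunk-7"], since {"chunk-1", "chunk-7"} and {"chunk-7"} intersect.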

    async def get_all_nodes(self) -> list[dict]:
        """Get all nodes in the graph.

        Returns:
            A list of all nodes, where each node is a dictionary of its properties
        """
        graph = await self._get_graph()
        all_nodes = []
        for node_id, node_data in graph.nodes(data=True):
            node_data_with_id = node_data.copy()
            node_data_with_id["id"] = node_id
            all_nodes.append(node_data_with_id)
        return all_nodes

    async def get_all_edges(self) -> list[dict]:
        """Get all edges in the graph.

        Returns:
            A list of all edges, where each edge is a dictionary of its properties
        """
        graph = await self._get_graph()
        all_edges = []
        for u, v, edge_data in graph.edges(data=True):
            edge_data_with_nodes = edge_data.copy()
            edge_data_with_nodes["source"] = u
            edge_data_with_nodes["target"] = v
            all_edges.append(edge_data_with_nodes)
        return all_edges

    async def index_done_callback(self) -> bool:
        """Save data to disk"""
        async with self._storage_lock:
            # Check if storage was updated by another process
            if self.storage_updated.value:
                # Storage was updated by another process; reload data instead of saving
                logger.info(
                    f"[{self.workspace}] Graph was updated by another process, reloading..."
                )
                self._graph = (
                    NetworkXStorage.load_nx_graph(self._graphml_xml_file) or nx.Graph()
                )
                # Reset update flag
                self.storage_updated.value = False
                return False  # Return error

        # Acquire lock and perform persistence
        async with self._storage_lock:
            try:
                # Save data to disk
                NetworkXStorage.write_nx_graph(
                    self._graph, self._graphml_xml_file, self.workspace
                )
                # Notify other processes that data has been updated
                await set_all_update_flags(self.final_namespace)
                # Reset own update flag to avoid self-reloading
                self.storage_updated.value = False
                return True  # Return success
            except Exception as e:
                logger.error(f"[{self.workspace}] Error saving graph: {e}")
                return False  # Return error

        return True
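
    # Reading index_done_callback's return value: True means this process's
    # in-memory graph was written to disk; False means either another process
    # had updated the file first (so the graph was reloaded instead of saved)
    # or the write itself failed.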

    async def drop(self) -> dict[str, str]:
        """Drop all graph data from storage and clean up resources

        This method will:
        1. Remove the graph storage file if it exists
        2. Reset the graph to an empty state
        3. Update flags to notify other processes
        4. Changes are persisted to disk immediately

        Returns:
            dict[str, str]: Operation status and message
            - On success: {"status": "success", "message": "data dropped"}
            - On failure: {"status": "error", "message": "<error details>"}
        """
        try:
            async with self._storage_lock:
                # Delete the graph file
                if os.path.exists(self._graphml_xml_file):
                    os.remove(self._graphml_xml_file)
                self._graph = nx.Graph()
                # Notify other processes that data has been updated
                await set_all_update_flags(self.final_namespace)
                # Reset own update flag to avoid self-reloading
                self.storage_updated.value = False

                logger.info(
                    f"[{self.workspace}] Process {os.getpid()} dropped graph file: {self._graphml_xml_file}"
                )
            return {"status": "success", "message": "data dropped"}
        except Exception as e:
            logger.error(
                f"[{self.workspace}] Error dropping graph file: {self._graphml_xml_file}: {e}"
            )
            return {"status": "error", "message": str(e)}
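
# ---------------------------------------------------------------------------
# Standalone sketch of the degree-prioritized BFS behind get_knowledge_graph,
# reduced to plain networkx so it runs without any LightRAG setup. The demo
# graph and limits are hypothetical; the traversal mirrors the loop above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    def degree_prioritized_bfs(graph, start, max_depth=3, max_nodes=4):
        """Level-order BFS that visits high-degree nodes first within each depth."""
        bfs_nodes, visited = [], set()
        queue = [(start, 0, graph.degree(start))]
        while queue and len(bfs_nodes) < max_nodes:
            depth = queue[0][1]
            # Drain the whole level, then sort it by degree (highest first)
            level = []
            while queue and queue[0][1] == depth:
                level.append(queue.pop(0))
            level.sort(key=lambda x: x[2], reverse=True)
            for node, d, _ in level:
                if node in visited:
                    continue
                visited.add(node)
                bfs_nodes.append(node)
                if d < max_depth:
                    queue.extend(
                        (n, d + 1, graph.degree(n))
                        for n in graph.neighbors(node)
                        if n not in visited
                    )
                if len(bfs_nodes) >= max_nodes:
                    break
        return bfs_nodes

    demo = nx.Graph()
    demo.add_edges_from(
        [("hub", "a"), ("hub", "b"), ("a", "b"), ("a", "c"), ("b", "d"), ("d", "e")]
    )
    # Truncation keeps the better-connected "d" (degree 2) over "c" (degree 1):
    # prints ['hub', 'a', 'b', 'd']
    print(degree_prioritized_bfs(demo, "hub"))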