import os
import sys
import asyncio
from multiprocessing.synchronize import Lock as ProcessLock
from multiprocessing import Manager
from typing import Any, Dict, Optional, Union, TypeVar, Generic

# Direct printing helper for critical logs that must be visible in every
# process (normal logging handlers may not be configured in all of them).
def direct_log(message, level="INFO"):
    """
    Write a log line straight to stderr so it is visible in all processes,
    including the Gunicorn master process.
    """
    line = f"{level}: {message}"
    print(line, file=sys.stderr, flush=True)
T = TypeVar("T")
|
2025-03-01 10:48:55 +08:00
|
|
|
LockType = Union[ProcessLock, asyncio.Lock]
|
|
|
|
|
|
|
|
is_multiprocess = None
|
|
|
|
_workers = None
|
|
|
|
_manager = None
|
|
|
|
_initialized = None
|
|
|
|
|
|
|
|
# shared data for storage across processes
|
|
|
|
_shared_dicts: Optional[Dict[str, Any]] = None
|
|
|
|
_init_flags: Optional[Dict[str, bool]] = None # namespace -> initialized
|
2025-03-01 16:23:34 +08:00
|
|
|
_update_flags: Optional[Dict[str, bool]] = None # namespace -> updated
|
2025-03-01 10:48:55 +08:00
|
|
|
|
|
|
|
# locks for mutex access
|
|
|
|
_storage_lock: Optional[LockType] = None
|
|
|
|
_internal_lock: Optional[LockType] = None
|
|
|
|
_pipeline_status_lock: Optional[LockType] = None
|
2025-03-01 02:22:35 +08:00
|
|
|
|
2025-03-01 16:23:34 +08:00
|
|
|
|
2025-03-01 02:22:35 +08:00
|
|
|
class UnifiedLock(Generic[T]):
|
2025-03-01 05:01:26 +08:00
|
|
|
"""Provide a unified lock interface type for asyncio.Lock and multiprocessing.Lock"""
|
2025-03-01 16:23:34 +08:00
|
|
|
|
2025-03-01 02:22:35 +08:00
|
|
|
def __init__(self, lock: Union[ProcessLock, asyncio.Lock], is_async: bool):
|
|
|
|
self._lock = lock
|
|
|
|
self._is_async = is_async
|
|
|
|
|
2025-03-01 16:23:34 +08:00
|
|
|
async def __aenter__(self) -> "UnifiedLock[T]":
|
2025-03-01 02:22:35 +08:00
|
|
|
if self._is_async:
|
|
|
|
await self._lock.acquire()
|
|
|
|
else:
|
|
|
|
self._lock.acquire()
|
|
|
|
return self
|
|
|
|
|
|
|
|
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
|
|
|
if self._is_async:
|
|
|
|
self._lock.release()
|
|
|
|
else:
|
|
|
|
self._lock.release()
|
|
|
|
|
2025-03-01 16:23:34 +08:00
|
|
|
def __enter__(self) -> "UnifiedLock[T]":
|
2025-03-01 05:01:26 +08:00
|
|
|
"""For backward compatibility"""
|
2025-03-01 02:22:35 +08:00
|
|
|
if self._is_async:
|
2025-03-01 10:48:55 +08:00
|
|
|
raise RuntimeError("Use 'async with' for shared_storage lock")
|
2025-03-01 02:22:35 +08:00
|
|
|
self._lock.acquire()
|
|
|
|
return self
|
|
|
|
|
|
|
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
2025-03-01 05:01:26 +08:00
|
|
|
"""For backward compatibility"""
|
2025-03-01 02:22:35 +08:00
|
|
|
if self._is_async:
|
2025-03-01 10:48:55 +08:00
|
|
|
raise RuntimeError("Use 'async with' for shared_storage lock")
|
2025-03-01 02:22:35 +08:00
|
|
|
self._lock.release()
|
|
|
|
|
|
|
|
|
2025-03-01 10:48:55 +08:00
|
|
|
def get_internal_lock() -> UnifiedLock:
    """Return the unified lock guarding internal shared-storage bookkeeping."""
    return UnifiedLock(
        lock=_internal_lock,
        is_async=not is_multiprocess,
    )
def get_storage_lock() -> UnifiedLock:
    """Return the unified lock guarding shared storage data for consistency."""
    return UnifiedLock(
        lock=_storage_lock,
        is_async=not is_multiprocess,
    )
def get_pipeline_status_lock() -> UnifiedLock:
    """Return the unified lock guarding the pipeline status namespace."""
    return UnifiedLock(
        lock=_pipeline_status_lock,
        is_async=not is_multiprocess,
    )
def initialize_share_data(workers: int = 1):
    """
    Initialize shared storage data for single or multi-process mode.

    When used with Gunicorn's preload feature, this function is called once in the
    master process before forking worker processes, allowing all workers to share
    the same initialized data.

    In single-process mode, this function is called during LightRAG object initialization.

    The function determines whether to use cross-process shared variables for data storage
    based on the number of workers. If workers=1, it uses thread locks and local dictionaries.
    If workers>1, it uses process locks and shared dictionaries managed by multiprocessing.Manager.

    Args:
        workers (int): Number of worker processes. If 1, single-process mode is used.
            If > 1, multi-process mode with shared memory is used.
    """
    global \
        _manager, \
        _workers, \
        is_multiprocess, \
        _storage_lock, \
        _internal_lock, \
        _pipeline_status_lock, \
        _shared_dicts, \
        _init_flags, \
        _initialized, \
        _update_flags

    # Check if already initialized; repeated calls are a benign no-op.
    if _initialized:
        direct_log(
            f"Process {os.getpid()} Shared-Data already initialized (multiprocess={is_multiprocess})"
        )
        return

    _workers = workers

    if workers > 1:
        is_multiprocess = True
        # Only spawn the Manager process when it is actually needed. Creating
        # it unconditionally leaked an unused Manager process in single-process
        # mode, since finalize_share_data() only shuts the Manager down when
        # is_multiprocess is True.
        _manager = Manager()
        _internal_lock = _manager.Lock()
        _storage_lock = _manager.Lock()
        _pipeline_status_lock = _manager.Lock()
        _shared_dicts = _manager.dict()
        _init_flags = _manager.dict()
        _update_flags = _manager.dict()
        direct_log(
            f"Process {os.getpid()} Shared-Data created for Multiple Process (workers={workers})"
        )
    else:
        is_multiprocess = False
        _internal_lock = asyncio.Lock()
        _storage_lock = asyncio.Lock()
        _pipeline_status_lock = asyncio.Lock()
        _shared_dicts = {}
        _init_flags = {}
        _update_flags = {}
        direct_log(f"Process {os.getpid()} Shared-Data created for Single Process")

    # Mark as initialized
    _initialized = True
async def initialize_pipeline_status():
    """
    Initialize pipeline namespace with default values.
    """
    pipeline_namespace = await get_namespace_data("pipeline_status")

    async with get_internal_lock():
        # The "busy" key doubles as the initialization sentinel: if it is
        # present, another worker already set the defaults.
        if "busy" in pipeline_namespace:
            return

        # history_messages must be a cross-process shared list in
        # multiprocess mode (use a shared list object).
        if is_multiprocess:
            history_messages = _manager.list()
        else:
            history_messages = []

        defaults = {
            "busy": False,  # Control concurrent processes
            "job_name": "Default Job",  # Current job name (indexing files/indexing texts)
            "job_start": None,  # Job start time
            "docs": 0,  # Total number of documents to be indexed
            "batchs": 0,  # Number of batches for processing documents
            "cur_batch": 0,  # Current processing batch
            "request_pending": False,  # Flag for pending request for processing
            "latest_message": "",  # Latest message from pipeline processing
            "history_messages": history_messages,  # shared list object
        }
        pipeline_namespace.update(defaults)
        direct_log(f"Process {os.getpid()} Pipeline namespace initialized")
async def get_update_flag(namespace: str):
    """
    Create an update flag for one worker within a namespace.

    Return the new flag to the caller for later reference or reset.
    """
    global _update_flags
    if _update_flags is None:
        raise ValueError("Try to create namespace before Shared-Data is initialized")

    async with get_internal_lock():
        use_manager = is_multiprocess and _manager is not None

        if namespace not in _update_flags:
            # First flag for this namespace: create its (possibly shared) container.
            _update_flags[namespace] = _manager.list() if use_manager else []
            direct_log(
                f"Process {os.getpid()} initialized updated flags for namespace: [{namespace}]"
            )

        # In multiprocess mode each flag is a shared boolean Value so other
        # workers can flip it; in single-process mode a plain bool suffices.
        if use_manager:
            new_update_flag = _manager.Value("b", False)
        else:
            new_update_flag = False

        _update_flags[namespace].append(new_update_flag)
        return new_update_flag
async def set_all_update_flags(namespace: str):
    """Set every worker's update flag in a namespace, signalling that all workers need to reload data from files."""
    global _update_flags
    if _update_flags is None:
        raise ValueError("Try to create namespace before Shared-Data is initialized")

    async with get_internal_lock():
        if namespace not in _update_flags:
            raise ValueError(f"Namespace {namespace} not found in update flags")
        # Update flags for both modes
        flags = _update_flags[namespace]
        for idx in range(len(flags)):
            if is_multiprocess:
                # Shared Value proxy: flip its .value in place.
                flags[idx].value = True
            else:
                # Plain bool: replace the list element.
                flags[idx] = True
async def get_all_update_flags_status() -> Dict[str, list]:
    """
    Get update flags status for all namespaces.

    Returns:
        Dict[str, list]: A dictionary mapping namespace names to lists of update flag statuses
    """
    # Before initialization there is nothing to report.
    if _update_flags is None:
        return {}

    result = {}
    async with get_internal_lock():
        for namespace, flags in _update_flags.items():
            if is_multiprocess:
                # Shared Value proxies: read their .value attribute.
                result[namespace] = [flag.value for flag in flags]
            else:
                # Plain bools: copy the list.
                result[namespace] = list(flags)
    return result
def try_initialize_namespace(namespace: str) -> bool:
    """
    Try to initialize a namespace. Returns True if the current process gets initialization permission.
    Uses atomic operations on shared dictionaries to ensure only one process can successfully initialize.
    """
    global _init_flags, _manager

    if _init_flags is None:
        # Fixed typo in the error message ("nanmespace" -> "namespace"),
        # matching the wording used by get_update_flag / set_all_update_flags.
        raise ValueError("Try to create namespace before Shared-Data is initialized")

    # NOTE(review): the membership test and the assignment below are two
    # separate Manager round-trips, so this is not strictly atomic across
    # processes — confirm callers tolerate a rare double-initialization.
    if namespace not in _init_flags:
        _init_flags[namespace] = True
        direct_log(
            f"Process {os.getpid()} ready to initialize storage namespace: [{namespace}]"
        )
        return True
    direct_log(
        f"Process {os.getpid()} storage namespace already initialized: [{namespace}]"
    )
    return False
async def get_namespace_data(namespace: str) -> Dict[str, Any]:
    """get storage space for specific storage type(namespace)"""
    if _shared_dicts is None:
        # Fixed garbled log text ("try to getnanmespace" -> "try to get namespace").
        direct_log(
            f"Error: try to get namespace before Shared-Data is initialized, pid={os.getpid()}",
            level="ERROR",
        )
        raise ValueError("Shared dictionaries not initialized")

    async with get_internal_lock():
        # Lazily create the namespace dict on first access; in multiprocess
        # mode it must be a Manager dict so all workers see the same data.
        if namespace not in _shared_dicts:
            if is_multiprocess and _manager is not None:
                _shared_dicts[namespace] = _manager.dict()
            else:
                _shared_dicts[namespace] = {}

    return _shared_dicts[namespace]
def finalize_share_data():
    """
    Release shared resources and clean up.

    This function should be called when the application is shutting down
    to properly release shared resources and avoid memory leaks.

    In multi-process mode, it shuts down the Manager and releases all shared objects.
    In single-process mode, it simply resets the global variables.
    """
    global \
        _manager, \
        _workers, \
        is_multiprocess, \
        _storage_lock, \
        _internal_lock, \
        _pipeline_status_lock, \
        _shared_dicts, \
        _init_flags, \
        _update_flags, \
        _initialized

    # Check if already initialized
    if not _initialized:
        direct_log(
            f"Process {os.getpid()} storage data not initialized, nothing to finalize"
        )
        return

    direct_log(
        f"Process {os.getpid()} finalizing storage data (multiprocess={is_multiprocess})"
    )

    # In multi-process mode, shut down the Manager
    if is_multiprocess and _manager is not None:
        try:
            # Clear shared containers first so proxies are empty before shutdown
            if _shared_dicts is not None:
                _shared_dicts.clear()
            if _init_flags is not None:
                _init_flags.clear()
            if _update_flags is not None:
                _update_flags.clear()

            # Shut down the Manager
            _manager.shutdown()
            direct_log(f"Process {os.getpid()} Manager shutdown complete")
        except Exception as e:
            direct_log(
                f"Process {os.getpid()} Error shutting down Manager: {e}", level="ERROR"
            )

    # Reset ALL module state, including the three locks. The previous version
    # reset a nonexistent "_global_lock" and left _storage_lock /
    # _internal_lock / _pipeline_status_lock / _update_flags / _workers
    # pointing at stale (possibly Manager-backed) objects after shutdown.
    _manager = None
    _workers = None
    _initialized = None
    is_multiprocess = None
    _shared_dicts = None
    _init_flags = None
    _update_flags = None
    _storage_lock = None
    _internal_lock = None
    _pipeline_status_lock = None

    direct_log(f"Process {os.getpid()} storage data finalization complete")