Fix LLM cache handling for PGKVStorage to address document deletion scenarios.

- Add dynamic cache_type field
- Support mode parameter for LLM cache
- Maintain backward compatibility
This commit is contained in:
yangdx 2025-06-29 14:39:50 +08:00
parent dd12b08708
commit 37bf341a69

View File

@ -504,14 +504,22 @@ class PGKVStorage(BaseKVStorage):
async def get_by_id(self, id: str) -> dict[str, Any] | None:
    """Fetch a stored record by id, dispatching on namespace.

    For the LLM response cache namespace, ``id`` is interpreted as the
    cache *mode*: every row for that mode is returned as a dict keyed
    by row id, with a derived ``cache_type`` field added to each row.
    For all other namespaces, ``id`` is a plain primary key and the
    single matching row (or ``None``) is returned.

    Args:
        id: Primary key, or — for the LLM cache namespace — the mode.

    Returns:
        A row dict, a dict of rows keyed by id (LLM cache), or ``None``
        when nothing matched.
    """
    sql = SQL_TEMPLATES["get_by_id_" + self.namespace]
    if is_namespace(self.namespace, NameSpace.KV_STORE_LLM_RESPONSE_CACHE):
        # For LLM cache, the id parameter actually represents the mode.
        params = {"workspace": self.db.workspace, "mode": id}
        array_res = await self.db.query(sql, params, multirows=True)
        res = {}
        for row in array_res:
            # Dynamically add cache_type based on mode; only the
            # "default" mode corresponds to extraction-cache entries.
            row_with_cache_type = dict(row)
            row_with_cache_type["cache_type"] = (
                "extract" if id == "default" else "unknown"
            )
            res[row["id"]] = row_with_cache_type
        return res if res else None
    else:
        params = {"workspace": self.db.workspace, "id": id}
        response = await self.db.query(sql, params)
        return response if response else None