import asyncio
import inspect
import logging
import os

from lightrag import LightRAG, QueryParam
from lightrag.llm import ollama_embedding, ollama_model_complete
from lightrag.utils import EmbeddingFunc

WORKING_DIR = "./dickens_age"

logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

# Apache AGE (PostgreSQL graph extension) connection settings;
# adjust these placeholder credentials to match your own Postgres instance.
os.environ["AGE_POSTGRES_DB"] = "postgresDB"
os.environ["AGE_POSTGRES_USER"] = "postgresUser"
os.environ["AGE_POSTGRES_PASSWORD"] = "postgresPW"
os.environ["AGE_POSTGRES_HOST"] = "localhost"
os.environ["AGE_POSTGRES_PORT"] = "5455"
os.environ["AGE_GRAPH_NAME"] = "dickens"

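# Initialize LightRAG with a local Ollama LLM (llama3.1:8b) and the
# nomic-embed-text embedding model (768-dimensional vectors), persisting
# the knowledge graph in Apache AGE via the AGEStorage backend.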
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=ollama_model_complete,
    llm_model_name="llama3.1:8b",
    llm_model_max_async=4,
    llm_model_max_token_size=32768,
    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
    embedding_func=EmbeddingFunc(
        embedding_dim=768,
        max_token_size=8192,
        func=lambda texts: ollama_embedding(
            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
        ),
    ),
    graph_storage="AGEStorage",
)

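# Ingest the source document: LightRAG chunks the text, extracts entities
# and relations with the LLM, and writes them into the graph store.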
with open("./book.txt", "r", encoding="utf-8") as f:
    rag.insert(f.read())

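# Query the indexed document with each retrieval mode: naive (plain vector
# search over chunks), local (entity-centric context), global (relation-level
# context), and hybrid (local and global combined).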
# Perform naive search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)

# Perform local search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)

# Perform global search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)

# Perform hybrid search
print(
    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)

# Stream the response to the same question in hybrid mode
resp = rag.query(
    "What are the top themes in this story?",
    param=QueryParam(mode="hybrid", stream=True),
)


async def print_stream(stream):
    # Consume the async generator chunk by chunk, printing tokens as they arrive
    async for chunk in stream:
        print(chunk, end="", flush=True)

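# With stream=True the query may return an async generator; drain it with
# the helper above, otherwise fall back to printing the full string.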
if inspect.isasyncgen(resp):
    asyncio.run(print_stream(resp))
else:
    print(resp)