### This is a sample .env file

### Server Configuration
# HOST=0.0.0.0
# PORT=9621
# WORKERS=2
# CORS_ORIGINS=http://localhost:3000,http://localhost:8080
WEBUI_TITLE='Graph RAG Engine'
WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"

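### Usage sketch (illustrative, not part of the original sample): with this file saved as .env in the
### directory from which the server is launched, the LightRAG API server is typically started with the
### `lightrag-server` command (or via docker compose) and reads these variables at startup.
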
### Optional SSL Configuration
# SSL=true
# SSL_CERTFILE=/path/to/cert.pem
# SSL_KEYFILE=/path/to/key.pem

### Directory Configuration (defaults to current working directory)
# WORKING_DIR=<absolute_path_for_working_dir>
# INPUT_DIR=<absolute_path_for_doc_input_dir>

### Ollama Emulating Model Tag
# OLLAMA_EMULATING_MODEL_TAG=latest

### Max number of nodes returned from graph retrieval
# MAX_GRAPH_NODES=1000

### Logging level
# LOG_LEVEL=INFO
# VERBOSE=False
# LOG_MAX_BYTES=10485760
# LOG_BACKUP_COUNT=5
### Logfile location (defaults to current working directory)
# LOG_DIR=/path/to/log/directory

### Settings for RAG query
# HISTORY_TURNS=3
# COSINE_THRESHOLD=0.2
# TOP_K=60
# MAX_TOKEN_TEXT_CHUNK=4000
# MAX_TOKEN_RELATION_DESC=4000
# MAX_TOKEN_ENTITY_DESC=4000

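### Illustrative reading of the values above (a sketch, not an exhaustive description): with TOP_K=60 and
### COSINE_THRESHOLD=0.2, a query keeps at most 60 retrieved entities/relations whose vector similarity is
### at least 0.2, and the MAX_TOKEN_* limits cap how many tokens of chunk, relation, and entity
### descriptions are packed into the prompt.
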
### Settings for document indexing
SUMMARY_LANGUAGE=English
# CHUNK_SIZE=1200
# CHUNK_OVERLAP_SIZE=100

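### Worked example (illustrative): with CHUNK_SIZE=1200 and CHUNK_OVERLAP_SIZE=100, consecutive chunks
### start 1100 tokens apart, so a 3400-token document is split into roughly four overlapping chunks.
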
### Number of documents processed in parallel in one batch
# MAX_PARALLEL_INSERT=2

### Max tokens for entity/relation descriptions after merge
# MAX_TOKEN_SUMMARY=500
### Number of entities/edges to trigger LLM re-summary on merge (at least 3 is recommended)
# FORCE_LLM_SUMMARY_ON_MERGE=6

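### Illustrative reading (an assumption about the merge behaviour, not a definitive description): once an
### entity or edge has accumulated FORCE_LLM_SUMMARY_ON_MERGE (here 6) description fragments from different
### chunks, the LLM re-summarizes them into a single description of at most MAX_TOKEN_SUMMARY (here 500) tokens.
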
### Number of chunks sent to the embedding model in a single request
# EMBEDDING_BATCH_NUM=32
### Max concurrent embedding requests
# EMBEDDING_FUNC_MAX_ASYNC=16
# MAX_EMBED_TOKENS=8192

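### Worked example (illustrative): EMBEDDING_BATCH_NUM=32 with EMBEDDING_FUNC_MAX_ASYNC=16 allows up to
### 16 concurrent embedding requests of 32 chunks each, i.e. up to 512 chunks in flight at once.
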
### LLM Configuration
### Timeout in seconds for LLM requests (None for no timeout)
TIMEOUT=150
### Some models like o1-mini require temperature to be set to 1
TEMPERATURE=0.5
### Max concurrent LLM requests
MAX_ASYNC=4
### Max tokens sent to the LLM (must be less than the model's context size)
MAX_TOKENS=32768
ENABLE_LLM_CACHE=true
ENABLE_LLM_CACHE_FOR_EXTRACT=true

### Ollama example (for local services running in Docker, you can use host.docker.internal as the host)
LLM_BINDING=ollama
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_API_KEY=your_api_key
LLM_BINDING_HOST=http://localhost:11434

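### Docker variant (illustrative): when LightRAG runs inside a container and Ollama runs on the host,
### point the binding at the host instead of localhost, e.g.:
# LLM_BINDING_HOST=http://host.docker.internal:11434
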
### OpenAI-compatible example
# LLM_BINDING=openai
# LLM_MODEL=gpt-4o
# LLM_BINDING_HOST=https://api.openai.com/v1
# LLM_BINDING_API_KEY=your_api_key
### LoLLMs example
# LLM_BINDING=lollms
# LLM_MODEL=mistral-nemo:latest
# LLM_BINDING_HOST=http://localhost:9600
# LLM_BINDING_API_KEY=your_api_key

### Embedding Configuration (use a valid host; for local services running in Docker, you can use host.docker.internal)
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
# EMBEDDING_BINDING_API_KEY=your_api_key
### Ollama example
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434
### OpenAI-compatible example
# EMBEDDING_BINDING=openai
# EMBEDDING_BINDING_HOST=https://api.openai.com/v1
### LoLLMs example
# EMBEDDING_BINDING=lollms
# EMBEDDING_BINDING_HOST=http://localhost:9600

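### Notes (illustrative): EMBEDDING_DIM must match the chosen model (bge-m3 produces 1024-dimensional
### vectors), and changing the embedding model or dimension generally requires re-indexing existing documents.
### Docker variant, mirroring the LLM example above:
# EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
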
### Optional for Azure (LLM_BINDING_HOST, LLM_BINDING_API_KEY take priority)
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
# AZURE_OPENAI_DEPLOYMENT=gpt-4o
# AZURE_OPENAI_API_KEY=your_api_key
# AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com

# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
# AZURE_EMBEDDING_API_VERSION=2023-05-15

### Data storage selection
LIGHTRAG_KV_STORAGE=JsonKVStorage
LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
LIGHTRAG_GRAPH_STORAGE=NetworkXStorage
LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage

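### Alternative backends (illustrative; verify the exact implementation names against the LightRAG docs), e.g.:
# LIGHTRAG_KV_STORAGE=PGKVStorage
# LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
# LIGHTRAG_GRAPH_STORAGE=Neo4JStorage
# LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
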
### TiDB Configuration (Deprecated)
# TIDB_HOST=localhost
# TIDB_PORT=4000
# TIDB_USER=your_username
# TIDB_PASSWORD='your_password'
# TIDB_DATABASE=your_database
### Workspace for separating data between different LightRAG instances (deprecated)
# TIDB_WORKSPACE=default

### PostgreSQL Configuration
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_USER=your_username
POSTGRES_PASSWORD='your_password'
POSTGRES_DATABASE=your_database
POSTGRES_MAX_CONNECTIONS=12
### Workspace for separating data between different LightRAG instances (deprecated)
# POSTGRES_WORKSPACE=default

### Independent AGE Configuration (not for AGE embedded in PostgreSQL)
AGE_POSTGRES_DB=
AGE_POSTGRES_USER=
AGE_POSTGRES_PASSWORD=
AGE_POSTGRES_HOST=
# AGE_POSTGRES_PORT=8529

# AGE Graph Name (applies to PostgreSQL and independent AGE)
### AGE_GRAPH_NAME is deprecated
# AGE_GRAPH_NAME=lightrag

### Neo4j Configuration
NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD='your_password'

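### Local Neo4j variant (illustrative): a locally running instance is typically reachable over Bolt, e.g.:
# NEO4J_URI=neo4j://localhost:7687
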
### MongoDB Configuration
MONGO_URI=mongodb://root:root@localhost:27017/
MONGO_DATABASE=LightRAG
### Separating data between different LightRAG instances (deprecated)
# MONGODB_GRAPH=false

### Milvus Configuration
MILVUS_URI=http://localhost:19530
MILVUS_DB_NAME=lightrag
# MILVUS_USER=root
# MILVUS_PASSWORD=your_password
# MILVUS_TOKEN=your_token

### Qdrant
QDRANT_URL=http://localhost:16333
# QDRANT_API_KEY=your-api-key

### Redis
REDIS_URI=redis://localhost:6379

### For JWT Auth
# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
# TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
# TOKEN_EXPIRE_HOURS=48
# GUEST_TOKEN_EXPIRE_HOURS=24
# JWT_ALGORITHM=HS256

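### A strong TOKEN_SECRET can be generated with any random-string tool, for example (illustrative):
###   openssl rand -hex 32
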
### API key to access the LightRAG Server API
# LIGHTRAG_API_KEY=your-secure-api-key-here
# WHITELIST_PATHS=/health,/api/*
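
### Example request (illustrative; the exact header name is an assumption, verify against the API docs):
###   curl -H "X-API-Key: your-secure-api-key-here" http://localhost:9621/<endpoint>
### Requests to paths matched by WHITELIST_PATHS are served without the key.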