LightRAG/env.example

### This is a sample .env file
### Server Configuration
HOST=0.0.0.0
PORT=9621
WEBUI_TITLE='My Graph KB'
WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
OLLAMA_EMULATING_MODEL_TAG=latest
# WORKERS=2
# CORS_ORIGINS=http://localhost:3000,http://localhost:8080
### Login Configuration
# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
# TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
# TOKEN_EXPIRE_HOURS=48
# GUEST_TOKEN_EXPIRE_HOURS=24
# JWT_ALGORITHM=HS256
### API-Key to access LightRAG Server API
# LIGHTRAG_API_KEY=your-secure-api-key-here
# WHITELIST_PATHS=/health,/api/*
### Optional SSL Configuration
# SSL=true
# SSL_CERTFILE=/path/to/cert.pem
# SSL_KEYFILE=/path/to/key.pem
### Directory Configuration (defaults to current working directory)
### Should not be set when deploying with Docker (the Dockerfile sets these instead of .env)
### Default values are ./inputs and ./rag_storage
# INPUT_DIR=<absolute_path_for_doc_input_dir>
# WORKING_DIR=<absolute_path_for_working_dir>
### Max nodes returned from graph retrieval
# MAX_GRAPH_NODES=1000
### Logging level
# LOG_LEVEL=INFO
# VERBOSE=False
# LOG_MAX_BYTES=10485760
# LOG_BACKUP_COUNT=5
### Logfile location (defaults to current working directory)
# LOG_DIR=/path/to/log/directory
### Settings for RAG query
# HISTORY_TURNS=3
# COSINE_THRESHOLD=0.2
# TOP_K=60
# MAX_TOKEN_TEXT_CHUNK=4000
# MAX_TOKEN_RELATION_DESC=4000
# MAX_TOKEN_ENTITY_DESC=4000
### Entity and relation summarization configuration
### Language: English, Chinese, French, German ...
SUMMARY_LANGUAGE=English
### Number of duplicated entities/edges that triggers LLM re-summarization on merge (at least 3 is recommended)
# FORCE_LLM_SUMMARY_ON_MERGE=6
### Max tokens for entity/relation descriptions after merge
# MAX_TOKEN_SUMMARY=500
### Number of documents processed in parallel (less than MAX_ASYNC/2 is recommended)
# MAX_PARALLEL_INSERT=2
### Chunk size for document splitting, 500~1500 is recommended
# CHUNK_SIZE=1200
# CHUNK_OVERLAP_SIZE=100
### LLM Configuration
ENABLE_LLM_CACHE=true
ENABLE_LLM_CACHE_FOR_EXTRACT=true
### Timeout in seconds for LLM requests, None for infinite timeout
TIMEOUT=240
### Some models like o1-mini require temperature to be set to 1
TEMPERATURE=0
### Max concurrent LLM requests
MAX_ASYNC=4
### Max tokens sent to the LLM for entity/relation summaries (must be less than the model's context size)
MAX_TOKENS=32768
### LLM Binding type: openai, ollama, lollms
LLM_BINDING=openai
LLM_MODEL=gpt-4o
LLM_BINDING_HOST=https://api.openai.com/v1
LLM_BINDING_API_KEY=your_api_key
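### Example: Ollama binding instead of OpenAI (illustrative values; adjust the model tag and host to your local Ollama install)
# LLM_BINDING=ollama
# LLM_MODEL=mistral-nemo:latest
# LLM_BINDING_HOST=http://localhost:11434
# LLM_BINDING_API_KEY=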
### Embedding Configuration
### Embedding Binding type: openai, ollama, lollms
EMBEDDING_BINDING=ollama
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
EMBEDDING_BINDING_API_KEY=your_api_key
### If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
EMBEDDING_BINDING_HOST=http://localhost:11434
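### Example: OpenAI embeddings instead of Ollama (illustrative values; text-embedding-3-large produces 3072-dimensional vectors)
# EMBEDDING_BINDING=openai
# EMBEDDING_MODEL=text-embedding-3-large
# EMBEDDING_DIM=3072
# EMBEDDING_BINDING_HOST=https://api.openai.com/v1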
### Number of chunks sent to the embedding model in a single request
# EMBEDDING_BATCH_NUM=32
### Max concurrent embedding requests
# EMBEDDING_FUNC_MAX_ASYNC=16
### Maximum tokens sent to Embedding for each chunk (no longer in use?)
# MAX_EMBED_TOKENS=8192
### Data storage selection
# LIGHTRAG_KV_STORAGE=PGKVStorage
# LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
# LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
# LIGHTRAG_GRAPH_STORAGE=Neo4JStorage
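### When these are left unset, LightRAG falls back to its built-in file-based storages, which suit
### single-process local testing rather than production (names below reflect the defaults at the time of writing)
# LIGHTRAG_KV_STORAGE=JsonKVStorage
# LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
# LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage
# LIGHTRAG_GRAPH_STORAGE=NetworkXStorage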
### TiDB Configuration (Deprecated)
# TIDB_HOST=localhost
# TIDB_PORT=4000
# TIDB_USER=your_username
# TIDB_PASSWORD='your_password'
# TIDB_DATABASE=your_database
### Separates data between different LightRAG instances (deprecated)
# TIDB_WORKSPACE=default
### PostgreSQL Configuration
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_USER=your_username
POSTGRES_PASSWORD='your_password'
POSTGRES_DATABASE=your_database
POSTGRES_MAX_CONNECTIONS=12
### Separates data between different LightRAG instances (deprecated)
# POSTGRES_WORKSPACE=default
### Neo4j Configuration
NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD='your_password'
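### Example: local Neo4j instance (illustrative; 7687 is the default Bolt port)
# NEO4J_URI=neo4j://localhost:7687
# NEO4J_USERNAME=neo4j
# NEO4J_PASSWORD='your_password'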
### Independent AGE Configuration (not for AGE embedded in PostgreSQL)
# AGE_POSTGRES_DB=
# AGE_POSTGRES_USER=
# AGE_POSTGRES_PASSWORD=
# AGE_POSTGRES_HOST=
# AGE_POSTGRES_PORT=8529
### AGE Graph Name (applies to both PostgreSQL and independent AGE)
### AGE_GRAPH_NAME is deprecated
# AGE_GRAPH_NAME=lightrag
### MongoDB Configuration
MONGO_URI=mongodb://root:root@localhost:27017/
MONGO_DATABASE=LightRAG
### Separates data between different LightRAG instances (deprecated)
# MONGODB_GRAPH=false
### Milvus Configuration
MILVUS_URI=http://localhost:19530
MILVUS_DB_NAME=lightrag
# MILVUS_USER=root
# MILVUS_PASSWORD=your_password
# MILVUS_TOKEN=your_token
### Qdrant
QDRANT_URL=http://localhost:16333
# QDRANT_API_KEY=your-api-key
### Redis
REDIS_URI=redis://localhost:6379