Mirror of https://github.com/HKUDS/LightRAG.git (synced 2025-06-26 22:00:19 +00:00)
Fine-tune the Ollama example so it runs without needing to tweak the context size in a Modelfile
parent e54d0536c4
commit 1d24eaf656
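For context, before this change the usual way to give the Ollama model a larger context window was to bake `num_ctx` into a custom Modelfile and point `llm_model_name` at the resulting model. The sketch below shows that now-unnecessary workaround; the base model tag and custom model name are illustrative assumptions, not taken from the repo:

```
# Hypothetical Modelfile this commit makes unnecessary; values are illustrative.
FROM gemma2:2b
PARAMETER num_ctx 32768
```

Such a model would be built with something like `ollama create gemma2-32k -f Modelfile` and then referenced by that custom name in the example.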
```diff
@@ -15,9 +15,10 @@ if not os.path.exists(WORKING_DIR):
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=ollama_model_complete,
-    llm_model_name="mistral:7b",
-    llm_model_max_async=2,
-    llm_model_kwargs={"host": "http://localhost:11434"},
+    llm_model_name="gemma2:2b",
+    llm_model_max_async=4,
+    llm_model_max_token_size=32768,
+    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
     embedding_func=EmbeddingFunc(
         embedding_dim=768,
         max_token_size=8192,
@@ -27,7 +28,6 @@ rag = LightRAG(
     ),
 )
 
 
 with open("./book.txt") as f:
     rag.insert(f.read())
-
```
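Pieced together from the visible hunks, the example after this commit reads roughly like the sketch below. The import paths, the working directory, the elided embedding function (an Ollama embedding call with `nomic-embed-text`), and the query at the end are assumptions based on LightRAG's other examples, not part of this diff:

```python
import os

# Import paths are assumptions from other LightRAG examples, not shown in this diff.
from lightrag import LightRAG, QueryParam
from lightrag.llm import ollama_model_complete, ollama_embedding
from lightrag.utils import EmbeddingFunc

WORKING_DIR = "./dickens"  # assumed; the diff only shows the existence check

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=ollama_model_complete,
    llm_model_name="gemma2:2b",
    llm_model_max_async=4,
    # Keep the model-side token budget and Ollama's num_ctx in sync (32k),
    # so no custom Modelfile is needed.
    llm_model_max_token_size=32768,
    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
    embedding_func=EmbeddingFunc(
        embedding_dim=768,
        max_token_size=8192,
        # The diff elides this function; nomic-embed-text (768-dim) is an assumption.
        func=lambda texts: ollama_embedding(texts, embed_model="nomic-embed-text"),
    ),
)

with open("./book.txt") as f:
    rag.insert(f.read())

# Example query (not part of this diff).
print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
```

The key point of the commit is the `"options": {"num_ctx": 32768}` entry in `llm_model_kwargs`: it sets the Ollama context window per request, so the stock `gemma2:2b` model can be used directly instead of a custom Modelfile build.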