Fine-tune the example so the Ollama example can run without needing to tweak the context size in the Modelfile

This commit is contained in:
Andrii Lazarchuk 2024-10-22 14:35:42 +00:00
parent e54d0536c4
commit 1d24eaf656

View File

@ -15,9 +15,10 @@ if not os.path.exists(WORKING_DIR):
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="mistral:7b",
llm_model_max_async=2,
llm_model_kwargs={"host": "http://localhost:11434"},
llm_model_name="gemma2:2b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
@ -27,7 +28,6 @@ rag = LightRAG(
),
)
with open("./book.txt") as f:
rag.insert(f.read())