From 473cbd0ec6b50f255a1d2236ddbb10f12a2b7907 Mon Sep 17 00:00:00 2001 From: darthtrevino Date: Mon, 22 Apr 2024 19:16:56 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20microsof?= =?UTF-8?q?t/graphrag@0d8cb0b2c152e334b0c1c9ce0c7bf4349ff7a85a=20?= =?UTF-8?q?=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- posts/get_started/index.html | 4 ++-- posts/query/3-cli/index.html | 24 ++++++++++++------------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/posts/get_started/index.html b/posts/get_started/index.html index 2405af34..4af8db66 100644 --- a/posts/get_started/index.html +++ b/posts/get_started/index.html @@ -364,7 +364,7 @@ Once the pipeline is complete, you should see a new folder called ./ragtes
python -m graphrag.query \
---data ./ragtest/output/<timestamp>/artifacts \
+--root ./ragtest \
 --method global \
 "What are the top themes in this story?"
@@ -376,7 +376,7 @@ Once the pipeline is complete, you should see a new folder called ./ragtes
python -m graphrag.query \
---data ./ragtest/output/<timestamp>/artifacts \
+--root ./ragtest \
 --method local \
 "Who is Scrooge, and what are his main relationships?"
diff --git a/posts/query/3-cli/index.html b/posts/query/3-cli/index.html index cd8b4030..1fd223ea 100644 --- a/posts/query/3-cli/index.html +++ b/posts/query/3-cli/index.html @@ -301,18 +301,18 @@ a {
  • GRAPHRAG_EMBEDDING_API_BASE - The API Base URL. Default: None
  • GRAPHRAG_EMBEDDING_TYPE - The embedding client to use. Either openai_embedding or azure_openai_embedding. Default: openai_embedding
  • GRAPHRAG_EMBEDDING_MAX_RETRIES - The maximum number of retries to attempt when a request fails. Default: 20
  • -
  • TEXT_UNIT_PROP - Proportion of context window dedicated to related text units. Default: 0.5
  • -
  • COMMUNITY_PROP - Proportion of context window dedicated to community reports. Default: 0.1
  • -
  • CONVERSATION_HISTORY_MAX_TURNS - Maximum number of turns to include in the conversation history. Default: 5
  • -
  • TOP_K_MAPPED_ENTITIES - Number of related entities to retrieve from the entity description embedding store. Default: 10
  • -
  • TOP_K_RELATIONSHIPS - Control the number of out-of-network relationships to pull into the context window. Default: 10
  • -
  • LOCAL_CONTEXT_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000). Default: 12000
  • -
  • GRAPHRAG_LLM_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 1000=1500). Default: 2000
  • -
  • CONTEXT_BUILDER_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000). Default: 12000
  • -
  • MAP_LLM_MAX_TOKENS - Default: 500
  • -
  • REDUCE_LLM_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 1000-1500). Default: 2000
  • -
  • SEARCH_ENGINE_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000). Default: 12000
  • -
  • SEARCH_ENGINE_CONCURRENCY - Default: 32
  • +
  • GRAPHRAG_LOCAL_SEARCH_TEXT_UNIT_PROP - Proportion of context window dedicated to related text units. Default: 0.5
  • +
  • GRAPHRAG_LOCAL_SEARCH_COMMUNITY_PROP - Proportion of context window dedicated to community reports. Default: 0.1
  • +
  • GRAPHRAG_LOCAL_SEARCH_CONVERSATION_HISTORY_MAX_TURNS - Maximum number of turns to include in the conversation history. Default: 5
  • +
  • GRAPHRAG_LOCAL_SEARCH_TOP_K_ENTITIES - Number of related entities to retrieve from the entity description embedding store. Default: 10
  • +
  • GRAPHRAG_LOCAL_SEARCH_TOP_K_RELATIONSHIPS - Control the number of out-of-network relationships to pull into the context window. Default: 10
  • +
  • GRAPHRAG_LOCAL_SEARCH_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000). Default: 12000
  • +
  • GRAPHRAG_LOCAL_SEARCH_LLM_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 1000-1500). Default: 2000
  • +
  • GRAPHRAG_GLOBAL_SEARCH_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000). Default: 12000
  • +
  • GRAPHRAG_GLOBAL_SEARCH_DATA_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 5000). Default: 12000
  • +
  • GRAPHRAG_GLOBAL_SEARCH_MAP_MAX_TOKENS - Default: 500
  • +
  • GRAPHRAG_GLOBAL_SEARCH_REDUCE_MAX_TOKENS - Change this based on the token limit you have on your model (if you are using a model with 8k limit, a good setting could be 1000-1500). Default: 2000
  • +
  • GRAPHRAG_GLOBAL_SEARCH_CONCURRENCY - Default: 32