diff --git a/data/operation_dulce/dataset.zip b/data/operation_dulce/dataset.zip
index f07caf40..a64047e6
Binary files a/data/operation_dulce/dataset.zip and b/data/operation_dulce/dataset.zip differ
diff --git a/posts/query/notebooks/global_search_nb/index.html b/posts/query/notebooks/global_search_nb/index.html
index b562308e..46d68edd 100644
--- a/posts/query/notebooks/global_search_nb/index.html
+++ b/posts/query/notebooks/global_search_nb/index.html
@@ -308,7 +308,7 @@ a {
api_key = os.environ["GRAPHRAG_API_KEY"]
-llm_model = os.environ["GRAPHRAG_EMBEDDING_MODEL"]
+llm_model = os.environ["GRAPHRAG_LLM_MODEL"]
 
 llm = ChatOpenAI(
     api_key=api_key,
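The one-line change above fixes a wrong environment variable: GRAPHRAG_EMBEDDING_MODEL names the embedding model, so the global-search notebook was building its chat client with an embedding model id. A minimal sketch of the corrected cell, assuming the graphrag query package's ChatOpenAI wrapper and OpenaiApiType enum as imported elsewhere in these notebooks (the max_retries value is illustrative):

import os

from graphrag.query.llm.oai.chat_openai import ChatOpenAI
from graphrag.query.llm.oai.typing import OpenaiApiType

# Read the chat model from GRAPHRAG_LLM_MODEL; GRAPHRAG_EMBEDDING_MODEL
# holds the embedding model name and was used here by mistake.
api_key = os.environ["GRAPHRAG_API_KEY"]
llm_model = os.environ["GRAPHRAG_LLM_MODEL"]

llm = ChatOpenAI(
    api_key=api_key,
    model=llm_model,
    api_type=OpenaiApiType.OpenAI,  # assumption: plain OpenAI rather than Azure
    max_retries=20,
)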
diff --git a/posts/query/notebooks/local_search_nb/index.html b/posts/query/notebooks/local_search_nb/index.html
index a8d2818b..1bcdf7fd 100644
--- a/posts/query/notebooks/local_search_nb/index.html
+++ b/posts/query/notebooks/local_search_nb/index.html
@@ -416,7 +416,7 @@ text_unit_df.head
   
api_key = os.environ["GRAPHRAG_API_KEY"]
-llm_model = os.environ["GRAPHRAG_EMBEDDING_MODEL"]
+llm_model = os.environ["GRAPHRAG_LLM_MODEL"]
 embedding_model = os.environ["GRAPHRAG_EMBEDDING_MODEL"]
 
 llm = ChatOpenAI(
@@ -571,11 +571,20 @@ result = await
   
 
+if "claims" in result.context_data:
+    print(result.context_data["claims"].head())
Question Generation

This function takes a list of user queries and generates the next candidate questions.

 question_generator = LocalQuestionGen(
     llm=llm,
     context_builder=context_builder,
     token_encoder=token_encoder,
@@ -583,13 +592,13 @@ result = await
     context_builder_params=local_context_params,
 )
 
 question_history = [
     "Tell me about Agent Mercer",
     "What happens in Dulce military base?",
 ]
@@ -598,7 +607,7 @@ candidate_questions =
 )
 print(candidate_questions.response)
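Taken together, the local-search additions guard the optional claims (covariate) context and wire up candidate-question generation. A hedged end-to-end sketch, assuming the notebook's existing result, llm, context_builder, token_encoder, and local_context_params objects; the import path and agenerate signature follow the graphrag local-search example and may vary by version:

from graphrag.query.question_gen.local_gen import LocalQuestionGen

# Claims only appear in the context when covariate/claim extraction was
# enabled at index time, hence the guard added in the diff.
if "claims" in result.context_data:
    print(result.context_data["claims"].head())

question_generator = LocalQuestionGen(
    llm=llm,
    context_builder=context_builder,
    token_encoder=token_encoder,
    context_builder_params=local_context_params,
)

question_history = [
    "Tell me about Agent Mercer",
    "What happens in Dulce military base?",
]
candidate_questions = await question_generator.agenerate(
    question_history=question_history,
    context_data=None,  # let the generator build context from the history
    question_count=5,
)
print(candidate_questions.response)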