Mirror of https://github.com/HKUDS/LightRAG.git, synced 2025-06-26 22:00:19 +00:00
commit fba232487b (parent 13c67c2bcf)

    update
examples/batch_eval.py (new file, 112 lines)
@@ -0,0 +1,112 @@
import re
import json

import jsonlines
from openai import OpenAI


def batch_eval(query_file, result1_file, result2_file, output_file_path, api_key):
    client = OpenAI(api_key=api_key)

    with open(query_file, 'r') as f:
        data = f.read()

    queries = re.findall(r'- Question \d+: (.+)', data)

    with open(result1_file, 'r') as f:
        answers1 = json.load(f)
    answers1 = [i['result'] for i in answers1]

    with open(result2_file, 'r') as f:
        answers2 = json.load(f)
    answers2 = [i['result'] for i in answers2]

    requests = []
    for i, (query, answer1, answer2) in enumerate(zip(queries, answers1, answers2)):
        sys_prompt = """
---Role---
You are an expert tasked with evaluating two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
"""

        prompt = f"""
You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.

- **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
- **Diversity**: How varied and rich is the answer in providing different perspectives and insights on the question?
- **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic?

For each criterion, choose the better answer (either Answer 1 or Answer 2) and explain why. Then, select an overall winner based on these three categories.

Here is the question:
{query}

Here are the two answers:

**Answer 1:**
{answer1}

**Answer 2:**
{answer2}

Evaluate both answers using the three criteria listed above and provide detailed explanations for each criterion.

Output your evaluation in the following JSON format:

{{
    "Comprehensiveness": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Diversity": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Empowerment": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Overall Winner": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Summarize why this answer is the overall winner based on the three criteria]"
    }}
}}
"""

        # One request object per evaluation pair, in Batch API request format.
        request_data = {
            "custom_id": f"request-{i+1}",
            "method": "POST",
            "url": "/v1/chat/completions",
            "body": {
                "model": "gpt-4o-mini",
                "messages": [
                    {"role": "system", "content": sys_prompt},
                    {"role": "user", "content": prompt},
                ],
            },
        }

        requests.append(request_data)

    # The Batch API expects JSONL input: one JSON request per line.
    with jsonlines.open(output_file_path, mode='w') as writer:
        for request in requests:
            writer.write(request)

    print(f"Batch API requests written to {output_file_path}")

    batch_input_file = client.files.create(
        file=open(output_file_path, "rb"),
        purpose="batch",
    )
    batch_input_file_id = batch_input_file.id

    batch = client.batches.create(
        input_file_id=batch_input_file_id,
        endpoint="/v1/chat/completions",
        completion_window="24h",
        metadata={"description": "nightly eval job"},
    )

    print(f'Batch {batch.id} has been created.')


if __name__ == "__main__":
    # batch_eval() takes explicit paths and an API key; fill these in before running.
    batch_eval(
        query_file="",
        result1_file="",
        result2_file="",
        output_file_path="",
        api_key="",
    )
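Note that batch_eval only submits the job; results arrive asynchronously within the 24h completion window. A minimal sketch of the retrieval side, using the standard Batch API methods (client.batches.retrieve, client.files.content) — the helper name fetch_batch_results is ours, not part of this commit:

import time
import json

from openai import OpenAI


def fetch_batch_results(batch_id, api_key, poll_seconds=60):
    # Hypothetical helper, not part of this commit: poll the batch
    # until it reaches a terminal state, then parse the JSONL output.
    client = OpenAI(api_key=api_key)
    while True:
        batch = client.batches.retrieve(batch_id)
        if batch.status == "completed":
            break
        if batch.status in ("failed", "expired", "cancelled"):
            raise RuntimeError(f"Batch ended with status {batch.status}")
        time.sleep(poll_seconds)

    # The output file is JSONL: one response object per request, keyed by custom_id.
    raw = client.files.content(batch.output_file_id).text
    return [json.loads(line) for line in raw.splitlines() if line]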
examples/insert.py (new file, 19 lines)
@@ -0,0 +1,19 @@
import os
import sys

# Make the local LightRAG checkout importable; replace with your own path.
sys.path.append('xxx/xxx/LightRAG')

from lightrag import LightRAG

os.environ["OPENAI_API_KEY"] = ""

WORKING_DIR = ""

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(working_dir=WORKING_DIR)

with open('./text.txt', 'r') as f:
    text = f.read()

rag.insert(text)
examples/query.py (new file, 17 lines)
@@ -0,0 +1,17 @@
import os
import sys

# Make the local LightRAG checkout importable; replace with your own path.
sys.path.append('xxx/xxx/LightRAG')

from lightrag import LightRAG, QueryParam

os.environ["OPENAI_API_KEY"] = ""

WORKING_DIR = ""

rag = LightRAG(working_dir=WORKING_DIR)

mode = 'global'
query_param = QueryParam(mode=mode)

result, _ = rag.query("", param=query_param)
print(result)
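The example queries in 'global' mode only. Upstream LightRAG's QueryParam also exposes 'naive', 'local', and 'hybrid' retrieval modes — an assumption not shown in this commit, so verify against your checkout. A quick sketch comparing them:

from lightrag import LightRAG, QueryParam

WORKING_DIR = ""
rag = LightRAG(working_dir=WORKING_DIR)

# Mode names assumed from upstream LightRAG; confirm against QueryParam
# in the version you checked out.
for mode in ['naive', 'local', 'global', 'hybrid']:
    result, _ = rag.query("What are the top themes in this text?",
                          param=QueryParam(mode=mode))
    print(f"--- {mode} ---")
    print(result)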
(Two binary files changed, not shown.)
@@ -17,10 +17,9 @@ from .utils import compute_args_hash, wrap_embedding_func_with_attrs
     retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
 )
 async def openai_complete_if_cache(
-    model, prompt, api_key=''
-    , system_prompt=None, history_messages=[], **kwargs
+    model, prompt, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
-    openai_async_client = AsyncOpenAI(api_key=api_key)
+    openai_async_client = AsyncOpenAI()
     hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
     messages = []
     if system_prompt:
@@ -72,8 +71,8 @@ async def gpt_4o_mini_complete(
     wait=wait_exponential(multiplier=1, min=4, max=10),
     retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
 )
-async def openai_embedding(texts: list[str], api_key='') -> np.ndarray:
-    openai_async_client = AsyncOpenAI(api_key=api_key)
+async def openai_embedding(texts: list[str]) -> np.ndarray:
+    openai_async_client = AsyncOpenAI()
     response = await openai_async_client.embeddings.create(
         model="text-embedding-3-small", input=texts, encoding_format="float"
     )
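Net effect of these hunks: the explicit api_key parameter is removed, so AsyncOpenAI() falls back to the openai client's default behavior of reading OPENAI_API_KEY from the environment. Callers now configure the key once, as the new examples above do; a minimal sketch:

import os
import asyncio

from openai import AsyncOpenAI

# With no api_key argument, the openai client reads OPENAI_API_KEY from the
# environment, so one assignment covers every completion and embedding call.
os.environ["OPENAI_API_KEY"] = ""  # fill in your key


async def main():
    client = AsyncOpenAI()
    response = await client.embeddings.create(
        model="text-embedding-3-small", input=["hello"], encoding_format="float"
    )
    print(len(response.data[0].embedding))


asyncio.run(main())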