import os

from haystack import Document
from haystack.document_stores.in_memory import InMemoryDocumentStore
from haystack.pipeline_utils import build_rag_pipeline

# Read the API key from the environment; the hard-coded placeholder is only a fallback reminder.
API_KEY = os.environ.get("OPENAI_API_KEY", "SET YOUR OPENAI API KEY HERE")

# We support many different databases. Here we load a simple and lightweight in-memory document store.
document_store = InMemoryDocumentStore()
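# Note: InMemoryDocumentStore keeps all documents in process memory, so it needs
# no external services; its contents are lost when the process exits, making it
# a good fit for demos and tests rather than production.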
# Create some example documents and add them to the document store.
documents = [
    Document(content="My name is Jean and I live in Paris."),
    Document(content="My name is Mark and I live in Berlin."),
    Document(content="My name is Giorgio and I live in Rome."),
]
document_store.write_documents(documents)
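# Sanity check (assumes the standard DocumentStore protocol, which exposes
# count_documents()): the store should now hold the three documents above.
assert document_store.count_documents() == 3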
# Let's now build a simple RAG pipeline that uses a generative model to answer questions.
rag_pipeline = build_rag_pipeline(llm_api_key=API_KEY, document_store=document_store)
answers = rag_pipeline.run(query="Who lives in Rome?")
print(answers.data)
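# The pipeline is reusable: follow-up questions can be asked without rebuilding
# it. This call simply mirrors the query above.
answers = rag_pipeline.run(query="Who lives in Paris?")
print(answers.data)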