# haystack/test/nodes/test_preprocessor.py

from pathlib import Path
from haystack import Document
from haystack.nodes.file_converter.pdf import PDFToTextConverter
from haystack.nodes.preprocessor.preprocessor import PreProcessor
# Commit history (scrape residue, preserved as a comment so the file stays valid Python):
# [RAG] Integrate "Retrieval-Augmented Generation" with Haystack (#484) * Adding dummy generator implementation * Adding tutorial to try the model * Committing current non working code * Committing current update where we need to call generate function directly and need to convert embedding to tensor way * Addressing review comments. * Refactoring finder, and implementing rag_generator class. * Refined the implementation of RAGGenerator and now it is in clean shape * Renaming RAGGenerator to RAGenerator * Reverting change from finder.py and addressing review comments * Remove support for RagSequenceForGeneration * Utilizing embed_passage function from DensePassageRetriever * Adding sample test data to verify generator output * Updating testing script * Updating testing script * Fixing bug related to top_k * Updating latest farm dependency * Comment out farm dependency * Reverting changes from TransformersReader * Adding transformers dataset to compare transformers and haystack generator implementation * Using generator_encoder instead of question_encoder to generate context_input_ids * Adding workaround to install FARM dependency from master branch * Removing unnecessary changes * Fixing generator test * Removing transformers datasets * Fixing generator test * Some cleanup and updating TODO comments * Adding tutorial notebook * Updating tutorials with comments * Explicitly passing token model in RAG test * Addressing review comments * Fixing notebook * Refactoring tests to reduce memory footprint * Split generator tests in separate ci step and before running it reclaim memory by terminating containers * Moving tika dependent test to separate dir * Remove unwanted code * Brining reader under session scope * Farm is now session object hence restoring changes from default value * Updating assert for pdf converter * Dummy commit to trigger CI flow * REducing memory footprint required for generator tests * Fixing mypy issues * Marking test with tika and elasticsearch
# markers. Reverting changes in CI and pytest splits * reducing changes * Fixing CI * changing elastic search ci * Fixing test error * Disabling return of embedding * Marking generator test as well * Refactoring tutorials * Increasing ES memory to 750M * Trying another fix for ES CI * Reverting CI changes * Splitting tests in CI * Generator and non-generator markers split * Adding pytest.ini to add markers and enable strict-markers option * Reducing elastic search container memory * Simplifying generator test by using documents with embedding directly * Bump up farm to 0.5.0
# 2020-10-30 18:06:02 +01:00
from ..conftest import SAMPLES_PATH
# Fixture text for the split tests below: three paragraphs of five sentences
# each (15 sentences total). Paragraph 3 deliberately ends with the
# abbreviation "Dr." to check that sentence splitting is not fooled by
# abbreviation periods.
TEXT = """
This is a sample sentence in paragraph_1. This is a sample sentence in paragraph_1. This is a sample sentence in
paragraph_1. This is a sample sentence in paragraph_1. This is a sample sentence in paragraph_1.
This is a sample sentence in paragraph_2. This is a sample sentence in paragraph_2. This is a sample sentence in
paragraph_2. This is a sample sentence in paragraph_2. This is a sample sentence in paragraph_2.
This is a sample sentence in paragraph_3. This is a sample sentence in paragraph_3. This is a sample sentence in
paragraph_3. This is a sample sentence in paragraph_3. This is to trick the test with using an abbreviation like Dr.
in the sentence.
"""
def test_preprocess_sentence_split():
    """Sentence splitting: one doc per sentence, then grouped ten at a time."""
    doc = Document(content=TEXT)

    # split_length=1 -> one output document per sentence; TEXT has 15 sentences.
    one_per_sentence = PreProcessor(
        split_length=1, split_overlap=0, split_by="sentence", split_respect_sentence_boundary=False
    ).process(doc)
    assert len(one_per_sentence) == 15

    # split_length=10 -> ceil(15 / 10) == 2 output documents.
    ten_per_doc = PreProcessor(
        split_length=10, split_overlap=0, split_by="sentence", split_respect_sentence_boundary=False
    ).process(doc)
    assert len(ten_per_doc) == 2
def test_preprocess_word_split():
    """Word splitting with and without respecting sentence boundaries."""
    doc = Document(content=TEXT)

    # Plain 10-word chunks, sentence boundaries ignored.
    chunks = PreProcessor(
        split_length=10, split_overlap=0, split_by="word", split_respect_sentence_boundary=False
    ).process(doc)
    assert len(chunks) == 11

    # With boundary respect, chunks may stop short of split_length so a
    # sentence is never cut in half.
    chunks = PreProcessor(
        split_length=15, split_overlap=0, split_by="word", split_respect_sentence_boundary=True
    ).process(doc)
    for idx, chunk in enumerate(chunks):
        word_count = len(chunk.content.split(" "))
        if idx == 0:
            assert word_count == 14
        # The "Dr." sentence chunk is allowed to exceed the word limit.
        assert word_count <= 15 or chunk.content.startswith("This is to trick")
    assert len(chunks) == 8

    # Overlapping windows: 40 words per chunk with a 10-word overlap.
    chunks = PreProcessor(
        split_length=40, split_overlap=10, split_by="word", split_respect_sentence_boundary=True
    ).process(doc)
    assert len(chunks) == 5

    # Very small chunks with boundary respect -> one chunk per sentence.
    chunks = PreProcessor(
        split_length=5, split_overlap=0, split_by="word", split_respect_sentence_boundary=True
    ).process(doc)
    assert len(chunks) == 15
def test_preprocess_passage_split():
    """Passage splitting: TEXT has three blank-line-separated paragraphs."""
    doc = Document(content=TEXT)

    # One passage per output document.
    per_passage = PreProcessor(
        split_length=1, split_overlap=0, split_by="passage", split_respect_sentence_boundary=False
    ).process(doc)
    assert len(per_passage) == 3

    # Two passages per output document -> ceil(3 / 2) == 2.
    paired = PreProcessor(
        split_length=2, split_overlap=0, split_by="passage", split_respect_sentence_boundary=False
    ).process(doc)
    assert len(paired) == 2
def test_clean_header_footer():
    """clean_header_footer strips repeated page headers/footers from a PDF."""
    # sample_pdf_2.pdf is known to contain a header and a footer on each page.
    pdf_doc = PDFToTextConverter().convert(
        file_path=Path(SAMPLES_PATH / "pdf" / "sample_pdf_2.pdf")
    )

    cleaned = PreProcessor(clean_header_footer=True, split_by=None).process(pdf_doc)

    assert len(cleaned) == 1
    assert "This is a header." not in cleaned[0].content
    assert "footer" not in cleaned[0].content
def test_remove_substrings():
    """remove_substrings deletes only the listed substrings (including emoji)
    and leaves all other text — including control characters — intact."""
    document = Document(content="This is a header. Some additional text. wiki. Some emoji ✨ 🪲 Weird whitespace\b\b\b.")

    # Sanity check: the fixture contains everything we are about to remove,
    # plus material that must survive the cleaning.
    assert "This is a header." in document.content
    assert "wiki" in document.content
    assert "🪲" in document.content
    assert "whitespace" in document.content
    # The backspace control characters from the fixture string. The previous
    # `assert "" in ...` was vacuous (empty string is a substring of anything);
    # it should check for the actual "\b\b\b" sequence.
    assert "\b\b\b" in document.content

    preprocessor = PreProcessor(remove_substrings=["This is a header.", "wiki", "🪲"])
    documents = preprocessor.process(document)

    # Listed substrings are gone.
    assert "This is a header." not in documents[0].content
    assert "wiki" not in documents[0].content
    assert "🪲" not in documents[0].content
    # Text not listed in remove_substrings is untouched.
    assert "whitespace" in documents[0].content
    assert "\b\b\b" in documents[0].content