mirror of
				https://github.com/deepset-ai/haystack.git
				synced 2025-10-31 09:49:48 +00:00 
			
		
		
		
	 be25655663
			
		
	
	
		be25655663
		
			
		
	
	
	
	
		
			
			* Initial commit, add search_engine * Add TopPSampler * Add more TopPSampler unit tests * Remove SearchEngineSampler (converted to TopPSampler) * Add some basic WebSearch unit tests * Rename unit tests * Add WebRetriever into agent_tools * Adjust to WebRetriever * Add WebRetriever mode [snippet|document] * Minor changes * SerperDev: add peopleAlsoAsk search results * First agent for hotpotqa * Making WebRetriever work on hotpotqa * refactor: minor WebRetriever improvements (#4377) * refactor: remove doc ids rebuild + antecipate cache * refactor: improve caching, fix Document ids * Minor WebRetriever improvements * Overlooked minor fixes * feat: add Bing API as search engine * refactor: let kwargs pass-through * feat: increase search context * check sampler result, improve batch typing * refactor: increase mypy compliance * Initial commit, add search_engine * Add TopPSampler * Add more TopPSampler unit tests * Remove SearchEngineSampler (converted to TopPSampler) * Add some basic WebSearch unit tests * Rename unit tests * Add WebRetriever into agent_tools * Adjust to WebRetriever * Add WebRetriever mode [snippet|document] * Minor changes * SerperDev: add peopleAlsoAsk search results * First agent for hotpotqa * Making WebRetriever work on hotpotqa * refactor: minor WebRetriever improvements (#4377) * refactor: remove doc ids rebuild + antecipate cache * refactor: improve caching, fix Document ids * Minor WebRetriever improvements * Overlooked minor fixes * feat: add Bing API as search engine * refactor: let kwargs pass-through * feat: increase search context * check sampler result, improve batch typing * refactor: increase mypy compliance * Fix mypy * Minor example fixes * Fix the descriptions * PR feedback updates * More fixes * TopPSampler: handle top p None value, add unit test * Add top_k to WebSearch * Use boilerpy3 instead trafilatura * Remove date finding * Add more WebRetriever docs * Refactor long methods * making the preprocessor optional * hide 
WebSearch and make NeuralWebSearch a pipeline * remove unused imports * add WebQAPipeline and split example into two * change example search engine to SerperDev * Turn off progress bars in WebRetriever's PreProcesssor * Agent tool examples - final updates * Add webqa test, search results ranking scores * Better answer box handling for SerperDev and SerpAPI * Minor fixes * pylint * pylint fixes * extract TopPSampler from WebRetriever * use sampler only for WebRetriever modes other than snippet * add web retriever tests * add web retriever tests * exclude rdflib@6.3.2 due to license issues * add test for preprocessed docs and kwargs examples in docstrings * Move test_webqa_pipeline to test/pipelines * change docstring for join_documents_and_scores * Use WebQAPipeline in examples/web_lfqa.py * Use WebQAPipeline in examples/web_lfqa.py * Move test_webqa_pipeline to e2e * Updated lg * Sampler added automatically in WebQAPipeline, no need to add it * Updated lg * Updated lg * :ignore Update agent tools examples to new templates (#4503) * Update examples to new templates * Add print back * fix linting and black format issues --------- Co-authored-by: Daniel Bichuetti <daniel.bichuetti@gmail.com> Co-authored-by: agnieszka-m <amarzec13@gmail.com> Co-authored-by: Julian Risch <julian.risch@deepset.ai>
		
			
				
	
	
		
			94 lines
		
	
	
		
			4.0 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			94 lines
		
	
	
		
			4.0 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| import os
 | |
| 
 | |
| import pytest
 | |
| 
 | |
| from haystack.nodes import PromptNode
 | |
| from haystack.nodes.retriever.web import WebRetriever
 | |
| from haystack.pipelines import ExtractiveQAPipeline, WebQAPipeline
 | |
| 
 | |
| from haystack.schema import Answer
 | |
| 
 | |
| 
 | |
@pytest.mark.integration
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs, document_store_with_docs):
    """End-to-end check of ExtractiveQAPipeline: retrieve top-10 docs, read top-3 answers,
    and verify the best answer's text, score range, metadata, and context."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 3}})
    assert prediction is not None
    # isinstance (not `type(...) ==`) so subclasses of Answer are also accepted
    assert isinstance(prediction["answers"][0], Answer)
    assert prediction["query"] == "Who lives in Berlin?"
    assert prediction["answers"][0].answer == "Carla"
    # this reader produces normalized scores, so the top score must be in [0, 1]
    assert prediction["answers"][0].score <= 1
    assert prediction["answers"][0].score >= 0
    assert prediction["answers"][0].meta["meta_field"] == "test1"
    assert prediction["answers"][0].context == "My name is Carla and I live in Berlin"

    # Reader top_k=3 must yield exactly 3 answers
    assert len(prediction["answers"]) == 3
 | |
| 
 | |
| 
 | |
@pytest.mark.integration
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_without_normalized_scores(reader_without_normalized_scores, retriever_with_docs):
    """Same extractive QA flow as above, but with a reader that emits raw (un-normalized)
    scores — the top answer's score is expected in the ~8-9 range rather than [0, 1]."""
    pipeline = ExtractiveQAPipeline(reader=reader_without_normalized_scores, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", params={"Reader": {"top_k": 3}})

    assert prediction is not None
    assert prediction["query"] == "Who lives in Berlin?"

    top_answer = prediction["answers"][0]
    assert top_answer.answer == "Carla"
    # raw reader logits for this fixture land between 8 and 9
    assert 8 <= top_answer.score <= 9
    assert top_answer.meta["meta_field"] == "test1"
    assert top_answer.context == "My name is Carla and I live in Berlin"

    # Reader top_k=3 must yield exactly 3 answers
    assert len(prediction["answers"]) == 3
 | |
| 
 | |
| 
 | |
@pytest.mark.integration  # added for consistency: every other extractive QA test in this module carries this marker
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
    """Verify that the top answer's offsets_in_context point at the exact answer span."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", params={"Retriever": {"top_k": 5}})

    start = prediction["answers"][0].offsets_in_context[0].start
    end = prediction["answers"][0].offsets_in_context[0].end

    # "Carla" occupies characters 11-16 of "My name is Carla and I live in Berlin"
    assert start == 11
    assert end == 16

    # the slice denoted by the offsets must reproduce the answer text itself
    assert prediction["answers"][0].context[start:end] == prediction["answers"][0].answer
 | |
| 
 | |
| 
 | |
@pytest.mark.integration
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
    """With top_k=1 on both retriever and reader, exactly one answer must come back."""
    params = {"Retriever": {"top_k": 1}, "Reader": {"top_k": 1}}
    qa_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = qa_pipeline.run(query="testing finder", params=params)

    assert prediction is not None
    assert len(prediction["answers"]) == 1
 | |
| 
 | |
| 
 | |
@pytest.mark.integration
@pytest.mark.skipif(
    not os.environ.get("OPENAI_API_KEY", None),
    reason="Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
)
@pytest.mark.skipif(
    not os.environ.get("SERPERDEV_API_KEY", None),
    reason="Please export an env var called SERPERDEV_API_KEY containing the SerperDev key to run this test.",
)
def test_webqa_pipeline():
    """Smoke-test WebQAPipeline end-to-end: web retrieval via SerperDev plus answer
    generation via an OpenAI PromptNode. Requires both API keys in the environment."""
    prompt_node = PromptNode(
        "text-davinci-003",
        api_key=os.environ.get("OPENAI_API_KEY"),
        max_length=256,
        default_prompt_template="question-answering-with-document-scores",
    )
    retriever = WebRetriever(api_key=os.environ.get("SERPERDEV_API_KEY"), top_search_results=2)
    qa_pipeline = WebQAPipeline(retriever=retriever, prompt_node=prompt_node)

    result = qa_pipeline.run(query="Who is the father of Arya Stark?")

    assert isinstance(result, dict)
    assert len(result["results"]) == 1
    # the generated text should mention the answer (Ned Stark) in some form
    answer = result["results"][0]
    assert "Stark" in answer or "NED" in answer
 |