mirror of
https://github.com/deepset-ai/haystack.git
synced 2025-07-22 08:21:24 +00:00

* Add endpoint to get documents by filter
* Add test for /documents/get_by_filter and extend the delete documents test
* Add rest_api/file-upload to .gitignore
* Make sure the document store is empty for each test
* Improve docstrings of delete_documents_by_filters and get_documents_by_filters

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
58 lines
1.9 KiB
Python
58 lines
1.9 KiB
Python
from typing import Dict, List, Optional, Union, Any
|
|
from pydantic import BaseModel, Field
|
|
from haystack import Document
|
|
|
|
|
|
class QueryRequest(BaseModel):
    """Request body for a query endpoint.

    :param query: The query string to run.
    :param params: Optional free-form parameters forwarded alongside the query
        (presumably pipeline/component settings — confirm against the endpoint handler).
    """

    query: str
    params: Optional[dict] = None
|
|
|
|
|
|
class FilterRequest(BaseModel):
    """Request body carrying document filters.

    ``filters`` maps a field name to a single accepted value, a list of accepted
    values, or ``None``; the whole mapping may be ``None`` to apply no filtering.
    """

    filters: Optional[Dict[str, Optional[Union[str, List[str]]]]] = None
|
|
|
|
|
|
class QueryAnswer(BaseModel):
    """One extracted answer within a query response.

    All fields are optional: an empty/partial answer (e.g. a no-answer
    prediction) can be represented with ``None`` values.
    """

    answer: Optional[str]            # the extracted answer string
    question: Optional[str]          # the question this answer responds to
    score: Optional[float] = None    # raw model score
    probability: Optional[float] = None  # score normalized to a probability-like value
    context: Optional[str]           # text snippet surrounding the answer
    # NOTE(review): offset_start/offset_end appear to be relative to `context`,
    # while *_in_doc are relative to the full document — confirm against the
    # code that populates these.
    offset_start: Optional[int]
    offset_end: Optional[int]
    offset_start_in_doc: Optional[int]
    offset_end_in_doc: Optional[int]
    document_id: Optional[str] = None    # id of the source document
    meta: Optional[Dict[str, Any]]       # arbitrary metadata attached to the answer
|
|
|
|
|
|
class QueryResponse(BaseModel):
    """Response body for a query: the original query plus its ranked answers.

    :param query: The query string as received.
    :param answers: The list of :class:`QueryAnswer` results.
    """

    query: str
    answers: List[QueryAnswer]
|
|
|
|
|
|
class DocumentResponse(BaseModel):
    """Serialized representation of a document returned by the REST API.

    :param text: The document's text content (required).
    :param id: The document's identifier, if assigned.
    :param score: Optional relevance score for the document.
    :param question: Optional question associated with the document.
    :param meta: Arbitrary metadata attached to the document, or ``None``.
    :param id_hash_keys: Optional list of field names used to compute the id hash.
    """

    text: str
    id: Optional[str] = None
    score: Optional[float] = None
    question: Optional[str] = None
    # Fix: was `Dict[str, Any] = None` — a non-Optional annotation with a None
    # default, so passing `meta=None` explicitly failed validation and static
    # type checkers flagged the default. `Optional[...]` matches the default
    # and is backward-compatible (accepts everything the old field accepted).
    meta: Optional[Dict[str, Any]] = None
    id_hash_keys: Optional[List[str]] = None
|
|
|
|
|
|
class ExtractiveQAFeedback(BaseModel):
    """User feedback on a single extractive-QA result.

    Field-level semantics are documented in the ``Field(description=...)``
    arguments below, which are also surfaced in the generated API schema.
    """

    question: str = Field(..., description="The question input by the user, i.e., the query.")
    is_correct_answer: bool = Field(..., description="Whether the answer is correct or not.")
    document_id: str = Field(..., description="The document in the query result for which feedback is given.")
    model_id: Optional[int] = Field(None, description="The model used for the query.")
    is_correct_document: bool = Field(
        ...,
        description="In case of negative feedback, there could be two cases; incorrect answer but correct "
        "document & incorrect document. This flag denotes if the returned document was correct.",
    )
    answer: str = Field(..., description="The answer string.")
    offset_start_in_doc: int = Field(
        ..., description="The answer start offset in the original doc. Only required for doc-qa feedback."
    )
|