# Mirror of https://github.com/deepset-ai/haystack.git
# Synced 2026-01-05 03:28:09 +00:00
#
# Commit message (from mirror): Install haystack with required extras; remove
# whitespaces; Add sleep; Add s for seconds; Move container initialization in
# workflow; Update e2e.yml, add nightly run; use new folder for initial e2e
# test; use file hash for caching and trigger on push to branch; remove \n
# from model names read from file; remove trigger on push to branch.
# Co-authored-by: Silvano Cerza <3314350+silvanocerza@users.noreply.github.com>
# Co-authored-by: bogdankostic <bogdankostic@web.de>
# If you change this name also do it in ci_metrics.yml
|
|
name: end-to-end
|
|
|
|
on:
|
|
workflow_dispatch: # Activate this workflow manually
|
|
schedule:
|
|
- cron: "0 0 * * *"
|
|
|
|
env:
|
|
PYTHON_VERSION: "3.8"
|
|
|
|
jobs:
|
|
e2e:
|
|
timeout-minutes: 60
|
|
strategy:
|
|
fail-fast: false # Avoid cancelling the others if one of these fails
|
|
matrix:
|
|
folder:
|
|
- "document_search"
|
|
|
|
runs-on: ubuntu-latest
|
|
|
|
steps:
|
|
- uses: actions/checkout@v3
|
|
|
|
- uses: actions/setup-python@v4
|
|
with:
|
|
python-version: ${{ env.PYTHON_VERSION }}
|
|
|
|
- name: Run Elasticsearch
|
|
run: |
|
|
docker run -d -p 9200:9200 -e "discovery.type=single-node" -e "ES_JAVA_OPTS=-Xms128m -Xmx256m" elasticsearch:7.9.2
|
|
|
|
- name: Run Opensearch
|
|
run: |
|
|
docker run -d -p 9201:9200 -p 9600:9600 -e "discovery.type=single-node" opensearchproject/opensearch:1.3.5
|
|
|
|
- name: Run Weaviate
|
|
run: docker run -d -p 8080:8080 --name haystack_test_weaviate --env AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED='true' --env PERSISTENCE_DATA_PATH='/var/lib/weaviate' --env ENABLE_EXPERIMENTAL_BM25='true' --env DISK_USE_READONLY_PERCENTAGE='95' semitechnologies/weaviate:1.17.2
|
|
|
|
- name: Install Haystack
|
|
run: pip install .[inference,elasticsearch7,faiss,weaviate,opensearch,dev]
|
|
|
|
- name: Cache HF models
|
|
id: cache-hf-models
|
|
uses: actions/cache@v3
|
|
with:
|
|
path: ./e2e
|
|
key: ${{ runner.os }}-${{ hashFiles('**/models_to_cache.txt') }}
|
|
env:
|
|
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 15
|
|
- name: Download models
|
|
if: steps.cache-hf-models.outputs.cache-hit != 'true'
|
|
shell: python
|
|
run: |
|
|
from transformers import AutoModel
|
|
with open("./e2e/models_to_cache.txt") as file:
|
|
AutoModel.from_pretrained(file.readline().rstrip())
|
|
- name: Run tests
|
|
env:
|
|
TOKENIZERS_PARALLELISM: 'false' # Avoid logspam by tokenizers
|
|
# we add "and not document_store" to exclude the tests that were ported to the new strategy
|
|
run: |
|
|
pytest e2e/${{ matrix.folder }}
|