feat: Bump python to 3.10 for gpu docker image, use nvidia/cuda (#3701)

* Update pytorch base image

* Small corrections

* Revert to load_schema() call

* Revert to importing haystack for schema generation

Co-authored-by: Mayank Jobanputra <mayankjobanputra@gmail.com>
Vladimir Blagojevic 2022-12-30 11:34:27 +01:00 committed by GitHub
parent ae98961b74
commit 19e9b06b4e
2 changed files with 12 additions and 3 deletions

Dockerfile.base

@@ -3,6 +3,7 @@ ARG base_immage
 FROM $build_image AS build-image
+ARG DEBIAN_FRONTEND=noninteractive
 ARG haystack_version
 ARG haystack_extras
@@ -25,10 +26,11 @@ RUN git clone --depth=1 --branch=${haystack_version} https://github.com/deepset-
 WORKDIR /opt/haystack
 # Use a virtualenv we can copy over the next build stage
-RUN python -m venv --system-site-packages /opt/venv
+RUN python3 -m venv --system-site-packages /opt/venv
 ENV PATH="/opt/venv/bin:$PATH"
 RUN pip install --upgrade pip && \
+    pip install --no-cache-dir -U torchaudio && \
     pip install --no-cache-dir .${haystack_extras} && \
     pip install --no-cache-dir ./rest_api
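
The virtualenv built in this stage is later copied into the runtime stage. As a rough sketch of that pattern (not lines from this diff; the stage name is illustrative), the runtime stage looks like:

FROM $base_immage AS final
# Bring over the ready-made virtualenv from the build stage
COPY --from=build-image /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
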
@@ -40,3 +42,8 @@ COPY --from=build-image /opt/pdftotext /usr/local/bin
 RUN apt-get update && apt-get install -y libfontconfig && rm -rf /var/lib/apt/lists/*
 ENV PATH="/opt/venv/bin:$PATH"
+# Importing Haystack will generate and persist the json schema, we do this here for two reasons:
+# - the schema will be already there when the container runs, saving the generation overhead when a container starts
+# - derived images don't need to write the schema and can run with lower user privileges
+RUN python3 -c "import haystack"
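
To illustrate the second bullet in that comment, a derived image can rely on the schema the base image already persisted and drop root privileges. A minimal sketch follows; the base tag and user name are assumptions for illustration, not part of this commit:

# Hypothetical tag produced by the base-gpu bake target below
FROM deepset/haystack:base-gpu-latest
# The JSON schema was already generated and persisted by the base image,
# so the runtime user does not need write access to the Haystack install location.
RUN useradd --create-home haystack
USER haystack
# Importing haystack now only loads the existing schema instead of writing it.
RUN python3 -c "import haystack"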

docker-bake.hcl

@@ -54,8 +54,10 @@ target "base-gpu" {
   dockerfile = "Dockerfile.base"
   tags = ["${IMAGE_NAME}:base-gpu-${IMAGE_TAG_SUFFIX}"]
   args = {
-    build_image = "pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime"
-    base_immage = "pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime"
+    # pytorch/pytorch:1.13.1-cuda11.6 ships Python 3.10.8
+    build_image = "pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime"
+    base_immage = "pytorch/pytorch:1.13.1-cuda11.6-cudnn8-runtime"
     haystack_version = "${HAYSTACK_VERSION}"
     haystack_extras = notequal("",HAYSTACK_EXTRAS) ? "${HAYSTACK_EXTRAS}" : "[docstores-gpu,crawler,preprocessing,ocr,onnx-gpu]"
   }
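
As a usage sketch, assuming the referenced variables (HAYSTACK_VERSION, IMAGE_NAME, IMAGE_TAG_SUFFIX, HAYSTACK_EXTRAS) are declared as overridable bake variables and using illustrative values not taken from this commit, the target can be built and the Python bump checked with:

# Build the GPU base image defined by the "base-gpu" target
HAYSTACK_VERSION=v1.12.2 IMAGE_NAME=deepset/haystack IMAGE_TAG_SUFFIX=latest \
    docker buildx bake base-gpu

# The resulting image should now report Python 3.10.x
docker run --rm deepset/haystack:base-gpu-latest python3 --version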