diff --git a/.github/workflows/docker-openmetadata-airflow.yml b/.github/workflows/docker-openmetadata-airflow.yml deleted file mode 100644 index bbd411f41df..00000000000 --- a/.github/workflows/docker-openmetadata-airflow.yml +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2021 Collate -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: docker-openmetadata-airflow docker -on: - workflow_dispatch: - inputs: - tag: - description: "Input tag" - required: true - release: - types: [published] - -jobs: - push_to_docker_hub: - runs-on: ubuntu-latest - env: - input: ${{ github.event.inputs.tag }} - - steps: - - name: Check trigger type - if: ${{ env.input == '' }} - run: echo "input=0.12.0" >> $GITHUB_ENV - - - name: Check out the Repo - uses: actions/checkout@v2 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_OPENMETADATA_USERNAME }} - password: ${{ secrets.DOCKERHUB_OPENMETADATA_TOKEN }} - - - name: Build and push - uses: docker/build-push-action@v2 - with: - context: . - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name == 'release' }} - # Update tags before every release - tags: 'openmetadata/airflow:${{ env.input }},openmetadata/airflow:latest' - file: ./docker/airflow/Dockerfile diff --git a/docker/local-metadata/docker-compose-postgres.yml b/docker/local-metadata/docker-compose-postgres.yml index 65bb75a7e37..ed8f9571d06 100644 --- a/docker/local-metadata/docker-compose-postgres.yml +++ b/docker/local-metadata/docker-compose-postgres.yml @@ -32,8 +32,7 @@ services: ports: - "5432:5432" networks: - local_app_net: - ipv4_address: 172.16.239.10 + - local_app_net healthcheck: test: psql -U postgres -tAc 'select 1' -d openmetadata_db interval: 15s @@ -47,8 +46,7 @@ services: - discovery.type=single-node - ES_JAVA_OPTS=-Xms1024m -Xmx1024m networks: - local_app_net: - ipv4_address: 172.16.239.11 + - local_app_net expose: - 9200 - 9300 @@ -121,9 +119,6 @@ services: expose: - 8585 - 8586 - - 9200 - - 9300 - - 5432 ports: - "8585:8585" - "8586:8586" @@ -133,15 +128,14 @@ services: postgresql: condition: service_healthy networks: - local_app_net: - ipv4_address: 172.16.239.13 + - local_app_net healthcheck: test: [ "CMD", "curl", "-f", "http://localhost:8586/healthcheck" ] ingestion: build: context: ../../. 
- dockerfile: ingestion/Dockerfile_local + dockerfile: ingestion/Dockerfile.ci args: INGESTION_DEPENDENCY: ${INGESTION_DEPENDENCY:-all} container_name: openmetadata_ingestion @@ -153,12 +147,22 @@ services: openmetadata-server: condition: service_healthy environment: + AIRFLOW__API__AUTH_BACKENDS: airflow.api.auth.backend.basic_auth + AIRFLOW__CORE__EXECUTOR: LocalExecutor + AIRFLOW__LINEAGE__BACKEND: airflow_provider_openmetadata.lineage.openmetadata.OpenMetadataLineageBackend + AIRFLOW__LINEAGE__AIRFLOW_SERVICE_NAME: airflow_docker + AIRFLOW__LINEAGE__OPENMETADATA_API_ENDPOINT: http://openmetadata-server:8585/api + AIRFLOW__LINEAGE__AUTH_PROVIDER_TYPE: no-auth # Update this if you are using SSO + AIRFLOW__OPENMETADATA_AIRFLOW_APIS__DAG_GENERATED_CONFIGS: "/opt/airflow/dag_generated_configs" DB_HOST: ${DB_HOST:-postgresql} DB_PORT: ${DB_PORT:-5432} AIRFLOW_DB: ${AIRFLOW_DB:-airflow_db} DB_USER: ${DB_USER:-airflow_user} DB_SCHEME: ${DB_SCHEME:-postgresql+psycopg2} DB_PASSWORD: ${DB_PASSWORD:-airflow_pass} + entrypoint: /bin/bash + command: + - "/opt/airflow/ingestion_dependency.sh" expose: - 8080 ports: @@ -166,9 +170,8 @@ services: networks: - local_app_net volumes: - - /var/run/docker.sock:/var/run/docker.sock - - ingestion-volume-dag-airflow:/airflow/dag_generated_configs - - ingestion-volume-dags:/ingestion/examples/airflow/dags + - ingestion-volume-dag-airflow:/opt/airflow/dag_generated_configs + - ingestion-volume-dags:/opt/airflow/airflow/dags - ingestion-volume-tmp:/tmp networks: diff --git a/docker/local-metadata/docker-compose.yml b/docker/local-metadata/docker-compose.yml index f8806e91b9b..3c91f27dd2e 100644 --- a/docker/local-metadata/docker-compose.yml +++ b/docker/local-metadata/docker-compose.yml @@ -31,8 +31,7 @@ services: ports: - "3306:3306" networks: - local_app_net: - ipv4_address: 172.16.239.10 + - local_app_net healthcheck: test: mysql --user=root --password=$$MYSQL_ROOT_PASSWORD --silent --execute "use openmetadata_db" interval: 15s @@ -46,8 +45,7 @@ services: - discovery.type=single-node - ES_JAVA_OPTS=-Xms1024m -Xmx1024m networks: - local_app_net: - ipv4_address: 172.16.239.11 + - local_app_net expose: - 9200 - 9300 @@ -119,9 +117,6 @@ services: expose: - 8585 - 8586 - - 9200 - - 9300 - - 3306 ports: - "8585:8585" - "8586:8586" @@ -131,18 +126,34 @@ services: mysql: condition: service_healthy networks: - local_app_net: - ipv4_address: 172.16.239.13 + - local_app_net healthcheck: test: [ "CMD", "curl", "-f", "http://localhost:8586/healthcheck" ] ingestion: build: context: ../../. 
- dockerfile: ingestion/Dockerfile_local + dockerfile: ingestion/Dockerfile.ci args: INGESTION_DEPENDENCY: ${INGESTION_DEPENDENCY:-all} container_name: openmetadata_ingestion + environment: + AIRFLOW__API__AUTH_BACKENDS: airflow.api.auth.backend.basic_auth + AIRFLOW__CORE__EXECUTOR: LocalExecutor + AIRFLOW__LINEAGE__BACKEND: airflow_provider_openmetadata.lineage.openmetadata.OpenMetadataLineageBackend + AIRFLOW__LINEAGE__AIRFLOW_SERVICE_NAME: airflow_docker + AIRFLOW__LINEAGE__OPENMETADATA_API_ENDPOINT: http://openmetadata-server:8585/api + AIRFLOW__LINEAGE__AUTH_PROVIDER_TYPE: no-auth # Update this if you are using SSO + AIRFLOW__OPENMETADATA_AIRFLOW_APIS__DAG_GENERATED_CONFIGS: "/opt/airflow/dag_generated_configs" + DB_HOST: ${DB_HOST:-mysql} + DB_PORT: ${DB_PORT:-3306} + AIRFLOW_DB: ${AIRFLOW_DB:-airflow_db} + AIRFLOW_DB_SCHEME: ${AIRFLOW_DB_SCHEME:-mysql+pymysql} + DB_USER: ${DB_USER:-airflow_user} + DB_PASSWORD: ${DB_PASSWORD:-airflow_pass} + entrypoint: /bin/bash + command: + - "/opt/airflow/ingestion_dependency.sh" depends_on: elasticsearch: condition: service_started @@ -157,9 +168,8 @@ services: networks: - local_app_net volumes: - - /var/run/docker.sock:/var/run/docker.sock - - ingestion-volume-dag-airflow:/airflow/dag_generated_configs - - ingestion-volume-dags:/ingestion/examples/airflow/dags + - ingestion-volume-dag-airflow:/opt/airflow/dag_generated_configs + - ingestion-volume-dags:/opt/airflow/airflow/dags - ingestion-volume-tmp:/tmp networks: diff --git a/docker/metadata/docker-compose-postgres.yml b/docker/metadata/docker-compose-postgres.yml index e86673e52f4..cb4f964ac86 100644 --- a/docker/metadata/docker-compose-postgres.yml +++ b/docker/metadata/docker-compose-postgres.yml @@ -28,8 +28,7 @@ services: ports: - "5432:5432" networks: - app_net: - ipv4_address: 172.16.240.10 + - app_net healthcheck: test: psql -U postgres -tAc 'select 1' -d openmetadata_db interval: 15s @@ -43,8 +42,7 @@ services: - discovery.type=single-node - ES_JAVA_OPTS=-Xms1024m -Xmx1024m networks: - app_net: - ipv4_address: 172.16.240.11 + - app_net ports: - "9200:9200" - "9300:9300" @@ -52,7 +50,7 @@ services: openmetadata-server: container_name: openmetadata_server restart: always - image: openmetadata/server:0.11.4 + image: openmetadata/server:0.12.0 environment: ELASTICSEARCH_HOST: elasticsearch # OpenMetadata Server Authentication Configuration @@ -111,9 +109,6 @@ services: expose: - 8585 - 8586 - - 9200 - - 9300 - - 5432 ports: - "8585:8585" - "8586:8586" @@ -123,17 +118,13 @@ services: postgresql: condition: service_healthy networks: - app_net: - ipv4_address: 172.16.240.13 - extra_hosts: - - "postgresql:172.16.240.10" - - "elasticsearch:172.16.240.11" + - app_net healthcheck: test: [ "CMD", "curl", "-f", "http://localhost:8586/healthcheck" ] ingestion: container_name: openmetadata_ingestion - image: openmetadata/ingestion:0.11.4 + image: openmetadata/ingestion:0.12.0 depends_on: elasticsearch: condition: service_started @@ -142,26 +133,31 @@ services: openmetadata-server: condition: service_healthy environment: + AIRFLOW__API__AUTH_BACKENDS: airflow.api.auth.backend.basic_auth + AIRFLOW__CORE__EXECUTOR: LocalExecutor + AIRFLOW__LINEAGE__BACKEND: airflow_provider_openmetadata.lineage.openmetadata.OpenMetadataLineageBackend + AIRFLOW__LINEAGE__AIRFLOW_SERVICE_NAME: airflow_docker + AIRFLOW__LINEAGE__OPENMETADATA_API_ENDPOINT: http://openmetadata-server:8585/api + AIRFLOW__LINEAGE__AUTH_PROVIDER_TYPE: no-auth # Update this if you are using SSO + 
AIRFLOW__OPENMETADATA_AIRFLOW_APIS__DAG_GENERATED_CONFIGS: "/opt/airflow/dag_generated_configs" DB_HOST: ${DB_HOST:-postgresql} DB_PORT: ${DB_PORT:-5432} AIRFLOW_DB: ${AIRFLOW_DB:-airflow_db} - AIRFLOW_DB_SCHEME: ${AIRFLOW_DB_SCHEME:-postgresql+psycopg2} DB_USER: ${DB_USER:-airflow_user} - DB_PASSWORD: ${DB_PASSWORD:-airflow_pass} DB_SCHEME: ${DB_SCHEME:-postgresql+psycopg2} + DB_PASSWORD: ${DB_PASSWORD:-airflow_pass} + entrypoint: /bin/bash + command: + - "/opt/airflow/ingestion_dependency.sh" expose: - 8080 ports: - "8080:8080" networks: - app_net - extra_hosts: - - "postgresql:172.16.240.10" - - "localhost:172.16.240.11" - - "localhost:172.16.240.13" volumes: - - ingestion-volume-dag-airflow:/airflow/dag_generated_configs - - ingestion-volume-dags:/ingestion/examples/airflow/dags + - ingestion-volume-dag-airflow:/opt/airflow/dag_generated_configs + - ingestion-volume-dags:/opt/airflow/dags - ingestion-volume-tmp:/tmp networks: diff --git a/docker/metadata/docker-compose.yml b/docker/metadata/docker-compose.yml index bc23856392f..2909285dde1 100644 --- a/docker/metadata/docker-compose.yml +++ b/docker/metadata/docker-compose.yml @@ -25,8 +25,7 @@ services: expose: - 3306 networks: - app_net: - ipv4_address: 172.16.240.10 + - app_net healthcheck: test: mysql --user=root --password=$$MYSQL_ROOT_PASSWORD --silent --execute "use openmetadata_db" interval: 15s @@ -40,8 +39,7 @@ services: - discovery.type=single-node - ES_JAVA_OPTS=-Xms1024m -Xmx1024m networks: - app_net: - ipv4_address: 172.16.240.11 + - app_net ports: - "9200:9200" - "9300:9300" @@ -120,11 +118,7 @@ services: mysql: condition: service_healthy networks: - app_net: - ipv4_address: 172.16.240.13 - extra_hosts: - - "localhost:172.16.240.10" - - "elasticsearch:172.16.240.11" + - app_net healthcheck: test: [ "CMD", "curl", "-f", "http://localhost:8586/healthcheck" ] @@ -139,25 +133,31 @@ services: openmetadata-server: condition: service_healthy environment: + AIRFLOW__API__AUTH_BACKENDS: airflow.api.auth.backend.basic_auth + AIRFLOW__CORE__EXECUTOR: LocalExecutor + AIRFLOW__LINEAGE__BACKEND: airflow_provider_openmetadata.lineage.openmetadata.OpenMetadataLineageBackend + AIRFLOW__LINEAGE__AIRFLOW_SERVICE_NAME: airflow_docker + AIRFLOW__LINEAGE__OPENMETADATA_API_ENDPOINT: http://openmetadata-server:8585/api + AIRFLOW__LINEAGE__AUTH_PROVIDER_TYPE: no-auth # Update this if you are using SSO + AIRFLOW__OPENMETADATA_AIRFLOW_APIS__DAG_GENERATED_CONFIGS: "/opt/airflow/dag_generated_configs" DB_HOST: ${DB_HOST:-mysql} DB_PORT: ${DB_PORT:-3306} AIRFLOW_DB: ${AIRFLOW_DB:-airflow_db} AIRFLOW_DB_SCHEME: ${AIRFLOW_DB_SCHEME:-mysql+pymysql} DB_USER: ${DB_USER:-airflow_user} DB_PASSWORD: ${DB_PASSWORD:-airflow_pass} + entrypoint: /bin/bash + command: + - "/opt/airflow/ingestion_dependency.sh" expose: - 8080 ports: - "8080:8080" networks: - app_net - extra_hosts: - - "localhost:172.16.240.10" - - "localhost:172.16.240.11" - - "localhost:172.16.240.13" volumes: - - ingestion-volume-dag-airflow:/airflow/dag_generated_configs - - ingestion-volume-dags:/ingestion/examples/airflow/dags + - ingestion-volume-dag-airflow:/opt/airflow/dag_generated_configs + - ingestion-volume-dags:/opt/airflow/dags - ingestion-volume-tmp:/tmp networks: diff --git a/ingestion/Dockerfile b/ingestion/Dockerfile index ebdf21f41ad..f9ff5460e63 100644 --- a/ingestion/Dockerfile +++ b/ingestion/Dockerfile @@ -1,58 +1,54 @@ -FROM python:3.9-slim as base -ENV AIRFLOW_HOME=/airflow -RUN apt-get update && \ - apt-get install -y build-essential freetds-bin freetds-dev gcc 
libevent-dev libffi-dev libpq-dev librdkafka-dev libsasl2-dev libsasl2-modules libssl-dev libxml2 netcat openjdk-11-jre openssl postgresql postgresql-contrib python3.9-dev tdsodbc unixodbc unixodbc-dev wget vim --no-install-recommends && \ - rm -rf /var/lib/apt/lists/* - -# Manually fix security vulnerability from curl -# - https://security.snyk.io/vuln/SNYK-DEBIAN11-CURL-2936229 -# Add it back to the usual apt-get install once a fix for Debian is released -RUN wget https://curl.se/download/curl-7.84.0.tar.gz && \ - tar -xvf curl-7.84.0.tar.gz && cd curl-7.84.0 && \ - ./configure --with-openssl && make && make install - - -FROM base as airflow -ENV AIRFLOW_VERSION=2.3.3 - -# install odbc driver -RUN apt-get update && \ - apt-get install -y gnupg && \ - curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \ - curl https://packages.microsoft.com/config/debian/11/prod.list > /etc/apt/sources.list.d/mssql-release.list && \ - apt-get update && \ - ACCEPT_EULA=Y apt-get install -y msodbcsql18 && \ - rm -rf /var/lib/apt/lists/* - - -ENV CONSTRAINT_URL="https://raw.githubusercontent.com/apache/airflow/constraints-${AIRFLOW_VERSION}/constraints-3.9.txt" -# Add docker provider for the DockerOperator +FROM apache/airflow:2.3.3-python3.9 +USER root +RUN curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - +RUN curl https://packages.microsoft.com/config/debian/11/prod.list > /etc/apt/sources.list.d/mssql-release.list +# Install Dependencies (listed in alphabetical order) +RUN apt-get update \ + && apt-get install -y build-essential \ + default-libmysqlclient-dev \ + freetds-bin \ + freetds-dev \ + gcc \ + gnupg \ + libevent-dev \ + libffi-dev \ + libpq-dev \ + librdkafka-dev \ + libsasl2-dev \ + libsasl2-modules \ + libssl-dev \ + libxml2 \ + openjdk-11-jre \ + openssl \ + postgresql \ + postgresql-contrib \ + tdsodbc \ + unixodbc \ + unixodbc-dev \ + wget --no-install-recommends \ + # Accept MSSQL ODBC License + && ACCEPT_EULA=Y apt-get install -y msodbcsql18 \ + && rm -rf /var/lib/apt/lists/* +# Required for Starting Ingestion Container in Docker Compose +COPY --chown=airflow:airflow ingestion/ingestion_dependency.sh /opt/airflow +# Required for Ingesting Sample Data +COPY --chown=airflow:airflow ingestion/examples/sample_data /home/airflow/ingestion/examples/sample_data +# Required for Airflow DAGs of Sample Data +COPY --chown=airflow:airflow ingestion/examples/airflow/dags /opt/airflow/dags +# Provide Execute Permissions to shell script +RUN chmod +x /opt/airflow/ingestion_dependency.sh +USER airflow +# Argument to provide for Ingestion Dependencies to install. Defaults to all +ARG INGESTION_DEPENDENCY="all" RUN pip install --upgrade pip -RUN pip install "apache-airflow[docker]==${AIRFLOW_VERSION}" --constraint "${CONSTRAINT_URL}" - - -FROM airflow as apis -WORKDIR /openmetadata-airflow-apis -COPY openmetadata-airflow-apis /openmetadata-airflow-apis - -RUN pip install "." 
- -FROM apis as ingestion -WORKDIR /ingestion -COPY ingestion /ingestion - -ARG INGESTION_DEPENDENCY=all -RUN pip install --upgrade ".[${INGESTION_DEPENDENCY}]" - +RUN pip install --upgrade openmetadata-managed-apis --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-2.3.3/constraints-3.9.txt" +RUN pip install --upgrade openmetadata-ingestion[${INGESTION_DEPENDENCY}] # Uninstalling psycopg2-binary and installing psycopg2 instead # because the psycopg2-binary generates a architecture specific error # while authrenticating connection with the airflow, psycopg2 solves this error RUN pip uninstall psycopg2-binary -y -RUN pip install psycopg2 - - -RUN airflow db init -RUN cp -r /ingestion/airflow.cfg /airflow/airflow.cfg -RUN chmod 755 ingestion_dependency.sh -EXPOSE 8080 -CMD [ "./ingestion_dependency.sh" ] +RUN pip install psycopg2 mysqlclient +# Make required folders for openmetadata-airflow-apis +RUN mkdir -p /opt/airflow/dag_generated_configs +# This is required as it's responsible to create airflow.cfg file +RUN airflow db init && rm -f /opt/airflow/airflow.db diff --git a/docker/airflow/Dockerfile b/ingestion/Dockerfile.ci similarity index 54% rename from docker/airflow/Dockerfile rename to ingestion/Dockerfile.ci index 42eef80b1cc..c6e3d8294e8 100644 --- a/docker/airflow/Dockerfile +++ b/ingestion/Dockerfile.ci @@ -5,39 +5,61 @@ RUN curl https://packages.microsoft.com/config/debian/11/prod.list > /etc/apt/so # Install Dependencies (listed in alphabetical order) RUN apt-get update \ && apt-get install -y build-essential \ + default-libmysqlclient-dev \ freetds-bin \ freetds-dev \ gcc \ gnupg \ libevent-dev \ libffi-dev \ - default-libmysqlclient-dev \ libpq-dev \ librdkafka-dev \ libsasl2-dev \ libsasl2-modules \ libssl-dev \ libxml2 \ + netcat \ openjdk-11-jre \ openssl \ postgresql \ postgresql-contrib \ tdsodbc \ unixodbc \ - unixodbc-dev --no-install-recommends \ + unixodbc-dev \ + vim \ + wget --no-install-recommends \ # Accept MSSQL ODBC License && ACCEPT_EULA=Y apt-get install -y msodbcsql18 \ && rm -rf /var/lib/apt/lists/* +# Required for Starting Ingestion Container in Docker Compose +COPY --chown=airflow:airflow ingestion/ingestion_dependency.sh /opt/airflow +# Required for Ingesting Sample Data +COPY --chown=airflow:airflow ingestion /home/airflow/ingestion + +COPY --chown=airflow:airflow openmetadata-airflow-apis /home/airflow/openmetadata-airflow-apis +# Required for Airflow DAGs of Sample Data +COPY --chown=airflow:airflow ingestion/examples/airflow/dags /opt/airflow/dags +# Provide Execute Permissions to shell script +RUN chmod +x /opt/airflow/ingestion_dependency.sh USER airflow +ARG AIRFLOW_CONSTRAINTS_LOCATION="https://raw.githubusercontent.com/apache/airflow/constraints-2.3.3/constraints-3.9.txt" # Argument to provide for Ingestion Dependencies to install. Defaults to all -ARG INGESTION_DEPENDENCY="all" RUN pip install --upgrade pip -RUN pip install --upgrade openmetadata-managed-apis --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-2.3.3/constraints-3.9.txt" -RUN pip install --upgrade openmetadata-ingestion[${INGESTION_DEPENDENCY}] + +WORKDIR /home/airflow/openmetadata-airflow-apis +RUN pip install "." 
+ +WORKDIR /home/airflow/ingestion +ARG INGESTION_DEPENDENCY="all" +RUN pip install --upgrade ".[${INGESTION_DEPENDENCY}]" + # Uninstalling psycopg2-binary and installing psycopg2 instead # because the psycopg2-binary generates a architecture specific error # while authrenticating connection with the airflow, psycopg2 solves this error RUN pip uninstall psycopg2-binary -y RUN pip install psycopg2 mysqlclient # Make required folders for openmetadata-airflow-apis -RUN mkdir -p /opt/airflow/dag_generated_configs \ No newline at end of file +RUN mkdir -p /opt/airflow/dag_generated_configs +EXPOSE 8080 +# This is required as it's responsible to create airflow.cfg file +RUN airflow db init && rm -f /opt/airflow/airflow.db diff --git a/ingestion/Dockerfile_local b/ingestion/Dockerfile_local deleted file mode 100644 index 7a9be7dfb6f..00000000000 --- a/ingestion/Dockerfile_local +++ /dev/null @@ -1,57 +0,0 @@ -FROM python:3.9-slim as base -ENV AIRFLOW_HOME=/airflow -RUN apt-get update && \ - apt-get install -y build-essential freetds-bin freetds-dev gcc libevent-dev libffi-dev libpq-dev librdkafka-dev \ - libsasl2-dev libsasl2-modules libssl-dev libxml2 netcat openjdk-11-jre openssl postgresql postgresql-contrib \ - python3.9-dev tdsodbc unixodbc unixodbc-dev wget vim --no-install-recommends && \ - rm -rf /var/lib/apt/lists/* - -# Manually fix security vulnerability from curl -# - https://security.snyk.io/vuln/SNYK-DEBIAN11-CURL-2936229 -# Add it back to the usual apt-get install once a fix for Debian is released -RUN wget https://curl.se/download/curl-7.84.0.tar.gz && \ - tar -xvf curl-7.84.0.tar.gz && cd curl-7.84.0 && \ - ./configure --with-openssl && make && make install - - -FROM base as airflow -ENV AIRFLOW_VERSION=2.3.3 - -# install odbc driver -RUN apt-get update && \ - apt-get install -y gnupg && \ - curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \ - curl https://packages.microsoft.com/config/debian/11/prod.list > /etc/apt/sources.list.d/mssql-release.list && \ - apt-get update && \ - ACCEPT_EULA=Y apt-get install -y msodbcsql18 && \ - rm -rf /var/lib/apt/lists/* - - -ENV CONSTRAINT_URL="https://raw.githubusercontent.com/apache/airflow/constraints-${AIRFLOW_VERSION}/constraints-3.9.txt" -# Add docker provider for the DockerOperator -RUN pip install "apache-airflow[docker]==${AIRFLOW_VERSION}" --constraint "${CONSTRAINT_URL}" - -FROM airflow as apis -WORKDIR /openmetadata-airflow-apis -COPY openmetadata-airflow-apis /openmetadata-airflow-apis - -RUN pip install "." - -FROM apis as ingestion -WORKDIR /ingestion -COPY ingestion /ingestion - -ARG INGESTION_DEPENDENCY -RUN pip install --upgrade ".[${INGESTION_DEPENDENCY}]" - -# Uninstalling psycopg2-binary and installing psycopg2 instead -# because the psycopg2-binary generates a architecture specific error -# while authrenticating connection with the airflow, psycopg2 solves this error -RUN pip uninstall psycopg2-binary -y -RUN pip install psycopg2 - -RUN airflow db init -RUN cp -r /ingestion/airflow.cfg /airflow/airflow.cfg -RUN chmod 755 ingestion_dependency.sh -EXPOSE 8080 -CMD [ "./ingestion_dependency.sh" ] diff --git a/ingestion/airflow.cfg b/ingestion/airflow.cfg deleted file mode 100644 index 060e5ffe37a..00000000000 --- a/ingestion/airflow.cfg +++ /dev/null @@ -1,1081 +0,0 @@ -# Used to build OpenMetadata Airflow Docker Image -[core] -# The folder where your airflow pipelines live, most likely a -# subfolder in a code repository. This path must be absolute. 
-dags_folder = /ingestion/examples/airflow/dags - -# Hostname by providing a path to a callable, which will resolve the hostname. -# The format is "package.function". -# -# For example, default value "socket.getfqdn" means that result from getfqdn() of "socket" -# package will be used as hostname. -# -# No argument should be required in the function specified. -# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address`` -hostname_callable = socket.getfqdn - -# Default timezone in case supplied date times are naive -# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam) -default_timezone = utc - -# The executor class that airflow should use. Choices include -# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``, -# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the -# full import path to the class when using a custom executor. -executor = LocalExecutor - -# This defines the maximum number of task instances that can run concurrently in Airflow -# regardless of scheduler count and worker count. Generally, this value is reflective of -# the number of task instances with the running state in the metadata database. -parallelism = 32 - -# The maximum number of task instances allowed to run concurrently in each DAG. To calculate -# the number of tasks that is running concurrently for a DAG, add up the number of running -# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``concurrency``, -# which is defaulted as ``dag_concurrency``. -max_active_tasks_per_dag = 16 - -# Are DAGs paused by default at creation -dags_are_paused_at_creation = True - -# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs -# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``, -# which is defaulted as ``max_active_runs_per_dag``. -max_active_runs_per_dag = 16 - -# The maximum number of queued dagruns for a single DAG. The scheduler will not create more DAG runs -# if it reaches the limit. This is not configurable at the DAG level. -max_queued_runs_per_dag = 16 - -# Whether to load the DAG examples that ship with Airflow. It's good to -# get started, but you probably want to set this to ``False`` in a production -# environment -load_examples = False - -# Whether to load the default connections that ship with Airflow. 
It's good to -# get started, but you probably want to set this to ``False`` in a production -# environment -load_default_connections = True - -# Path to the folder containing Airflow plugins -plugins_folder = /airflow/plugins - -# Should tasks be executed via forking of the parent process ("False", -# the speedier option) or by spawning a new python process ("True" slow, -# but means plugin changes picked up by tasks straight away) -execute_tasks_new_python_interpreter = False - -# Secret key to save connection passwords in the db -fernet_key = - -# Whether to disable pickling dags -donot_pickle = True - -# How long before timing out a python file import -dagbag_import_timeout = 30.0 - -# Should a traceback be shown in the UI for dagbag import errors, -# instead of just the exception message -dagbag_import_error_tracebacks = True - -# If tracebacks are shown, how many entries from the traceback should be shown -dagbag_import_error_traceback_depth = 2 - -# How long before timing out a DagFileProcessor, which processes a dag file -dag_file_processor_timeout = 50 - -# The class to use for running task instances in a subprocess. -# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class -# when using a custom task runner. -task_runner = StandardTaskRunner - -# If set, tasks without a ``run_as_user`` argument will be run with this user -# Can be used to de-elevate a sudo user running Airflow when executing tasks -default_impersonation = - -# What security module to use (for example kerberos) -security = - -# Turn unit test mode on (overwrites many configuration options with test -# values at runtime) -unit_test_mode = False - -# Whether to enable pickling for xcom (note that this is insecure and allows for -# RCE exploits). -enable_xcom_pickling = False - -# When a task is killed forcefully, this is the amount of time in seconds that -# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED -killed_task_cleanup_time = 60 - -# Whether to override params with dag_run.conf. If you pass some key-value pairs -# through ``airflow dags backfill -c`` or -# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. -dag_run_conf_overrides_params = True - -# When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``. -dag_discovery_safe_mode = True - -# The number of retries each task is going to have by default. Can be overridden at dag or task level. -default_task_retries = 0 - -# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. -min_serialized_dag_update_interval = 30 - -# Fetching serialized DAG can not be faster than a minimum interval to reduce database -# read rate. This config controls when your DAGs are updated in the Webserver -min_serialized_dag_fetch_interval = 10 - -# Whether to persist DAG files code in DB. -# If set to True, Webserver reads file contents from DB instead of -# trying to access files in a DAG folder. -# (Default is ``True``) -# Example: store_dag_code = True -# store_dag_code = - -# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store -# in the Database. -# All the template_fields for each of Task Instance are stored in the Database. -# Keeping this number small may cause an error when you try to view ``Rendered`` tab in -# TaskInstance view for older tasks. 
-max_num_rendered_ti_fields_per_task = 30 - -# On each dagrun check against defined SLAs -check_slas = True - -# Path to custom XCom class that will be used to store and resolve operators results -# Example: xcom_backend = path.to.CustomXCom -xcom_backend = airflow.models.xcom.BaseXCom - -# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``, -# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module. -lazy_load_plugins = True - -# By default Airflow providers are lazily-discovered (discovery and imports happen only when required). -# Set it to False, if you want to discover providers whenever 'airflow' is invoked via cli or -# loaded from module. -lazy_discover_providers = True - -# Hide sensitive Variables or Connection extra json keys from UI and task logs when set to True -# -# (Connection passwords are always hidden in logs) -hide_sensitive_var_conn_fields = True - -# A comma-separated list of extra sensitive keywords to look for in variables names or connection's -# extra JSON. -sensitive_var_conn_names = - -[database] -# The encoding for the databases -sql_engine_encoding = utf-8 - -# Number of times the code should be retried in case of DB Operational Errors. -# Not all transactions will be retried as it can cause undesired state. -# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. -max_db_retries = 3 - -# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have different encoding. -# This is particularly useful in case of mysql with utf8mb4 encoding because -# primary keys for XCom table has too big size and ``sql_engine_collation_for_ids`` should -# be set to ``utf8mb3_general_ci``. -# sql_engine_collation_for_ids = - -# The SqlAlchemy connection string to the metadata database. -# SqlAlchemy supports many different database engines. -# More information here: -# http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri -sql_alchemy_conn = mysql+pymysql://airflow_user:airflow_pass@mysql/airflow_db - -# If SqlAlchemy should pool database connections. -sql_alchemy_pool_enabled = True - -# The SqlAlchemy pool size is the maximum number of database connections -# in the pool. 0 indicates no limit. -sql_alchemy_pool_size = 5 - -# The maximum overflow size of the pool. -# When the number of checked-out connections reaches the size set in pool_size, -# additional connections will be returned up to this limit. -# When those additional connections are returned to the pool, they are disconnected and discarded. -# It follows then that the total number of simultaneous connections the pool will allow -# is pool_size + max_overflow, -# and the total number of "sleeping" connections the pool will allow is pool_size. -# max_overflow can be set to ``-1`` to indicate no overflow limit; -# no limit will be placed on the total number of concurrent connections. Defaults to ``10``. -sql_alchemy_max_overflow = 10 - -# The SqlAlchemy pool recycle is the number of seconds a connection -# can be idle in the pool before it is invalidated. This config does -# not apply to sqlite. If the number of DB connections is ever exceeded, -# a lower config value will allow the system to recover faster. -sql_alchemy_pool_recycle = 1800 - -# Check connection at the start of each connection pool checkout. -# Typically, this is a simple statement like "SELECT 1". 
-# More information here: -# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic -sql_alchemy_pool_pre_ping = True - -# The schema to use for the metadata database. -# SqlAlchemy supports databases with the concept of multiple schemas. -sql_alchemy_schema = - -# Import path for connect args in SqlAlchemy. Defaults to an empty dict. -# This is useful when you want to configure db engine args that SqlAlchemy won't parse -# in connection string. -# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args -# sql_alchemy_connect_args = - -[logging] - -# When you start an airflow worker, airflow starts a tiny web server -# subprocess to serve the workers local log files to the airflow main -# web server, who then builds pages and sends them to users. This defines -# the port on which the logs are served. It needs to be unused, and open -# visible from the main web server to connect into the workers. -worker_log_server_port = 8793 - -# The folder where airflow should store its log files -# This path must be absolute -base_log_folder = /airflow/logs - -# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. -# Set this to True if you want to enable remote logging. -remote_logging = False - -# Users must supply an Airflow connection id that provides access to the storage -# location. -remote_log_conn_id = - -# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default -# Credentials -# `__ will -# be used. -google_key_path = - -# Storage bucket URL for remote logging -# S3 buckets should start with "s3://" -# Cloudwatch log groups should start with "cloudwatch://" -# GCS buckets should start with "gs://" -# WASB buckets should start with "wasb" just to help Airflow select correct handler -# Stackdriver logs should start with "stackdriver://" -remote_base_log_folder = - -# Use server-side encryption for logs stored in S3 -encrypt_s3_logs = False - -# Logging level. -# -# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. -logging_level = INFO - -# Logging level for Flask-appbuilder UI. -# -# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. -fab_logging_level = WARN - -# Logging class -# Specify the class that will specify the logging configuration -# This class has to be on the python classpath -# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG -logging_config_class = - -# Flag to enable/disable Colored logs in Console -# Colour the logs when the controlling terminal is a TTY. -colored_console_log = True - -# Log format for when Colored logs is enabled -colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s -colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter - -# Format of Log line -log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s -simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s - -# Specify prefix pattern like mentioned below with stream handler TaskHandlerWithCustomFormatter -# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number} -task_log_prefix_template = - -# Formatting for how airflow generates file names/paths for each task run. 
-log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log - -# Formatting for how airflow generates file names for log -log_processor_filename_template = {{ filename }}.log - -# full path of dag_processor_manager logfile -dag_processor_manager_log_location = /airflow/logs/dag_processor_manager/dag_processor_manager.log - -# Name of handler to read task instance logs. -# Defaults to use ``task`` handler. -task_log_reader = task - -# A comma\-separated list of third-party logger names that will be configured to print messages to -# consoles\. -# Example: extra_loggers = connexion,sqlalchemy -extra_loggers = - -[metrics] - -# StatsD (https://github.com/etsy/statsd) integration settings. -# Enables sending metrics to StatsD. -statsd_on = False -statsd_host = localhost -statsd_port = 8125 -statsd_prefix = airflow - -# If you want to avoid sending all the available metrics to StatsD, -# you can configure an allow list of prefixes (comma separated) to send only the metrics that -# start with the elements of the list (e.g: "scheduler,executor,dagrun") -statsd_allow_list = - -# A function that validate the statsd stat name, apply changes to the stat name if necessary and return -# the transformed stat name. -# -# The function should have the following signature: -# def func_name(stat_name: str) -> str: -stat_name_handler = - -# To enable datadog integration to send airflow metrics. -statsd_datadog_enabled = False - -# List of datadog tags attached to all metrics(e.g: key1:value1,key2:value2) -statsd_datadog_tags = - -# If you want to utilise your own custom Statsd client set the relevant -# module path below. -# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up -# statsd_custom_client_path = - -[secrets] -# Full class name of secrets backend to enable (will precede env vars and metastore in search path) -# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend -backend = - -# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class. -# See documentation for the secrets backend you are using. JSON is expected. -# Example for AWS Systems Manager ParameterStore: -# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}`` -backend_kwargs = - -[cli] -# In what way should the cli access the API. The LocalClient will use the -# database directly, while the json_client will use the api running on the -# webserver -api_client = airflow.api.client.local_client - -# If you set web_server_url_prefix, do NOT forget to append it here, ex: -# ``endpoint_url = http://localhost:8080/myroot`` -# So api will look like: ``http://localhost:8080/myroot/api/experimental/...`` -endpoint_url = http://localhost:8080 - -[debug] -# Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first -# failed task. Helpful for debugging purposes. -fail_fast = False - -[api] -# Enables the deprecated experimental API. Please note that these APIs do not have access control. -# The authenticated user has full access. -# -# .. warning:: -# -# This `Experimental REST API `__ is -# deprecated since version 2.0. Please consider using -# `the Stable REST API `__. -# For more information on migration, see -# `UPDATING.md `_ -enable_experimental_api = False - -# How to authenticate users of the API. See -# https://airflow.apache.org/docs/apache-airflow/stable/security.html for possible values. 
-# ("airflow.api.auth.backend.default" allows all requests for historic reasons) -auth_backend = airflow.api.auth.backend.basic_auth - -# Used to set the maximum page limit for API requests -maximum_page_limit = 100 - -# Used to set the default page limit when limit is zero. A default limit -# of 100 is set on OpenApi spec. However, this particular default limit -# only work when limit is set equal to zero(0) from API requests. -# If no limit is supplied, the OpenApi spec default is used. -fallback_page_limit = 100 - -# The intended audience for JWT token credentials used for authorization. This value must match on the client and server sides. If empty, audience will not be tested. -# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com -google_oauth2_audience = - -# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on -# `the Application Default Credentials -# `__ will -# be used. -# Example: google_key_path = /files/service-account-json -google_key_path = - -# Used in response to a preflight request to indicate which HTTP -# headers can be used when making the actual request. This header is -# the server side response to the browser's -# Access-Control-Request-Headers header. -access_control_allow_headers = - -# Specifies the method or methods allowed when accessing the resource. -access_control_allow_methods = - -# Indicates whether the response can be shared with requesting code from the given origin. -access_control_allow_origin = - -[openmetadata_airflow_apis] -dag_generated_configs = /airflow/dag_generated_configs - -# this section is optional, the default auth provider for the secrets' manager service will be used if it is not set -# [openmetadata_secrets_manager] -# aws_access_key_id = -# aws_secret_access_key = -# aws_region = - -[atlas] -sasl_enabled = False -host = -port = 21000 -username = -password = - -[operators] -# The default owner assigned to each new operator, unless -# provided explicitly or passed via ``default_args`` -default_owner = airflow -default_cpus = 1 -default_ram = 512 -default_disk = 512 -default_gpus = 0 - -# Default queue that tasks get assigned to and that worker listen on. -default_queue = default - -# Is allowed to pass additional/unused arguments (args, kwargs) to the BaseOperator operator. -# If set to False, an exception will be thrown, otherwise only the console message will be displayed. -allow_illegal_arguments = False - -[hive] -# Default mapreduce queue for HiveOperator tasks -default_hive_mapred_queue = - -# Template for mapred_job_name in HiveOperator, supports the following named parameters -# hostname, dag_id, task_id, execution_date -# mapred_job_name_template = - -[webserver] -# The base url of your website as airflow cannot guess what domain or -# cname you are using. This is used in automated emails that -# airflow sends to point links to the right web server -base_url = http://localhost:8080 - -# Default timezone to display all dates in the UI, can be UTC, system, or -# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the -# default value of core/default_timezone will be used -# Example: default_ui_timezone = America/New_York -default_ui_timezone = UTC - -# The ip specified when starting the web server -web_server_host = 0.0.0.0 - -# The port on which to run the web server -web_server_port = 8080 - -# Paths to the SSL certificate and key for the web server. When both are -# provided SSL will be enabled. This does not change the web server port. 
-web_server_ssl_cert = - -# Paths to the SSL certificate and key for the web server. When both are -# provided SSL will be enabled. This does not change the web server port. -web_server_ssl_key = - -# Number of seconds the webserver waits before killing gunicorn master that doesn't respond -web_server_master_timeout = 120 - -# Number of seconds the gunicorn webserver waits before timing out on a worker -web_server_worker_timeout = 120 - -# Number of workers to refresh at a time. When set to 0, worker refresh is -# disabled. When nonzero, airflow periodically refreshes webserver workers by -# bringing up new ones and killing old ones. -worker_refresh_batch_size = 1 - -# Number of seconds to wait before refreshing a batch of workers. -worker_refresh_interval = 6000 - -# If set to True, Airflow will track files in plugins_folder directory. When it detects changes, -# then reload the gunicorn. -reload_on_plugin_change = False - -# Secret key used to run your flask app. It should be as random as possible. However, when running -# more than 1 instances of webserver, make sure all of them use the same ``secret_key`` otherwise -# one of them will error with "CSRF session token is missing". -secret_key = F1QV66vi/ZPNWXosYMgxxw== - -# Number of workers to run the Gunicorn web server -workers = 4 - -# The worker class gunicorn should use. Choices include -# sync (default), eventlet, gevent -worker_class = sync - -# Log files for the gunicorn webserver. '-' means log to stderr. -access_logfile = - - -# Log files for the gunicorn webserver. '-' means log to stderr. -error_logfile = - - -# Access log format for gunicorn webserver. -# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s" -# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format -access_logformat = - -# Expose the configuration file in the web server -expose_config = False - -# Expose hostname in the web server -expose_hostname = True - -# Expose stacktrace in the web server -expose_stacktrace = True - -# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times`` -dag_default_view = tree - -# Default DAG orientation. Valid values are: -# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top) -dag_orientation = LR - -# The amount of time (in secs) webserver will wait for initial handshake -# while fetching logs from other worker machine -log_fetch_timeout_sec = 5 - -# Time interval (in secs) to wait before next log fetching. -log_fetch_delay_sec = 2 - -# Distance away from page bottom to enable auto tailing. -log_auto_tailing_offset = 30 - -# Animation speed for auto tailing log display. -log_animation_speed = 1000 - -# By default, the webserver shows paused DAGs. Flip this to hide paused -# DAGs by default -hide_paused_dags_by_default = False - -# Consistent page size across all listing views in the UI -page_size = 100 - -# Define the color of navigation bar -navbar_color = #fff - -# Default dagrun to show in UI -default_dag_run_display_number = 25 - -# Enable werkzeug ``ProxyFix`` middleware for reverse proxy -enable_proxy_fix = False - -# Number of values to trust for ``X-Forwarded-For``. 
-# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/ -proxy_fix_x_for = 1 - -# Number of values to trust for ``X-Forwarded-Proto`` -proxy_fix_x_proto = 1 - -# Number of values to trust for ``X-Forwarded-Host`` -proxy_fix_x_host = 1 - -# Number of values to trust for ``X-Forwarded-Port`` -proxy_fix_x_port = 1 - -# Number of values to trust for ``X-Forwarded-Prefix`` -proxy_fix_x_prefix = 1 - -# Set secure flag on session cookie -cookie_secure = False - -# Set samesite policy on session cookie -cookie_samesite = Lax - -# Default setting for wrap toggle on DAG code and TI log views. -default_wrap = False - -# Allow the UI to be rendered in a frame -x_frame_enabled = True - -# Send anonymous user activity to your analytics tool -# choose from google_analytics, segment, or metarouter -# analytics_tool = - -# Unique ID of your account in the analytics tool -# analytics_id = - -# 'Recent Tasks' stats will show for old DagRuns if set -show_recent_stats_for_completed_runs = True - -# Update FAB permissions and sync security manager roles -# on webserver startup -update_fab_perms = True - -# The UI cookie lifetime in minutes. User will be logged out from UI after -# ``session_lifetime_minutes`` of non-activity -session_lifetime_minutes = 43200 - -# Sets a custom page title for the DAGs overview page and site title for all pages -# instance_name = - -[email] - -# Configuration email backend and whether to -# send email alerts on retry or failure -# Email backend to use -email_backend = airflow.utils.email.send_email_smtp - -# Email connection to use -email_conn_id = smtp_default - -# Whether email alerts should be sent when a task is retried -default_email_on_retry = True - -# Whether email alerts should be sent when a task failed -default_email_on_failure = True - -# File that will be used as the template for Email subject (which will be rendered using Jinja2). -# If not set, Airflow uses a base template. -# Example: subject_template = /path/to/my_subject_template_file -# subject_template = - -# File that will be used as the template for Email content (which will be rendered using Jinja2). -# If not set, Airflow uses a base template. -# Example: html_content_template = /path/to/my_html_content_template_file -# html_content_template = - -[smtp] - -# If you want airflow to send emails on retries, failure, and you want to use -# the airflow.utils.email.send_email_smtp function, you have to configure an -# smtp server here -smtp_host = localhost -smtp_starttls = True -smtp_ssl = False -# Example: smtp_user = airflow -# smtp_user = -# Example: smtp_password = airflow -# smtp_password = -smtp_port = 25 -smtp_mail_from = airflow@example.com -smtp_timeout = 30 -smtp_retry_limit = 5 - -[sentry] - -# Sentry (https://docs.sentry.io) integration. Here you can supply -# additional configuration options based on the Python platform. See: -# https://docs.sentry.io/error-reporting/configuration/?platform=python. -# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, -# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``. -# Enable error reporting to Sentry -sentry_on = false -sentry_dsn = - -[celery_kubernetes_executor] - -# This section only applies if you are using the ``CeleryKubernetesExecutor`` in -# ``[core]`` section above -# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. 
-# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), -# the task is executed via ``KubernetesExecutor``, -# otherwise via ``CeleryExecutor`` -kubernetes_queue = kubernetes - -[celery] - -# This section only applies if you are using the CeleryExecutor in -# ``[core]`` section above -# The app name that will be used by celery -celery_app_name = airflow.executors.celery_executor - -# The concurrency that will be used when starting workers with the -# ``airflow celery worker`` command. This defines the number of task instances that -# a worker will take, so size up your workers based on the resources on -# your worker box and the nature of your tasks -worker_concurrency = 16 - -# The maximum and minimum concurrency that will be used when starting workers with the -# ``airflow celery worker`` command (always keep minimum processes, but grow -# to maximum if necessary). Note the value should be max_concurrency,min_concurrency -# Pick these numbers based on resources on worker box and the nature of the task. -# If autoscale option is available, worker_concurrency will be ignored. -# http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale -# Example: worker_autoscale = 16,12 -# worker_autoscale = - -# Used to increase the number of tasks that a worker prefetches which can improve performance. -# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks -# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily -# blocked if there are multiple workers and one worker prefetches tasks that sit behind long -# running tasks while another worker has unutilized processes that are unable to process the already -# claimed blocked tasks. -# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits -# Example: worker_prefetch_multiplier = 1 -# worker_prefetch_multiplier = - -# Umask that will be used when starting workers with the ``airflow celery worker`` -# in daemon mode. This control the file-creation mode mask which determines the initial -# value of file permission bits for newly created files. -worker_umask = 0o077 - -# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally -# a sqlalchemy database. Refer to the Celery documentation for more information. -broker_url = redis://redis:6379/0 - -# The Celery result_backend. When a job finishes, it needs to update the -# metadata of the job. Therefore it will post a message on a message bus, -# or insert it into a database (depending of the backend) -# This status is used by the scheduler to update the state of the task -# The use of a database is highly recommended -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings -result_backend = db+postgresql://postgres:airflow@postgres/airflow - -# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start -# it ``airflow celery flower``. This defines the IP that Celery Flower runs on -flower_host = 0.0.0.0 - -# The root URL for Flower -# Example: flower_url_prefix = /flower -flower_url_prefix = - -# This defines the port that Celery Flower runs on -flower_port = 5555 - -# Securing Flower with Basic Authentication -# Accepts user:password pairs separated by a comma -# Example: flower_basic_auth = user1:password1,user2:password2 -flower_basic_auth = - -# How many processes CeleryExecutor uses to sync task state. 
-# 0 means to use max(1, number of cores - 1) processes. -sync_parallelism = 0 - -# Import path for celery configuration options -celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG -ssl_active = False -ssl_key = -ssl_cert = -ssl_cacert = - -# Celery Pool implementation. -# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``. -# See: -# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency -# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html -pool = prefork - -# The number of seconds to wait before timing out ``send_task_to_executor`` or -# ``fetch_celery_task_state`` operations. -operation_timeout = 1.0 - -# Celery task will report its status as 'started' when the task is executed by a worker. -# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted -# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob. -task_track_started = True - -# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear -# stalled tasks. -task_adoption_timeout = 600 - -# The Maximum number of retries for publishing task messages to the broker when failing -# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed. -task_publish_max_retries = 3 - -# Worker initialisation check to validate Metadata Database connection -worker_precheck = False - -[celery_broker_transport_options] - -# This section is for specifying options which can be passed to the -# underlying celery broker transport. See: -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options -# The visibility timeout defines the number of seconds to wait for the worker -# to acknowledge the task before the message is redelivered to another worker. -# Make sure to increase the visibility timeout to match the time of the longest -# ETA you're planning to use. -# visibility_timeout is only supported for Redis and SQS celery brokers. -# See: -# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options -# Example: visibility_timeout = 21600 -# visibility_timeout = - -[dask] - -# This section only applies if you are using the DaskExecutor in -# [core] section above -# The IP address and port of the Dask cluster's scheduler. -cluster_address = 127.0.0.1:8786 - -# TLS/ SSL settings to access a secured Dask scheduler. -tls_ca = -tls_cert = -tls_key = - -[scheduler] -# Task instances listen for external kill signal (when you clear tasks -# from the CLI or the UI), this defines the frequency at which they should -# listen (in seconds). -job_heartbeat_sec = 5 - -# How often (in seconds) to check and tidy up 'running' TaskInstancess -# that no longer have a matching DagRun -clean_tis_without_dagrun_interval = 15.0 - -# The scheduler constantly tries to trigger new tasks (look at the -# scheduler section in the docs for more information). This defines -# how often the scheduler should run (in seconds). -scheduler_heartbeat_sec = 5 - -# The number of times to try to schedule each DAG file -# -1 indicates unlimited number -num_runs = -1 - -# The number of seconds to wait between consecutive DAG file processing -scheduler_idle_sleep_time = 1 - -# Number of seconds after which a DAG file is parsed. The DAG file is parsed every -# ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after -# this interval. 
Keeping this number low will increase CPU usage. -min_file_process_interval = 30 - -# How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes. -dag_dir_list_interval = 300 - -# How often should stats be printed to the logs. Setting to 0 will disable printing stats -print_stats_interval = 30 - -# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled) -pool_metrics_interval = 5.0 - -# If the last scheduler heartbeat happened more than scheduler_health_check_threshold -# ago (in seconds), scheduler is considered unhealthy. -# This is used by the health check in the "/health" endpoint -scheduler_health_check_threshold = 30 - -# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs -orphaned_tasks_check_interval = 300.0 -child_process_log_directory = /airflow/logs/scheduler - -# Local task jobs periodically heartbeat to the DB. If the job has -# not heartbeat in this many seconds, the scheduler will mark the -# associated task instance as failed and will re-schedule the task. -scheduler_zombie_task_threshold = 300 - -# Turn off scheduler catchup by setting this to ``False``. -# Default behavior is unchanged and -# Command Line Backfills still work, but the scheduler -# will not do scheduler catchup if this is ``False``, -# however it can be set on a per DAG basis in the -# DAG definition (catchup) -catchup_by_default = True - -# This changes the batch size of queries in the scheduling main loop. -# If this is too high, SQL query performance may be impacted by one -# or more of the following: -# - reversion to full table scan -# - complexity of query predicate -# - excessive locking -# Additionally, you may hit the maximum allowable query length for your db. -# Set this to 0 for no limit (not advised) -max_tis_per_query = 512 - -# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries. -# If this is set to False then you should not run more than a single -# scheduler at once -use_row_level_locking = True - -# Max number of DAGs to create DagRuns for per scheduler loop. -max_dagruns_to_create_per_loop = 10 - -# How many DagRuns should a scheduler examine (and lock) when scheduling -# and queuing tasks. -max_dagruns_per_loop_to_schedule = 20 - -# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the -# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other -# dags in some circumstances -schedule_after_task_execution = True - -# The scheduler can run multiple processes in parallel to parse dags. -# This defines how many processes will run. -parsing_processes = 2 - -# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``. -# The scheduler will list and sort the dag files to decide the parsing order. -# -# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the -# recently modified DAGs first. -# * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the -# same host. This is useful when running with Scheduler in HA mode where each scheduler can -# parse different DAG files. -# * ``alphabetical``: Sort by filename -file_parsing_sort_mode = modified_time - -# Turn off scheduler use of cron intervals by setting this to False. -# DAGs submitted manually in the web UI or with trigger_dag will still run. 
-use_job_schedule = True - -# Allow externally triggered DagRuns for Execution Dates in the future -# Only has effect if schedule_interval is set to None in DAG -allow_trigger_in_future = False - -# DAG dependency detector class to use -dependency_detector = airflow.serialization.serialized_objects.DependencyDetector - -[kerberos] -ccache = /tmp/airflow_krb5_ccache - -# gets augmented with fqdn -principal = airflow -reinit_frequency = 3600 -kinit_path = kinit -keytab = airflow.keytab - -[github_enterprise] -api_rev = v3 - -[elasticsearch] -# Elasticsearch host -host = - -# Format of the log_id, which is used to query for a given tasks logs -log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number} - -# Used to mark the end of a log stream for a task -end_of_log_mark = end_of_log - -# Qualified URL for an elasticsearch_mapping frontend (like Kibana) with a template argument for log_id -# Code will construct log_id using the log_id template from the argument above. -# NOTE: The code will prefix the https:// automatically, don't include that here. -frontend = - -# Write the task logs to the stdout of the worker, rather than the default files -write_stdout = False - -# Instead of the default log formatter, write the log lines as JSON -json_format = False - -# Log fields to also attach to the json output, if enabled -json_fields = asctime, filename, lineno, levelname, message - -# The field where host name is stored (normally either `host` or `host.name`) -host_field = host - -# The field where offset is stored (normally either `offset` or `log.offset`) -offset_field = offset - -[elasticsearch_configs] -use_ssl = False -verify_certs = True - -[kubernetes] -# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored. -pod_template_file = - -# The repository of the Kubernetes Image for the Worker to Run -worker_container_repository = - -# The tag of the Kubernetes Image for the Worker to Run -worker_container_tag = - -# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` -namespace = default - -# If True, all worker pods will be deleted upon termination -delete_worker_pods = True - -# If False (and delete_worker_pods is True), -# failed worker pods will not be deleted so users can investigate them. -# This only prevents removal of worker pods where the worker itself failed, -# not when the task it ran failed. -delete_worker_pods_on_failure = False - -# Number of Kubernetes Worker Pod creation calls per scheduler loop. -# Note that the current default of "1" will only launch a single pod -# per-heartbeat. It is HIGHLY recommended that users increase this -# number to match the tolerance of their kubernetes cluster for -# better performance. -worker_pods_creation_batch_size = 1 - -# Allows users to launch pods in multiple namespaces. -# Will require creating a cluster-role for the scheduler -multi_namespace_mode = False - -# Use the service account kubernetes gives to pods to connect to kubernetes cluster. -# It's intended for clients that expect to be running inside a pod running on kubernetes. -# It will raise an exception if called from a process not running in a kubernetes environment. -in_cluster = True - -# When running with in_cluster=False change the default cluster_context or config_file -# options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. 
-# cluster_context = - -# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False -# config_file = - -# Keyword parameters to pass while calling a kubernetes client core_v1_api methods -# from Kubernetes Executor provided as a single line formatted JSON dictionary string. -# List of supported params are similar for all core_v1_apis, hence a single config -# variable for all apis. See: -# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py -kube_client_request_args = - -# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client -# ``core_v1_api`` method when using the Kubernetes Executor. -# This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` -# class defined here: -# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 -# Example: delete_option_kwargs = {"grace_period_seconds": 10} -delete_option_kwargs = - -# Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely -# when idle connection is time-outed on services like cloud load balancers or firewalls. -enable_tcp_keepalive = True - -# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has -# been idle for `tcp_keep_idle` seconds. -tcp_keep_idle = 120 - -# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond -# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds. -tcp_keep_intvl = 30 - -# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond -# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before -# a connection is considered to be broken. -tcp_keep_cnt = 6 - -# Set this to false to skip verifying SSL certificate of Kubernetes python client. -verify_ssl = True - -# How long in seconds a worker can be in Pending before it is considered a failure -worker_pods_pending_timeout = 300 - -# How often in seconds to check if Pending workers have exceeded their timeouts -worker_pods_pending_timeout_check_interval = 120 - -# How many pending pods to check for timeout violations in each check interval. -# You may want this higher if you have a very large cluster and/or use ``multi_namespace_mode``. -worker_pods_pending_timeout_batch_size = 100 - -[smart_sensor] -# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to -# smart sensor task. -use_smart_sensor = False - -# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated -# by `hashcode % shard_code_upper_limit`. -shard_code_upper_limit = 10000 - -# The number of running smart sensor processes for each service. -shards = 5 - -# comma separated sensor classes support in smart_sensor. -sensors_enabled = NamedHivePartitionSensor diff --git a/ingestion/examples/airflow/airflow.cfg b/ingestion/examples/airflow/airflow.cfg deleted file mode 100644 index 4f651662ab3..00000000000 --- a/ingestion/examples/airflow/airflow.cfg +++ /dev/null @@ -1,1085 +0,0 @@ -[core] -# The folder where your airflow pipelines live, most likely a -# subfolder in a code repository. This path must be absolute. -dags_folder = /ingestion/examples/airflow/dags - -# Hostname by providing a path to a callable, which will resolve the hostname. -# The format is "package.function". 
-# -# For example, default value "socket.getfqdn" means that result from getfqdn() of "socket" -# package will be used as hostname. -# -# No argument should be required in the function specified. -# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address`` -hostname_callable = socket.getfqdn - -# Default timezone in case supplied date times are naive -# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam) -default_timezone = utc - -# The executor class that airflow should use. Choices include -# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``, -# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the -# full import path to the class when using a custom executor. -executor = LocalExecutor - -# The SqlAlchemy connection string to the metadata database. -# SqlAlchemy supports many different database engines. -# More information here: -# http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri -sql_alchemy_conn = mysql+pymysql://airflow_user:airflow_pass@mysql/airflow_db - -# The encoding for the databases -sql_engine_encoding = utf-8 - -# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have different encoding. -# This is particularly useful in case of mysql with utf8mb4 encoding because -# primary keys for XCom table has too big size and ``sql_engine_collation_for_ids`` should -# be set to ``utf8mb3_general_ci``. -# sql_engine_collation_for_ids = - -# If SqlAlchemy should pool database connections. -sql_alchemy_pool_enabled = True - -# The SqlAlchemy pool size is the maximum number of database connections -# in the pool. 0 indicates no limit. -sql_alchemy_pool_size = 5 - -# The maximum overflow size of the pool. -# When the number of checked-out connections reaches the size set in pool_size, -# additional connections will be returned up to this limit. -# When those additional connections are returned to the pool, they are disconnected and discarded. -# It follows then that the total number of simultaneous connections the pool will allow -# is pool_size + max_overflow, -# and the total number of "sleeping" connections the pool will allow is pool_size. -# max_overflow can be set to ``-1`` to indicate no overflow limit; -# no limit will be placed on the total number of concurrent connections. Defaults to ``10``. -sql_alchemy_max_overflow = 10 - -# The SqlAlchemy pool recycle is the number of seconds a connection -# can be idle in the pool before it is invalidated. This config does -# not apply to sqlite. If the number of DB connections is ever exceeded, -# a lower config value will allow the system to recover faster. -sql_alchemy_pool_recycle = 1800 - -# Check connection at the start of each connection pool checkout. -# Typically, this is a simple statement like "SELECT 1". -# More information here: -# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic -sql_alchemy_pool_pre_ping = True - -# The schema to use for the metadata database. -# SqlAlchemy supports databases with the concept of multiple schemas. -sql_alchemy_schema = - -# Import path for connect args in SqlAlchemy. Defaults to an empty dict. -# This is useful when you want to configure db engine args that SqlAlchemy won't parse -# in connection string. 
-# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args -# sql_alchemy_connect_args = - -# This defines the maximum number of task instances that can run concurrently in Airflow -# regardless of scheduler count and worker count. Generally, this value is reflective of -# the number of task instances with the running state in the metadata database. -parallelism = 32 - -# The maximum number of task instances allowed to run concurrently in each DAG. To calculate -# the number of tasks that is running concurrently for a DAG, add up the number of running -# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``concurrency``, -# which is defaulted as ``dag_concurrency``. -dag_concurrency = 16 - -# Are DAGs paused by default at creation -dags_are_paused_at_creation = True - -# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs -# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``, -# which is defaulted as ``max_active_runs_per_dag``. -max_active_runs_per_dag = 16 - -# The maximum number of queued dagruns for a single DAG. The scheduler will not create more DAG runs -# if it reaches the limit. This is not configurable at the DAG level. -max_queued_runs_per_dag = 16 - -# Whether to load the DAG examples that ship with Airflow. It's good to -# get started, but you probably want to set this to ``False`` in a production -# environment -load_examples = False - -# Whether to load the default connections that ship with Airflow. It's good to -# get started, but you probably want to set this to ``False`` in a production -# environment -load_default_connections = True - -# Path to the folder containing Airflow plugins -plugins_folder = /airflow/plugins - -# Should tasks be executed via forking of the parent process ("False", -# the speedier option) or by spawning a new python process ("True" slow, -# but means plugin changes picked up by tasks straight away) -execute_tasks_new_python_interpreter = False - -# Secret key to save connection passwords in the db -fernet_key = - -# Whether to disable pickling dags -donot_pickle = True - -# How long before timing out a python file import -dagbag_import_timeout = 30.0 - -# Should a traceback be shown in the UI for dagbag import errors, -# instead of just the exception message -dagbag_import_error_tracebacks = True - -# If tracebacks are shown, how many entries from the traceback should be shown -dagbag_import_error_traceback_depth = 2 - -# How long before timing out a DagFileProcessor, which processes a dag file -dag_file_processor_timeout = 50 - -# The class to use for running task instances in a subprocess. -# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class -# when using a custom task runner. -task_runner = StandardTaskRunner - -# If set, tasks without a ``run_as_user`` argument will be run with this user -# Can be used to de-elevate a sudo user running Airflow when executing tasks -default_impersonation = - -# What security module to use (for example kerberos) -security = - -# Turn unit test mode on (overwrites many configuration options with test -# values at runtime) -unit_test_mode = False - -# Whether to enable pickling for xcom (note that this is insecure and allows for -# RCE exploits). 
-enable_xcom_pickling = False - -# When a task is killed forcefully, this is the amount of time in seconds that -# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED -killed_task_cleanup_time = 60 - -# Whether to override params with dag_run.conf. If you pass some key-value pairs -# through ``airflow dags backfill -c`` or -# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. -dag_run_conf_overrides_params = True - -# When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``. -dag_discovery_safe_mode = True - -# The number of retries each task is going to have by default. Can be overridden at dag or task level. -default_task_retries = 0 - -# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. -min_serialized_dag_update_interval = 30 - -# Fetching serialized DAG can not be faster than a minimum interval to reduce database -# read rate. This config controls when your DAGs are updated in the Webserver -min_serialized_dag_fetch_interval = 10 - -# Whether to persist DAG files code in DB. -# If set to True, Webserver reads file contents from DB instead of -# trying to access files in a DAG folder. -# (Default is ``True``) -# Example: store_dag_code = True -# store_dag_code = - -# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store -# in the Database. -# All the template_fields for each of Task Instance are stored in the Database. -# Keeping this number small may cause an error when you try to view ``Rendered`` tab in -# TaskInstance view for older tasks. -max_num_rendered_ti_fields_per_task = 30 - -# On each dagrun check against defined SLAs -check_slas = True - -# Path to custom XCom class that will be used to store and resolve operators results -# Example: xcom_backend = path.to.CustomXCom -xcom_backend = airflow.models.xcom.BaseXCom - -# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``, -# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module. -lazy_load_plugins = True - -# By default Airflow providers are lazily-discovered (discovery and imports happen only when required). -# Set it to False, if you want to discover providers whenever 'airflow' is invoked via cli or -# loaded from module. -lazy_discover_providers = True - -# Number of times the code should be retried in case of DB Operational Errors. -# Not all transactions will be retried as it can cause undesired state. -# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. -max_db_retries = 3 - -# Hide sensitive Variables or Connection extra json keys from UI and task logs when set to True -# -# (Connection passwords are always hidden in logs) -hide_sensitive_var_conn_fields = True - -# A comma-separated list of extra sensitive keywords to look for in variables names or connection's -# extra JSON. -sensitive_var_conn_names = - -[logging] -# The folder where airflow should store its log files -# This path must be absolute -base_log_folder = /airflow/logs - -# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. -# Set this to True if you want to enable remote logging. -remote_logging = False - -# Users must supply an Airflow connection id that provides access to the storage -# location. -remote_log_conn_id = - -# Path to Google Credential JSON file. 
If omitted, authorization based on `the Application Default -# Credentials -# `__ will -# be used. -google_key_path = - -# Storage bucket URL for remote logging -# S3 buckets should start with "s3://" -# Cloudwatch log groups should start with "cloudwatch://" -# GCS buckets should start with "gs://" -# WASB buckets should start with "wasb" just to help Airflow select correct handler -# Stackdriver logs should start with "stackdriver://" -remote_base_log_folder = - -# Use server-side encryption for logs stored in S3 -encrypt_s3_logs = False - -# Logging level. -# -# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. -logging_level = INFO - -# Logging level for Flask-appbuilder UI. -# -# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. -fab_logging_level = WARN - -# Logging class -# Specify the class that will specify the logging configuration -# This class has to be on the python classpath -# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG -logging_config_class = - -# Flag to enable/disable Colored logs in Console -# Colour the logs when the controlling terminal is a TTY. -colored_console_log = True - -# Log format for when Colored logs is enabled -colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s -colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter - -# Format of Log line -log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s -simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s - -# Specify prefix pattern like mentioned below with stream handler TaskHandlerWithCustomFormatter -# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number} -task_log_prefix_template = - -# Formatting for how airflow generates file names/paths for each task run. -log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log - -# Formatting for how airflow generates file names for log -log_processor_filename_template = {{ filename }}.log - -# full path of dag_processor_manager logfile -dag_processor_manager_log_location = /airflow/logs/dag_processor_manager/dag_processor_manager.log - -# Name of handler to read task instance logs. -# Defaults to use ``task`` handler. -task_log_reader = task - -# A comma\-separated list of third-party logger names that will be configured to print messages to -# consoles\. -# Example: extra_loggers = connexion,sqlalchemy -extra_loggers = - -[metrics] - -# StatsD (https://github.com/etsy/statsd) integration settings. -# Enables sending metrics to StatsD. -statsd_on = False -statsd_host = localhost -statsd_port = 8125 -statsd_prefix = airflow - -# If you want to avoid sending all the available metrics to StatsD, -# you can configure an allow list of prefixes (comma separated) to send only the metrics that -# start with the elements of the list (e.g: "scheduler,executor,dagrun") -statsd_allow_list = - -# A function that validate the statsd stat name, apply changes to the stat name if necessary and return -# the transformed stat name. -# -# The function should have the following signature: -# def func_name(stat_name: str) -> str: -stat_name_handler = - -# To enable datadog integration to send airflow metrics. 
-statsd_datadog_enabled = False - -# List of datadog tags attached to all metrics(e.g: key1:value1,key2:value2) -statsd_datadog_tags = - -# If you want to utilise your own custom Statsd client set the relevant -# module path below. -# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up -# statsd_custom_client_path = - -[secrets] -# Full class name of secrets backend to enable (will precede env vars and metastore in search path) -# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend -backend = - -# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class. -# See documentation for the secrets backend you are using. JSON is expected. -# Example for AWS Systems Manager ParameterStore: -# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}`` -backend_kwargs = - -[cli] -# In what way should the cli access the API. The LocalClient will use the -# database directly, while the json_client will use the api running on the -# webserver -api_client = airflow.api.client.local_client - -# If you set web_server_url_prefix, do NOT forget to append it here, ex: -# ``endpoint_url = http://localhost:8080/myroot`` -# So api will look like: ``http://localhost:8080/myroot/api/experimental/...`` -endpoint_url = http://localhost:8080 - -[debug] -# Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first -# failed task. Helpful for debugging purposes. -fail_fast = False - -[api] -# Enables the deprecated experimental API. Please note that these APIs do not have access control. -# The authenticated user has full access. -# -# .. warning:: -# -# This `Experimental REST API `__ is -# deprecated since version 2.0. Please consider using -# `the Stable REST API `__. -# For more information on migration, see -# `UPDATING.md `_ -enable_experimental_api = False - -# How to authenticate users of the API. See -# https://airflow.apache.org/docs/apache-airflow/stable/security.html for possible values. -# ("airflow.api.auth.backend.default" allows all requests for historic reasons) -auth_backend = airflow.api.auth.backend.basic_auth - -# Used to set the maximum page limit for API requests -maximum_page_limit = 100 - -# Used to set the default page limit when limit is zero. A default limit -# of 100 is set on OpenApi spec. However, this particular default limit -# only work when limit is set equal to zero(0) from API requests. -# If no limit is supplied, the OpenApi spec default is used. -fallback_page_limit = 100 - -# The intended audience for JWT token credentials used for authorization. This value must match on the client and server sides. If empty, audience will not be tested. -# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com -google_oauth2_audience = - -# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on -# `the Application Default Credentials -# `__ will -# be used. -# Example: google_key_path = /files/service-account-json -google_key_path = - -# Used in response to a preflight request to indicate which HTTP -# headers can be used when making the actual request. This header is -# the server side response to the browser's -# Access-Control-Request-Headers header. -access_control_allow_headers = - -# Specifies the method or methods allowed when accessing the resource. 
-access_control_allow_methods = - -# Indicates whether the response can be shared with requesting code from the given origin. -access_control_allow_origin = - -[lineage] -backend = airflow_provider_openmetadata.lineage.openmetadata.OpenMetadataLineageBackend -airflow_service_name = local_airflow_3 -openmetadata_api_endpoint = http://localhost:8585/api -auth_provider_type = no-auth - -[openmetadata_airflow_apis] -dag_runner_template = /airflow/dag_templates/dag_runner.j2 -dag_generated_configs = /airflow/dag_generated_configs - -# this section is optional, the default auth provider for the secrets' manager service will be used if it is not set -# [openmetadata_secrets_manager] -# aws_access_key_id = -# aws_secret_access_key = -# aws_region = - -[atlas] -sasl_enabled = False -host = -port = 21000 -username = -password = - -[operators] -# The default owner assigned to each new operator, unless -# provided explicitly or passed via ``default_args`` -default_owner = airflow -default_cpus = 1 -default_ram = 512 -default_disk = 512 -default_gpus = 0 - -# Default queue that tasks get assigned to and that worker listen on. -default_queue = default - -# Is allowed to pass additional/unused arguments (args, kwargs) to the BaseOperator operator. -# If set to False, an exception will be thrown, otherwise only the console message will be displayed. -allow_illegal_arguments = False - -[hive] -# Default mapreduce queue for HiveOperator tasks -default_hive_mapred_queue = - -# Template for mapred_job_name in HiveOperator, supports the following named parameters -# hostname, dag_id, task_id, execution_date -# mapred_job_name_template = - -[webserver] -# The base url of your website as airflow cannot guess what domain or -# cname you are using. This is used in automated emails that -# airflow sends to point links to the right web server -base_url = http://localhost:8080 - -# Default timezone to display all dates in the UI, can be UTC, system, or -# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the -# default value of core/default_timezone will be used -# Example: default_ui_timezone = America/New_York -default_ui_timezone = UTC - -# The ip specified when starting the web server -web_server_host = 0.0.0.0 - -# The port on which to run the web server -web_server_port = 8080 - -# Paths to the SSL certificate and key for the web server. When both are -# provided SSL will be enabled. This does not change the web server port. -web_server_ssl_cert = - -# Paths to the SSL certificate and key for the web server. When both are -# provided SSL will be enabled. This does not change the web server port. -web_server_ssl_key = - -# Number of seconds the webserver waits before killing gunicorn master that doesn't respond -web_server_master_timeout = 120 - -# Number of seconds the gunicorn webserver waits before timing out on a worker -web_server_worker_timeout = 120 - -# Number of workers to refresh at a time. When set to 0, worker refresh is -# disabled. When nonzero, airflow periodically refreshes webserver workers by -# bringing up new ones and killing old ones. -worker_refresh_batch_size = 1 - -# Number of seconds to wait before refreshing a batch of workers. -worker_refresh_interval = 6000 - -# If set to True, Airflow will track files in plugins_folder directory. When it detects changes, -# then reload the gunicorn. -reload_on_plugin_change = False - -# Secret key used to run your flask app. It should be as random as possible. 
However, when running -# more than 1 instances of webserver, make sure all of them use the same ``secret_key`` otherwise -# one of them will error with "CSRF session token is missing". -secret_key = F1QV66vi/ZPNWXosYMgxxw== - -# Number of workers to run the Gunicorn web server -workers = 4 - -# The worker class gunicorn should use. Choices include -# sync (default), eventlet, gevent -worker_class = sync - -# Log files for the gunicorn webserver. '-' means log to stderr. -access_logfile = - - -# Log files for the gunicorn webserver. '-' means log to stderr. -error_logfile = - - -# Access log format for gunicorn webserver. -# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s" -# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format -access_logformat = - -# Expose the configuration file in the web server -expose_config = False - -# Expose hostname in the web server -expose_hostname = True - -# Expose stacktrace in the web server -expose_stacktrace = True - -# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times`` -dag_default_view = tree - -# Default DAG orientation. Valid values are: -# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top) -dag_orientation = LR - -# The amount of time (in secs) webserver will wait for initial handshake -# while fetching logs from other worker machine -log_fetch_timeout_sec = 5 - -# Time interval (in secs) to wait before next log fetching. -log_fetch_delay_sec = 2 - -# Distance away from page bottom to enable auto tailing. -log_auto_tailing_offset = 30 - -# Animation speed for auto tailing log display. -log_animation_speed = 1000 - -# By default, the webserver shows paused DAGs. Flip this to hide paused -# DAGs by default -hide_paused_dags_by_default = False - -# Consistent page size across all listing views in the UI -page_size = 100 - -# Define the color of navigation bar -navbar_color = #fff - -# Default dagrun to show in UI -default_dag_run_display_number = 25 - -# Enable werkzeug ``ProxyFix`` middleware for reverse proxy -enable_proxy_fix = False - -# Number of values to trust for ``X-Forwarded-For``. -# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/ -proxy_fix_x_for = 1 - -# Number of values to trust for ``X-Forwarded-Proto`` -proxy_fix_x_proto = 1 - -# Number of values to trust for ``X-Forwarded-Host`` -proxy_fix_x_host = 1 - -# Number of values to trust for ``X-Forwarded-Port`` -proxy_fix_x_port = 1 - -# Number of values to trust for ``X-Forwarded-Prefix`` -proxy_fix_x_prefix = 1 - -# Set secure flag on session cookie -cookie_secure = False - -# Set samesite policy on session cookie -cookie_samesite = Lax - -# Default setting for wrap toggle on DAG code and TI log views. -default_wrap = False - -# Allow the UI to be rendered in a frame -x_frame_enabled = True - -# Send anonymous user activity to your analytics tool -# choose from google_analytics, segment, or metarouter -# analytics_tool = - -# Unique ID of your account in the analytics tool -# analytics_id = - -# 'Recent Tasks' stats will show for old DagRuns if set -show_recent_stats_for_completed_runs = True - -# Update FAB permissions and sync security manager roles -# on webserver startup -update_fab_perms = True - -# The UI cookie lifetime in minutes. 
User will be logged out from UI after -# ``session_lifetime_minutes`` of non-activity -session_lifetime_minutes = 43200 - -# Sets a custom page title for the DAGs overview page and site title for all pages -# instance_name = - -[email] - -# Configuration email backend and whether to -# send email alerts on retry or failure -# Email backend to use -email_backend = airflow.utils.email.send_email_smtp - -# Email connection to use -email_conn_id = smtp_default - -# Whether email alerts should be sent when a task is retried -default_email_on_retry = True - -# Whether email alerts should be sent when a task failed -default_email_on_failure = True - -# File that will be used as the template for Email subject (which will be rendered using Jinja2). -# If not set, Airflow uses a base template. -# Example: subject_template = /path/to/my_subject_template_file -# subject_template = - -# File that will be used as the template for Email content (which will be rendered using Jinja2). -# If not set, Airflow uses a base template. -# Example: html_content_template = /path/to/my_html_content_template_file -# html_content_template = - -[smtp] - -# If you want airflow to send emails on retries, failure, and you want to use -# the airflow.utils.email.send_email_smtp function, you have to configure an -# smtp server here -smtp_host = localhost -smtp_starttls = True -smtp_ssl = False -# Example: smtp_user = airflow -# smtp_user = -# Example: smtp_password = airflow -# smtp_password = -smtp_port = 25 -smtp_mail_from = airflow@example.com -smtp_timeout = 30 -smtp_retry_limit = 5 - -[sentry] - -# Sentry (https://docs.sentry.io) integration. Here you can supply -# additional configuration options based on the Python platform. See: -# https://docs.sentry.io/error-reporting/configuration/?platform=python. -# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, -# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``. -# Enable error reporting to Sentry -sentry_on = false -sentry_dsn = - -[celery_kubernetes_executor] - -# This section only applies if you are using the ``CeleryKubernetesExecutor`` in -# ``[core]`` section above -# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. -# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), -# the task is executed via ``KubernetesExecutor``, -# otherwise via ``CeleryExecutor`` -kubernetes_queue = kubernetes - -[celery] - -# This section only applies if you are using the CeleryExecutor in -# ``[core]`` section above -# The app name that will be used by celery -celery_app_name = airflow.executors.celery_executor - -# The concurrency that will be used when starting workers with the -# ``airflow celery worker`` command. This defines the number of task instances that -# a worker will take, so size up your workers based on the resources on -# your worker box and the nature of your tasks -worker_concurrency = 16 - -# The maximum and minimum concurrency that will be used when starting workers with the -# ``airflow celery worker`` command (always keep minimum processes, but grow -# to maximum if necessary). Note the value should be max_concurrency,min_concurrency -# Pick these numbers based on resources on worker box and the nature of the task. -# If autoscale option is available, worker_concurrency will be ignored. 
-# http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale -# Example: worker_autoscale = 16,12 -# worker_autoscale = - -# Used to increase the number of tasks that a worker prefetches which can improve performance. -# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks -# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily -# blocked if there are multiple workers and one worker prefetches tasks that sit behind long -# running tasks while another worker has unutilized processes that are unable to process the already -# claimed blocked tasks. -# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits -# Example: worker_prefetch_multiplier = 1 -# worker_prefetch_multiplier = - -# When you start an airflow worker, airflow starts a tiny web server -# subprocess to serve the workers local log files to the airflow main -# web server, who then builds pages and sends them to users. This defines -# the port on which the logs are served. It needs to be unused, and open -# visible from the main web server to connect into the workers. -worker_log_server_port = 8793 - -# Umask that will be used when starting workers with the ``airflow celery worker`` -# in daemon mode. This control the file-creation mode mask which determines the initial -# value of file permission bits for newly created files. -worker_umask = 0o077 - -# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally -# a sqlalchemy database. Refer to the Celery documentation for more information. -broker_url = redis://redis:6379/0 - -# The Celery result_backend. When a job finishes, it needs to update the -# metadata of the job. Therefore it will post a message on a message bus, -# or insert it into a database (depending of the backend) -# This status is used by the scheduler to update the state of the task -# The use of a database is highly recommended -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings -result_backend = db+postgresql://postgres:airflow@postgres/airflow - -# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start -# it ``airflow celery flower``. This defines the IP that Celery Flower runs on -flower_host = 0.0.0.0 - -# The root URL for Flower -# Example: flower_url_prefix = /flower -flower_url_prefix = - -# This defines the port that Celery Flower runs on -flower_port = 5555 - -# Securing Flower with Basic Authentication -# Accepts user:password pairs separated by a comma -# Example: flower_basic_auth = user1:password1,user2:password2 -flower_basic_auth = - -# How many processes CeleryExecutor uses to sync task state. -# 0 means to use max(1, number of cores - 1) processes. -sync_parallelism = 0 - -# Import path for celery configuration options -celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG -ssl_active = False -ssl_key = -ssl_cert = -ssl_cacert = - -# Celery Pool implementation. -# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``. -# See: -# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency -# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html -pool = prefork - -# The number of seconds to wait before timing out ``send_task_to_executor`` or -# ``fetch_celery_task_state`` operations. 
-operation_timeout = 1.0 - -# Celery task will report its status as 'started' when the task is executed by a worker. -# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted -# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob. -task_track_started = True - -# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear -# stalled tasks. -task_adoption_timeout = 600 - -# The Maximum number of retries for publishing task messages to the broker when failing -# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed. -task_publish_max_retries = 3 - -# Worker initialisation check to validate Metadata Database connection -worker_precheck = False - -[celery_broker_transport_options] - -# This section is for specifying options which can be passed to the -# underlying celery broker transport. See: -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options -# The visibility timeout defines the number of seconds to wait for the worker -# to acknowledge the task before the message is redelivered to another worker. -# Make sure to increase the visibility timeout to match the time of the longest -# ETA you're planning to use. -# visibility_timeout is only supported for Redis and SQS celery brokers. -# See: -# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options -# Example: visibility_timeout = 21600 -# visibility_timeout = - -[dask] - -# This section only applies if you are using the DaskExecutor in -# [core] section above -# The IP address and port of the Dask cluster's scheduler. -cluster_address = 127.0.0.1:8786 - -# TLS/ SSL settings to access a secured Dask scheduler. -tls_ca = -tls_cert = -tls_key = - -[scheduler] -# Task instances listen for external kill signal (when you clear tasks -# from the CLI or the UI), this defines the frequency at which they should -# listen (in seconds). -job_heartbeat_sec = 5 - -# How often (in seconds) to check and tidy up 'running' TaskInstancess -# that no longer have a matching DagRun -clean_tis_without_dagrun_interval = 15.0 - -# The scheduler constantly tries to trigger new tasks (look at the -# scheduler section in the docs for more information). This defines -# how often the scheduler should run (in seconds). -scheduler_heartbeat_sec = 5 - -# The number of times to try to schedule each DAG file -# -1 indicates unlimited number -num_runs = -1 - -# The number of seconds to wait between consecutive DAG file processing -processor_poll_interval = 1 - -# Number of seconds after which a DAG file is parsed. The DAG file is parsed every -# ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after -# this interval. Keeping this number low will increase CPU usage. -min_file_process_interval = 30 - -# How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes. -dag_dir_list_interval = 300 - -# How often should stats be printed to the logs. Setting to 0 will disable printing stats -print_stats_interval = 30 - -# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled) -pool_metrics_interval = 5.0 - -# If the last scheduler heartbeat happened more than scheduler_health_check_threshold -# ago (in seconds), scheduler is considered unhealthy. 
-# This is used by the health check in the "/health" endpoint -scheduler_health_check_threshold = 30 - -# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs -orphaned_tasks_check_interval = 300.0 -child_process_log_directory = /airflow/logs/scheduler - -# Local task jobs periodically heartbeat to the DB. If the job has -# not heartbeat in this many seconds, the scheduler will mark the -# associated task instance as failed and will re-schedule the task. -scheduler_zombie_task_threshold = 300 - -# Turn off scheduler catchup by setting this to ``False``. -# Default behavior is unchanged and -# Command Line Backfills still work, but the scheduler -# will not do scheduler catchup if this is ``False``, -# however it can be set on a per DAG basis in the -# DAG definition (catchup) -catchup_by_default = True - -# This changes the batch size of queries in the scheduling main loop. -# If this is too high, SQL query performance may be impacted by one -# or more of the following: -# - reversion to full table scan -# - complexity of query predicate -# - excessive locking -# Additionally, you may hit the maximum allowable query length for your db. -# Set this to 0 for no limit (not advised) -max_tis_per_query = 512 - -# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries. -# If this is set to False then you should not run more than a single -# scheduler at once -use_row_level_locking = True - -# Max number of DAGs to create DagRuns for per scheduler loop. -max_dagruns_to_create_per_loop = 10 - -# How many DagRuns should a scheduler examine (and lock) when scheduling -# and queuing tasks. -max_dagruns_per_loop_to_schedule = 20 - -# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the -# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other -# dags in some circumstances -schedule_after_task_execution = True - -# The scheduler can run multiple processes in parallel to parse dags. -# This defines how many processes will run. -parsing_processes = 2 - -# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``. -# The scheduler will list and sort the dag files to decide the parsing order. -# -# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the -# recently modified DAGs first. -# * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the -# same host. This is useful when running with Scheduler in HA mode where each scheduler can -# parse different DAG files. -# * ``alphabetical``: Sort by filename -file_parsing_sort_mode = modified_time - -# Turn off scheduler use of cron intervals by setting this to False. -# DAGs submitted manually in the web UI or with trigger_dag will still run. 
-use_job_schedule = True - -# Allow externally triggered DagRuns for Execution Dates in the future -# Only has effect if schedule_interval is set to None in DAG -allow_trigger_in_future = False - -# DAG dependency detector class to use -dependency_detector = airflow.serialization.serialized_objects.DependencyDetector - -[kerberos] -ccache = /tmp/airflow_krb5_ccache - -# gets augmented with fqdn -principal = airflow -reinit_frequency = 3600 -kinit_path = kinit -keytab = airflow.keytab - -[github_enterprise] -api_rev = v3 - -[elasticsearch] -# Elasticsearch host -host = - -# Format of the log_id, which is used to query for a given tasks logs -log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number} - -# Used to mark the end of a log stream for a task -end_of_log_mark = end_of_log - -# Qualified URL for an elasticsearch_mapping frontend (like Kibana) with a template argument for log_id -# Code will construct log_id using the log_id template from the argument above. -# NOTE: The code will prefix the https:// automatically, don't include that here. -frontend = - -# Write the task logs to the stdout of the worker, rather than the default files -write_stdout = False - -# Instead of the default log formatter, write the log lines as JSON -json_format = False - -# Log fields to also attach to the json output, if enabled -json_fields = asctime, filename, lineno, levelname, message - -# The field where host name is stored (normally either `host` or `host.name`) -host_field = host - -# The field where offset is stored (normally either `offset` or `log.offset`) -offset_field = offset - -[elasticsearch_configs] -use_ssl = False -verify_certs = True - -[kubernetes] -# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored. -pod_template_file = - -# The repository of the Kubernetes Image for the Worker to Run -worker_container_repository = - -# The tag of the Kubernetes Image for the Worker to Run -worker_container_tag = - -# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` -namespace = default - -# If True, all worker pods will be deleted upon termination -delete_worker_pods = True - -# If False (and delete_worker_pods is True), -# failed worker pods will not be deleted so users can investigate them. -# This only prevents removal of worker pods where the worker itself failed, -# not when the task it ran failed. -delete_worker_pods_on_failure = False - -# Number of Kubernetes Worker Pod creation calls per scheduler loop. -# Note that the current default of "1" will only launch a single pod -# per-heartbeat. It is HIGHLY recommended that users increase this -# number to match the tolerance of their kubernetes cluster for -# better performance. -worker_pods_creation_batch_size = 1 - -# Allows users to launch pods in multiple namespaces. -# Will require creating a cluster-role for the scheduler -multi_namespace_mode = False - -# Use the service account kubernetes gives to pods to connect to kubernetes cluster. -# It's intended for clients that expect to be running inside a pod running on kubernetes. -# It will raise an exception if called from a process not running in a kubernetes environment. -in_cluster = True - -# When running with in_cluster=False change the default cluster_context or config_file -# options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. 
-# cluster_context = - -# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False -# config_file = - -# Keyword parameters to pass while calling a kubernetes client core_v1_api methods -# from Kubernetes Executor provided as a single line formatted JSON dictionary string. -# List of supported params are similar for all core_v1_apis, hence a single config -# variable for all apis. See: -# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py -kube_client_request_args = - -# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client -# ``core_v1_api`` method when using the Kubernetes Executor. -# This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` -# class defined here: -# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 -# Example: delete_option_kwargs = {"grace_period_seconds": 10} -delete_option_kwargs = - -# Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely -# when idle connection is time-outed on services like cloud load balancers or firewalls. -enable_tcp_keepalive = True - -# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has -# been idle for `tcp_keep_idle` seconds. -tcp_keep_idle = 120 - -# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond -# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds. -tcp_keep_intvl = 30 - -# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond -# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before -# a connection is considered to be broken. -tcp_keep_cnt = 6 - -# Set this to false to skip verifying SSL certificate of Kubernetes python client. -verify_ssl = True - -# How long in seconds a worker can be in Pending before it is considered a failure -worker_pods_pending_timeout = 300 - -# How often in seconds to check if Pending workers have exceeded their timeouts -worker_pods_pending_timeout_check_interval = 120 - -# How many pending pods to check for timeout violations in each check interval. -# You may want this higher if you have a very large cluster and/or use ``multi_namespace_mode``. -worker_pods_pending_timeout_batch_size = 100 - -[smart_sensor] -# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to -# smart sensor task. -use_smart_sensor = False - -# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated -# by `hashcode % shard_code_upper_limit`. -shard_code_upper_limit = 10000 - -# The number of running smart sensor processes for each service. -shards = 5 - -# comma separated sensor classes support in smart_sensor. 
-sensors_enabled = NamedHivePartitionSensor diff --git a/ingestion/examples/airflow/dags/airflow_sample_data.py b/ingestion/examples/airflow/dags/airflow_sample_data.py index 2ba94f7a4eb..8e8d1806d41 100644 --- a/ingestion/examples/airflow/dags/airflow_sample_data.py +++ b/ingestion/examples/airflow/dags/airflow_sample_data.py @@ -39,7 +39,7 @@ source: serviceConnection: config: type: SampleData - sampleDataFolder: "./examples/sample_data" + sampleDataFolder: "/home/airflow/ingestion/examples/sample_data" sourceConfig: {} sink: type: metadata-rest diff --git a/ingestion/examples/airflow/dags/airflow_sample_usage.py b/ingestion/examples/airflow/dags/airflow_sample_usage.py index f4d7d27fc70..4776c1198b9 100644 --- a/ingestion/examples/airflow/dags/airflow_sample_usage.py +++ b/ingestion/examples/airflow/dags/airflow_sample_usage.py @@ -40,7 +40,7 @@ config = """ "serviceConnection": { "config": { "type": "SampleData", - "sampleDataFolder": "./examples/sample_data" + "sampleDataFolder": "/home/airflow/ingestion/examples/sample_data" } }, "sourceConfig": { diff --git a/ingestion/ingestion_dependency.sh b/ingestion/ingestion_dependency.sh index dcbc99c94b6..a8bc6b3a64c 100755 --- a/ingestion/ingestion_dependency.sh +++ b/ingestion/ingestion_dependency.sh @@ -25,7 +25,7 @@ AIRFLOW_ADMIN_PASSWORD=${AIRFLOW_ADMIN_PASSWORD:-admin} OPENMETADATA_SERVER=${OPENMETADATA_SERVER:-"http://openmetadata-server:8585"} -sed -i "s#\(sql_alchemy_conn = \).*#\1${DB_CONN}#" /airflow/airflow.cfg +sed -i "s#\(sql_alchemy_conn = \).*#\1${DB_CONN}#" /opt/airflow/airflow.cfg airflow db init diff --git a/openmetadata-ui/src/main/resources/ui/cypress/e2e/AddNewService/mysql.spec.js b/openmetadata-ui/src/main/resources/ui/cypress/e2e/AddNewService/mysql.spec.js index 20b67aafe72..9854c85ca4a 100644 --- a/openmetadata-ui/src/main/resources/ui/cypress/e2e/AddNewService/mysql.spec.js +++ b/openmetadata-ui/src/main/resources/ui/cypress/e2e/AddNewService/mysql.spec.js @@ -23,7 +23,7 @@ describe('MySQL Ingestion', () => { const connectionInput = () => { cy.get('#root_username').type('openmetadata_user'); cy.get('#root_password').type('openmetadata_password'); - cy.get('#root_hostPort').type('172.16.239.10:3306'); + cy.get('#root_hostPort').type('mysql:3306'); cy.get('#root_databaseSchema').type('openmetadata_db'); };
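The sample DAG configs above now point the SampleData source at the absolute path /home/airflow/ingestion/examples/sample_data instead of a relative one. A minimal sketch for exercising the same source definition outside Airflow, assuming the metadata ingest CLI from openmetadata-ingestion and a local no-auth server; the serviceName, sink config and workflowConfig blocks are illustrative assumptions and are not part of this patch:

    # Hypothetical standalone run of the sample-data ingestion (assumed CLI and endpoint).
    cat > /tmp/sample_data_ingestion.yaml <<'EOF'
    source:
      type: sample-data
      serviceName: sample_data          # assumed service name, not shown in this patch
      serviceConnection:
        config:
          type: SampleData
          sampleDataFolder: "/home/airflow/ingestion/examples/sample_data"
      sourceConfig: {}
    sink:
      type: metadata-rest
      config: {}
    workflowConfig:                      # assumed block, filled in for illustration
      openMetadataServerConfig:
        hostPort: "http://localhost:8585/api"
        authProvider: no-auth
    EOF
    metadata ingest -c /tmp/sample_data_ingestion.yaml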
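With ingestion_dependency.sh now rewriting /opt/airflow/airflow.cfg, a quick sanity check is to compare the value injected by the sed line with the value Airflow actually resolves. A minimal sketch, assuming the compose service is named ingestion and that the airflow config get-value subcommand is available in the image (neither is stated in this patch):

    # Run inside the ingestion container; the grep target matches the sed line above.
    docker compose exec ingestion bash -c '
      grep "^sql_alchemy_conn" /opt/airflow/airflow.cfg    # value written by ingestion_dependency.sh
      airflow config get-value core sql_alchemy_conn       # value Airflow resolves (AIRFLOW__* env vars take precedence)
    '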