# Docker compose file covering DataHub's default configuration, which is to run
# all containers on a single host.
# Please see the README.md for instructions as to how to use and customize.
# NOTE: This file cannot build! No dockerfiles are set. See the README.md in this directory.
---
version: '3.9'
services:
  datahub-frontend-react:
    container_name: datahub-frontend-react
    hostname: datahub-frontend-react
    image: ${DATAHUB_FRONTEND_IMAGE:-linkedin/datahub-frontend-react}:${DATAHUB_VERSION:-head}
    ports:
      # Quoted so YAML never mis-types the host:container mapping.
      - '${DATAHUB_MAPPED_FRONTEND_PORT:-9002}:9002'
    build:
      context: ../
      dockerfile: docker/datahub-frontend/Dockerfile
    env_file: datahub-frontend/env/docker.env
    depends_on:
      datahub-gms:
        condition: service_healthy
    volumes:
      - ${HOME}/.datahub/plugins:/etc/datahub/plugins

  datahub-actions:
    container_name: datahub-actions
    hostname: actions
    image: acryldata/datahub-actions:${ACTIONS_VERSION:-head}
    env_file: datahub-actions/env/docker.env
    depends_on:
      datahub-gms:
        condition: service_healthy

  datahub-gms:
    container_name: datahub-gms
    hostname: datahub-gms
    image: ${DATAHUB_GMS_IMAGE:-linkedin/datahub-gms}:${DATAHUB_VERSION:-head}
    ports:
      - '${DATAHUB_MAPPED_GMS_PORT:-8080}:8080'
    build:
      context: ../
      dockerfile: docker/datahub-gms/Dockerfile
    env_file: datahub-gms/env/docker-without-neo4j.env
    healthcheck:
      # NOTE(review): this uses the HOST-mapped port variable inside the container
      # network, but the in-container port is always 8080 — the check will target
      # the wrong port if DATAHUB_MAPPED_GMS_PORT is overridden. Confirm intent.
      test: curl -sS --fail http://datahub-gms:${DATAHUB_MAPPED_GMS_PORT:-8080}/health
      start_period: 20s
      interval: 1s
      retries: 20
      timeout: 5s
    depends_on:
      datahub-upgrade:
        condition: service_completed_successfully
    volumes:
      - ${HOME}/.datahub/plugins:/etc/datahub/plugins

  datahub-upgrade:
    container_name: datahub-upgrade
    hostname: datahub-upgrade
    image: ${DATAHUB_UPGRADE_IMAGE:-acryldata/datahub-upgrade}:${DATAHUB_VERSION:-head}
    command:
      - -u
      - SystemUpdate
    build:
      context: ../
      dockerfile: docker/datahub-upgrade/Dockerfile
    env_file: datahub-upgrade/env/docker-without-neo4j.env
    depends_on:
      # NOTE(review): mysql-setup is not defined in this file — presumably supplied
      # by an override/companion compose file; verify before running standalone.
      mysql-setup:
        condition: service_completed_successfully
      elasticsearch-setup:
        condition: service_completed_successfully
      kafka-setup:
        condition: service_completed_successfully
    labels:
      # Label values must be strings per the Compose spec; a bare YAML boolean
      # fails validation on newer compose versions.
      datahub_setup_job: 'true'

  # This "container" is a workaround to pre-create search indices
  elasticsearch-setup:
    container_name: elasticsearch-setup
    hostname: elasticsearch-setup
    image: ${DATAHUB_ELASTIC_SETUP_IMAGE:-linkedin/datahub-elasticsearch-setup}:${DATAHUB_VERSION:-head}
    build:
      context: ../
      dockerfile: docker/elasticsearch-setup/Dockerfile
    env_file: elasticsearch-setup/env/docker.env
    depends_on:
      elasticsearch:
        condition: service_healthy
    labels:
      datahub_setup_job: 'true'

  kafka-setup:
    container_name: kafka-setup
    hostname: kafka-setup
    image: ${DATAHUB_KAFKA_SETUP_IMAGE:-linkedin/datahub-kafka-setup}:${DATAHUB_VERSION:-head}
    build:
      dockerfile: ./docker/kafka-setup/Dockerfile
      context: ../
    env_file: kafka-setup/env/docker.env
    depends_on:
      broker:
        condition: service_healthy
      schema-registry:
        condition: service_healthy
    labels:
      datahub_setup_job: 'true'

  elasticsearch:
    container_name: elasticsearch
    hostname: elasticsearch
    image: elasticsearch:7.10.1
    ports:
      - '${DATAHUB_MAPPED_ELASTIC_PORT:-9200}:9200'
    env_file: elasticsearch/env/docker.env
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
    deploy:
      resources:
        limits:
          memory: 1G
    healthcheck:
      # The URL is double-quoted for the shell: string-form healthchecks run via
      # `sh -c`, so an unquoted '&' would background curl and drop the timeout
      # param. Double quotes (not single) keep the $${...} runtime expansion alive.
      test: curl -sS --fail "http://elasticsearch:$${DATAHUB_MAPPED_ELASTIC_PORT:-9200}/_cluster/health?wait_for_status=yellow&timeout=0s"
      start_period: 5s
      interval: 1s
      retries: 5
      timeout: 5s
    volumes:
      - esdata:/usr/share/elasticsearch/data

  schema-registry:
    container_name: schema-registry
    hostname: schema-registry
    image: confluentinc/cp-schema-registry:7.2.2
    ports:
      - '${DATAHUB_MAPPED_SCHEMA_REGISTRY_PORT:-8081}:8081'
    env_file: schema-registry/env/docker.env
    healthcheck:
      test: nc -z schema-registry ${DATAHUB_MAPPED_SCHEMA_REGISTRY_PORT:-8081}
      start_period: 5s
      interval: 1s
      retries: 5
      timeout: 5s
    depends_on:
      broker:
        condition: service_healthy

  broker:
    container_name: broker
    hostname: broker
    image: confluentinc/cp-kafka:7.2.2
    ports:
      - '${DATAHUB_MAPPED_KAFKA_BROKER_PORT:-9092}:9092'
    env_file: broker/env/docker.env
    healthcheck:
      # $$ defers expansion to the container shell rather than compose interpolation.
      test: nc -z broker $${DATAHUB_MAPPED_KAFKA_BROKER_PORT:-9092}
      start_period: 5s
      interval: 1s
      retries: 5
      timeout: 5s
    depends_on:
      zookeeper:
        condition: service_healthy
    volumes:
      - broker:/var/lib/kafka/data/

  zookeeper:
    container_name: zookeeper
    hostname: zookeeper
    image: confluentinc/cp-zookeeper:7.2.2
    ports:
      - '${DATAHUB_MAPPED_ZK_PORT:-2181}:2181'
    env_file: zookeeper/env/docker.env
    healthcheck:
      test: echo srvr | nc zookeeper $${DATAHUB_MAPPED_ZK_PORT:-2181}
      start_period: 2s
      interval: 5s
      retries: 5
      timeout: 5s
    volumes:
      - zkdata:/var/lib/zookeeper

networks:
  default:
    name: datahub_network

volumes:
  esdata:
  broker:
  zkdata: