# Docker compose file covering DataHub's default configuration, which is to run all containers on a single host.
# Please see the README.md for instructions on how to use and customize it.
# NOTE: See the README.md in this directory for instructions on building these images from source.
---
version: '3.8'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:7.2.2
    env_file: zookeeper/env/docker.env
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - ${DATAHUB_MAPPED_ZK_PORT:-2181}:2181
    volumes:
      - zkdata:/var/lib/zookeeper
  broker:
    image: confluentinc/cp-kafka:7.2.2
    env_file: broker/env/docker.env
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - ${DATAHUB_MAPPED_KAFKA_BROKER_PORT:-9092}:9092
    volumes:
      - broker:/var/lib/kafka/data/
  schema-registry:
    image: confluentinc/cp-schema-registry:7.2.2
    env_file: schema-registry/env/docker.env
    hostname: schema-registry
    container_name: schema-registry
    depends_on:
      - broker
    ports:
      - ${DATAHUB_MAPPED_SCHEMA_REGISTRY_PORT:-8081}:8081
  elasticsearch:
    image: elasticsearch:7.10.1
    env_file: elasticsearch/env/docker.env
    container_name: elasticsearch
    hostname: elasticsearch
    ports:
      - ${DATAHUB_MAPPED_ELASTIC_PORT:-9200}:9200
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
    volumes:
      - esdata:/usr/share/elasticsearch/data
    healthcheck:
      test: ["CMD-SHELL", "curl -sS --fail 'http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=0s' || exit 1"]
      start_period: 2m
      retries: 4
  neo4j:
    image: neo4j:4.4.9-community
    env_file: neo4j/env/docker.env
    hostname: neo4j
    container_name: neo4j
    ports:
      - ${DATAHUB_MAPPED_NEO4J_HTTP_PORT:-7474}:7474
      - ${DATAHUB_MAPPED_NEO4J_BOLT_PORT:-7687}:7687
    volumes:
      - neo4jdata:/data
  # This "container" is a workaround to pre-create search indices.
  elasticsearch-setup:
    labels:
      datahub_setup_job: true
    build:
      context: ../
      dockerfile: docker/elasticsearch-setup/Dockerfile
    image: ${DATAHUB_ELASTIC_SETUP_IMAGE:-linkedin/datahub-elasticsearch-setup}:${DATAHUB_VERSION:-head}
    env_file: elasticsearch-setup/env/docker.env
    hostname: elasticsearch-setup
    container_name: elasticsearch-setup
    depends_on:
      - elasticsearch
  datahub-gms:
    build:
      context: ../
      dockerfile: docker/datahub-gms/Dockerfile
    image: ${DATAHUB_GMS_IMAGE:-linkedin/datahub-gms}:${DATAHUB_VERSION:-head}
    hostname: datahub-gms
    container_name: datahub-gms
    ports:
      - ${DATAHUB_MAPPED_GMS_PORT:-8080}:8080
    depends_on:
      - mysql
      - neo4j
  datahub-frontend-react:
    build:
      context: ../
      dockerfile: docker/datahub-frontend/Dockerfile
    image: ${DATAHUB_FRONTEND_IMAGE:-linkedin/datahub-frontend-react}:${DATAHUB_VERSION:-head}
    env_file: datahub-frontend/env/docker.env
    hostname: datahub-frontend-react
    container_name: datahub-frontend-react
    ports:
      - ${DATAHUB_MAPPED_FRONTEND_PORT:-9002}:9002
    depends_on:
      - datahub-gms
    volumes:
      - ${HOME}/.datahub/plugins:/etc/datahub/plugins
  datahub-actions:
    image: acryldata/datahub-actions:${ACTIONS_VERSION:-head}
    hostname: actions
    env_file: datahub-actions/env/docker.env
    restart: on-failure:5
    depends_on:
      - datahub-gms
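  # The datahub-gms service above depends on a `mysql` service that is not defined
  # anywhere in this file. A minimal sketch is provided here, following the env_file,
  # port-mapping, and volume conventions of the other services; the image tag, env file
  # path, startup flags, mapped-port variable, and the `mysqldata` volume (declared at
  # the bottom of this file) are assumptions, not part of the original configuration.
  mysql:
    image: mysql:8.2
    env_file: mysql/env/docker.env
    hostname: mysql
    container_name: mysql
    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_bin
    ports:
      - ${DATAHUB_MAPPED_MYSQL_PORT:-3306}:3306
    volumes:
      - mysqldata:/var/lib/mysql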
  # This "container" is a workaround to pre-create topics.
  # It is not required in most cases; it is kept here for backwards compatibility with
  # older clients that explicitly wait for this container.
  kafka-setup:
    labels:
      datahub_setup_job: true
    build:
      dockerfile: ./docker/kafka-setup/Dockerfile
      context: ../
    image: ${DATAHUB_KAFKA_SETUP_IMAGE:-linkedin/datahub-kafka-setup}:${DATAHUB_VERSION:-head}
    env_file: kafka-setup/env/docker.env
    hostname: kafka-setup
    container_name: kafka-setup
    depends_on:
      - broker
      - schema-registry
  datahub-upgrade:
    labels:
      datahub_setup_job: true
    build:
      context: ../
      dockerfile: docker/datahub-upgrade/Dockerfile
    image: ${DATAHUB_UPGRADE_IMAGE:-acryldata/datahub-upgrade}:${DATAHUB_VERSION:-head}
    env_file: datahub-upgrade/env/docker-without-neo4j.env
    hostname: datahub-upgrade
    container_name: datahub-upgrade
    command: ["-u", "SystemUpdate"]
networks:
  default:
    name: datahub_network
volumes:
  esdata:
  neo4jdata:
  zkdata:
  broker:
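  # Backing volume for the `mysql` service sketched above; drop this if you drop that service.
  mysqldata:

# Example usage (a sketch, assuming this file is saved as docker-compose.yml and the
# env files referenced above exist; see the README.md for the supported workflow):
#
#   DATAHUB_VERSION=head docker compose -p datahub up -d
#   docker compose -p datahub ps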