This is an automated email from the ASF dual-hosted git repository.
progers pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 5dadbdf4d0 Generate the IT docker-compose.yaml files (#13669)
5dadbdf4d0 is described below
commit 5dadbdf4d051a6843c87a05948cade84eb042a40
Author: Paul Rogers <[email protected]>
AuthorDate: Tue Feb 21 15:03:02 2023 -0800
Generate the IT docker-compose.yaml files (#13669)
Generate IT docker-compose.yaml files
Generates test-specific docker-compose.yaml files using a simple
Python template script.
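For reference, a rough sketch of the resulting workflow, using BatchIndex as an example category (the gen, up and down subcommands are defined in cluster.sh below; up additionally requires the test Docker image to have been built first):

    cd integration-tests-ex/cases
    ./cluster.sh gen BatchIndex     # only generate target/cluster/BatchIndex/docker-compose.yaml
    ./cluster.sh up BatchIndex      # generate the file (when a template exists) and start the cluster
    ./cluster.sh down BatchIndex    # stop the cluster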
---
.gitignore | 1 +
docs/development/build.md | 5 +-
integration-tests-ex/cases/cluster.sh | 336 +++++++++-------
.../cluster/AzureDeepStorage/docker-compose.yaml | 132 -------
.../cluster/BatchIndex/docker-compose-indexer.yaml | 98 -----
.../cases/cluster/BatchIndex/docker-compose.yaml | 98 -----
.../cases/cluster/Common/druid.yaml | 14 +
.../Common/environment-configs/coordinator.env | 6 +
.../cluster/GcsDeepStorage/docker-compose.yaml | 155 --------
.../cluster/HighAvailability/docker-compose.yaml | 157 --------
.../cluster/MultiStageQuery/docker-compose.yaml | 98 -----
.../cluster/S3DeepStorage/docker-compose.yaml | 129 -------
.../druid/testsEx/cluster/DruidClusterClient.java | 5 +-
.../apache/druid/testsEx/config/ClusterConfig.java | 5 +
.../cases/templates/AzureDeepStorage.py | 43 +++
integration-tests-ex/cases/templates/BatchIndex.py | 18 +
.../cases/templates/GcsDeepStorage.py | 45 +++
.../cases/templates/HighAvailability.py | 85 ++++
.../cases/templates/MultiStageQuery.py | 26 ++
.../cases/templates/S3DeepStorage.py | 46 +++
integration-tests-ex/cases/templates/template.py | 430 +++++++++++++++++++++
integration-tests-ex/docs/compose.md | 58 +++
integration-tests-ex/docs/docker.md | 1 -
integration-tests-ex/docs/druid-config.md | 2 +-
integration-tests-ex/docs/guide.md | 42 ++
integration-tests-ex/image/build-image.sh | 1 -
it.sh | 42 +-
27 files changed, 1060 insertions(+), 1018 deletions(-)
diff --git a/.gitignore b/.gitignore
index d6ecf2b795..8365e4a3b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ target
*.tar.gz
*.swp
*.swo
+*.pyc
.classpath
.idea
.project
diff --git a/docs/development/build.md b/docs/development/build.md
index b093b2e4f9..15f0689631 100644
--- a/docs/development/build.md
+++ b/docs/development/build.md
@@ -38,7 +38,8 @@ make sure it has `/master/` in the URL.
##### Other dependencies
-- Distribution builds require Python 3.x and the `pyyaml` module
+- Distribution builds require Python 3.x and the `pyyaml` module.
+- Integration tests require `pyyaml` version 5.1 or later.
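A typical way to satisfy this requirement, assuming pip is used to manage Python packages:

    pip3 install 'pyyaml>=5.1'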
##### Downloading the source
@@ -86,7 +87,7 @@ mvn clean install -Phadoop3
To generate distribution with hadoop3 dependencies, run :
```bash
-mvn clean install -Papache-release,dist-hadoop3,rat,hadoop3 -DskipTests
+mvn clean install -Papache-release,dist-hadoop3,rat,hadoop3 -DskipTests
```
#### Potential issues
diff --git a/integration-tests-ex/cases/cluster.sh b/integration-tests-ex/cases/cluster.sh
index 3c5bea3f81..f7c6cb92f0 100755
--- a/integration-tests-ex/cases/cluster.sh
+++ b/integration-tests-ex/cases/cluster.sh
@@ -21,16 +21,21 @@
# Maps category names to cluster names. The mapping here must match
# that in the test category classes when @Cluster is used.
+# Fail if any command fails
+set -e
+
# Enable for debugging
#set -x
export MODULE_DIR=$(cd $(dirname $0) && pwd)
function usage {
- cat <<EOF
+ cat <<EOF
Usage: $0 cmd [category]
-h, help
Display this message
+ prepare category
+ Generate the docker-compose.yaml file for the category for debugging.
up category
Start the cluster
down category
@@ -38,130 +43,179 @@ Usage: $0 cmd [category]
status category
Status of the cluster (for debugging within build scripts)
compose-cmd category
- Pass the command to Docker compose.
+ Pass the command to Docker compose. Cluster should already be up.
+ gen category
+ Generate docker-compose.yaml files (only). Done automatically as
+ part of up. Use only for debugging.
EOF
}
# Command name is required
if [ $# -eq 0 ]; then
- usage 1>&2
- exit 1
+ usage 1>&2
+ exit 1
fi
CMD=$1
shift
-# All commands need env vars
-ENV_FILE=$MODULE_DIR/../image/target/env.sh
-if [ ! -f $ENV_FILE ]; then
- echo "Please build the Docker test image before testing" 1>&2
- exit 1
-fi
+function check_env_file {
+ export ENV_FILE=$MODULE_DIR/../image/target/env.sh
+ if [ ! -f $ENV_FILE ]; then
+ echo "Please build the Docker test image before testing" 1>&2
+ exit 1
+ fi
-source $ENV_FILE
+ source $ENV_FILE
+}
function category {
- if [ $# -eq 0 ]; then
- usage 1>&2
- exit 1
- fi
- export CATEGORY=$1
- # The untranslated category is used for the local name of the
- # shared folder.
-
- # DRUID_INTEGRATION_TEST_GROUP is used in
- # docker-compose files and here. Despite the name, it is the
- # name of the cluster configuration we want to run, not the
- # test category. Multiple categories can map to the same cluster
- # definition.
-
- # Map from category name to shared cluster definition name.
- # Add an entry here if you create a new category that shares
- # a definition.
- case $CATEGORY in
- "InputSource")
- export DRUID_INTEGRATION_TEST_GROUP=BatchIndex
- ;;
- "InputFormat")
- export DRUID_INTEGRATION_TEST_GROUP=BatchIndex
- ;;
- "Catalog")
- export DRUID_INTEGRATION_TEST_GROUP=BatchIndex
- ;;
- *)
- export DRUID_INTEGRATION_TEST_GROUP=$CATEGORY
- ;;
- esac
-
- export CLUSTER_DIR=$MODULE_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP
- if [ ! -d $CLUSTER_DIR ]; then
- echo "Cluster directory $CLUSTER_DIR does not exist." 1>&2
- echo "$USAGE" 1>&2
- exit 1
- fi
-
- export TARGET_DIR=$MODULE_DIR/target
- export SHARED_DIR=$TARGET_DIR/$CATEGORY
- export ENV_FILE="$TARGET_DIR/${CATEGORY}.env"
+ if [ $# -eq 0 ]; then
+ usage 1>&2
+ exit 1
+ fi
+ export CATEGORY=$1
+ # The untranslated category is used for the local name of the
+ # shared folder.
+
+ # DRUID_INTEGRATION_TEST_GROUP is used in
+ # docker-compose files and here. Despite the name, it is the
+ # name of the cluster configuration we want to run, not the
+ # test category. Multiple categories can map to the same cluster
+ # definition.
+
+ # Map from category name to shared cluster definition name.
+ # Add an entry here if you create a new category that shares
+ # a definition.
+ case $CATEGORY in
+ "InputSource")
+ export DRUID_INTEGRATION_TEST_GROUP=BatchIndex
+ ;;
+ "InputFormat")
+ export DRUID_INTEGRATION_TEST_GROUP=BatchIndex
+ ;;
+ "Catalog")
+ export DRUID_INTEGRATION_TEST_GROUP=BatchIndex
+ ;;
+ *)
+ export DRUID_INTEGRATION_TEST_GROUP=$CATEGORY
+ ;;
+ esac
+
+ export CLUSTER_DIR=$MODULE_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP
+ export TARGET_DIR=$MODULE_DIR/target
+ export SHARED_DIR=$TARGET_DIR/$CATEGORY
+ export ENV_FILE="$TARGET_DIR/${CATEGORY}.env"
}
# Dump lots of information to debug Docker failures when run inside
# of a build environment where we can't inspect Docker directly.
function show_status {
- echo "===================================="
- ls -l target/shared
- echo "docker ps -a"
- docker ps -a
- # Was: --filter status=exited
- for id in $(docker ps -a --format "{{.ID}}"); do
- echo "===================================="
- echo "Logs for Container ID $id"
- docker logs $id | tail -n 20
- done
- echo "===================================="
+ echo "===================================="
+ ls -l target/shared
+ echo "docker ps -a"
+ docker ps -a
+ # Was: --filter status=exited
+ for id in $(docker ps -a --format "{{.ID}}"); do
+ echo "===================================="
+ echo "Logs for Container ID $id"
+ docker logs $id | tail -n 20
+ done
+ echo "===================================="
}
function build_shared_dir {
- mkdir -p $SHARED_DIR
- # Must start with an empty DB to keep MySQL happy
- rm -rf $SHARED_DIR/db
- mkdir -p $SHARED_DIR/logs
- mkdir -p $SHARED_DIR/tasklogs
- mkdir -p $SHARED_DIR/db
- mkdir -p $SHARED_DIR/kafka
- mkdir -p $SHARED_DIR/resources
- cp $MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources
- # Permissions in some build setups are screwed up. See above. The user
- # which runs Docker does not have permission to write into the /shared
- # directory. Force ownership to allow writing.
- chmod -R a+rwx $SHARED_DIR
+ mkdir -p $SHARED_DIR
+ # Must start with an empty DB to keep MySQL happy
+ rm -rf $SHARED_DIR/db
+ mkdir -p $SHARED_DIR/logs
+ mkdir -p $SHARED_DIR/tasklogs
+ mkdir -p $SHARED_DIR/db
+ mkdir -p $SHARED_DIR/kafka
+ mkdir -p $SHARED_DIR/resources
+ cp $MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources
+ # Permissions in some build setups are screwed up. See above. The user
+ # which runs Docker does not have permission to write into the /shared
+ # directory. Force ownership to allow writing.
+ chmod -R a+rwx $SHARED_DIR
}
-# Each test must have a default docker-compose.yaml file which corresponds to using
+# Either generate the docker-compose file, or use "static" versions.
+function docker_file {
+
+ # If a template exists, generate the docker-compose.yaml file. Copy over the Common
+ # folder.
+ TEMPLATE_DIR=$MODULE_DIR/templates
+ TEMPLATE_SCRIPT=${DRUID_INTEGRATION_TEST_GROUP}.py
+ if [ -f "$TEMPLATE_DIR/$TEMPLATE_SCRIPT" ]; then
+ export COMPOSE_DIR=$TARGET_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP
+ mkdir -p $COMPOSE_DIR
+ pushd $TEMPLATE_DIR > /dev/null
+ python3 $TEMPLATE_SCRIPT
+ popd > /dev/null
+ cp -r $MODULE_DIR/cluster/Common $TARGET_DIR/cluster
+ else
+ # Else, use the existing non-template file in place.
+ if [ ! -d $CLUSTER_DIR ]; then
+ echo "Cluster directory $CLUSTER_DIR does not exist." 1>&2
+ echo "$USAGE" 1>&2
+ exit 1
+ fi
+ export COMPOSE_DIR=$CLUSTER_DIR
+ choose_static_file
+ fi
+}
+
+# Each test that uses static (non-generated) docker compose files
+# must have a default docker-compose.yaml file which corresponds to using
# the MiddleManager (or no indexer). A test can optionally include a second file called
# docker-compose-indexer.yaml which uses the Indexer in place of Middle Manager.
-function docker_file {
- compose_args=""
- if [ -n "$USE_INDEXER" ]; then
- # Sanity check: USE_INDEXER must be "indexer" or "middleManager"
- # if it is set at all.
- if [ "$USE_INDEXER" != "indexer" ] && [ "$USE_INDEXER" != "middleManager" ]
- then
- echo "USE_INDEXER must be 'indexer' or 'middleManager' (is '$USE_INDEXER')" 1>&2
- exit 1
- fi
- if [ "$USE_INDEXER" == "indexer" ]; then
- compose_file=docker-compose-indexer.yaml
- if [ ! -f "$CLUSTER_DIR/$compose_file" ]; then
- echo "USE_INDEXER=$USE_INDEXER, but $CLUSTER_DIR/$compose_file is missing" 1>&2
- exit 1
- fi
- compose_args="-f $compose_file"
- fi
- fi
- echo $compose_args
+function choose_static_file {
+ export DOCKER_ARGS=""
+ if [ -n "$USE_INDEXER" ]; then
+ # Sanity check: USE_INDEXER must be "indexer" or "middleManager"
+ # if it is set at all.
+ if [ "$USE_INDEXER" != "indexer" ] && [ "$USE_INDEXER" != "middleManager" ]
+ then
+ echo "USE_INDEXER must be 'indexer' or 'middleManager' (it is '$USE_INDEXER')" 1>&2
+ exit 1
+ fi
+ if [ "$USE_INDEXER" == "indexer" ]; then
+ compose_file=docker-compose-indexer.yaml
+ if [ ! -f "$CLUSTER_DIR/$compose_file" ]; then
+ echo "USE_INDEXER=$USE_INDEXER, but $CLUSTER_DIR/$compose_file is missing" 1>&2
+ exit 1
+ fi
+ export DOCKER_ARGS="-f $compose_file"
+ fi
+ fi
+}
+
+function verify_docker_file {
+ if [ -f "$CLUSTER_DIR/docker-compose.yaml" ]; then
+ # Use the existing non-template file in place.
+ export COMPOSE_DIR=$CLUSTER_DIR
+ return 0
+ fi
+
+ # The docker compose file must have been generated via up
+ export COMPOSE_DIR=$TARGET_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP
+ if [ ! -f "$COMPOSE_DIR/docker-compose.yaml" ]; then
+ echo "$COMPOSE_DIR/docker-compose.yaml is missing. Is cluster up? Did you do a 'clean' after 'up'?" 1>&2
+ fi
}
+# Determine if docker-compose is available. If not, assume Docker supports
+# the compose subcommand
+set +e
+if which docker-compose > /dev/null
+then
+ DOCKER_COMPOSE='docker-compose'
+else
+ DOCKER_COMPOSE='docker compose'
+fi
+set -e
+
# Print environment for debugging
#env
@@ -177,37 +231,57 @@ fi
set -e
case $CMD in
- "-h" )
- usage
- ;;
- "help" )
- usage
- $DOCKER_COMPOSE help
- ;;
- "up" )
- category $*
- echo "Starting cluster $DRUID_INTEGRATION_TEST_GROUP"
- build_shared_dir
- cd $CLUSTER_DIR
- $DOCKER_COMPOSE `docker_file` up -d
- # Enable the following for debugging
- #show_status
- ;;
- "status" )
- category $*
- cd $CLUSTER_DIR
- show_status
- ;;
- "down" )
- category $*
- # Enable the following for debugging
- #show_status
- cd $CLUSTER_DIR
- $DOCKER_COMPOSE `docker_file` $CMD
- ;;
- "*" )
- category $*
- cd $CLUSTER_DIR
- $DOCKER_COMPOSE `docker_file` $CMD
- ;;
+ "-h" )
+ usage
+ ;;
+ "help" )
+ usage
+ $DOCKER_COMPOSE help
+ ;;
+ "prepare" )
+ check_env_file
+ category $*
+ build_shared_dir
+ docker_file
+ ;;
+ "gen" )
+ category $*
+ build_shared_dir
+ docker_file
+ echo "Generated file is in $COMPOSE_DIR"
+ ;;
+ "up" )
+ check_env_file
+ category $*
+ echo "Starting cluster $DRUID_INTEGRATION_TEST_GROUP"
+ build_shared_dir
+ docker_file
+ cd $COMPOSE_DIR
+ $DOCKER_COMPOSE $DOCKER_ARGS up -d
+ # Enable the following for debugging
+ #show_status
+ ;;
+ "status" )
+ check_env_file
+ category $*
+ docker_file
+ cd $COMPOSE_DIR
+ show_status
+ ;;
+ "down" )
+ check_env_file
+ category $*
+ # Enable the following for debugging
+ #show_status
+ verify_docker_file
+ cd $COMPOSE_DIR
+ $DOCKER_COMPOSE $DOCKER_ARGS $CMD
+ ;;
+ "*" )
+ check_env_file
+ category $*
+ verify_docker_file
+ cd $COMPOSE_DIR
+ $DOCKER_COMPOSE $DOCKER_ARGS $CMD
+ ;;
esac
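For a given category, the docker_file function above amounts to running the category's template with Python; done by hand (assuming pyyaml is installed and BatchIndex is the category), the steps look roughly like:

    cd integration-tests-ex/cases/templates
    mkdir -p ../target/cluster/BatchIndex
    python3 BatchIndex.py                        # writes ../target/cluster/BatchIndex/docker-compose.yaml
    cp -r ../cluster/Common ../target/cluster    # the generated file references the shared Common folder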
diff --git a/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml b/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml
deleted file mode 100644
index 75d9b04629..0000000000
--- a/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -------------------------------------------------------------------------
-
-# Cluster for the Azure deep storage test.
-#
-# Required env vars:
-#
-# AZURE_ACCOUNT
-# AZURE_KEY
-# AZURE_CONTAINER
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-azure-extensions
- - druid_azure_account=${AZURE_ACCOUNT}
- - druid_azure_key=${AZURE_KEY}
- - druid_azure_container=${AZURE_CONTAINER}
- # The frequency with which the coordinator polls the database
- # for changes. The DB population code has to wait at least this
- # long for the coordinator to notice changes.
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- depends_on:
- - zookeeper
- - metadata
-
- overlord:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-azure-extensions
- - druid_azure_account=${AZURE_ACCOUNT}
- - druid_azure_key=${AZURE_KEY}
- - druid_azure_container=${AZURE_CONTAINER}
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-azure-extensions
- - druid_azure_account=${AZURE_ACCOUNT}
- - druid_azure_key=${AZURE_KEY}
- - druid_azure_container=${AZURE_CONTAINER}
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-azure-extensions
- - druid_azure_account=${AZURE_ACCOUNT}
- - druid_azure_key=${AZURE_KEY}
- - druid_azure_container=${AZURE_CONTAINER}
- depends_on:
- - zookeeper
-
- historical:
- extends:
- file: ../Common/druid.yaml
- service: historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-azure-extensions
- - druid_azure_account=${AZURE_ACCOUNT}
- - druid_azure_key=${AZURE_KEY}
- - druid_azure_container=${AZURE_CONTAINER}
- depends_on:
- - zookeeper
-
- indexer:
- extends:
- file: ../Common/druid.yaml
- service: indexer
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-azure-extensions
- - druid_storage_type=azure
- - druid_azure_account=${AZURE_ACCOUNT}
- - druid_azure_key=${AZURE_KEY}
- - druid_azure_container=${AZURE_CONTAINER}
- volumes:
- # Test data
- - ../data:/resources
- depends_on:
- - zookeeper
diff --git a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml b/integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml
deleted file mode 100644
index f8235db625..0000000000
--- a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- # The frequency with which the coordinator polls the database
- # for changes. The DB population code has to wait at least this
- # long for the coordinator to notice changes.
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- depends_on:
- - zookeeper
- - metadata
-
- overlord:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- historical:
- extends:
- file: ../Common/druid.yaml
- service: historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- indexer:
- extends:
- file: ../Common/druid.yaml
- service: indexer
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- volumes:
- # Test data
- - ../../resources:/resources
- depends_on:
- - zookeeper
diff --git a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml b/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml
deleted file mode 100644
index 7778c1d3d0..0000000000
--- a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- # The frequency with which the coordinator polls the database
- # for changes. The DB population code has to wait at least this
- # long for the coordinator to notice changes.
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- depends_on:
- - zookeeper
- - metadata
-
- overlord:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- historical:
- extends:
- file: ../Common/druid.yaml
- service: historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- middlemanager:
- extends:
- file: ../Common/druid.yaml
- service: middlemanager
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- volumes:
- # Test data
- - ../../resources:/resources
- depends_on:
- - zookeeper
diff --git a/integration-tests-ex/cases/cluster/Common/druid.yaml b/integration-tests-ex/cases/cluster/Common/druid.yaml
index bd5caad223..c76482924c 100644
--- a/integration-tests-ex/cases/cluster/Common/druid.yaml
+++ b/integration-tests-ex/cases/cluster/Common/druid.yaml
@@ -61,6 +61,8 @@ services:
- environment-configs/common.env
- environment-configs/overlord.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
coordinator:
image: ${DRUID_IT_IMAGE_NAME}
@@ -78,6 +80,8 @@ services:
- environment-configs/common.env
- environment-configs/coordinator.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
historical:
image: ${DRUID_IT_IMAGE_NAME}
@@ -95,6 +99,8 @@ services:
- environment-configs/common.env
- environment-configs/historical.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
middlemanager:
image: ${DRUID_IT_IMAGE_NAME}
@@ -124,6 +130,8 @@ services:
- environment-configs/common.env
- environment-configs/middlemanager.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
indexer:
image: ${DRUID_IT_IMAGE_NAME}
@@ -141,6 +149,8 @@ services:
- environment-configs/common.env
- environment-configs/indexer.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
broker:
image: ${DRUID_IT_IMAGE_NAME}
@@ -158,6 +168,8 @@ services:
- environment-configs/common.env
- environment-configs/broker.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
router:
image: ${DRUID_IT_IMAGE_NAME}
@@ -175,3 +187,5 @@ services:
- environment-configs/common.env
- environment-configs/router.env
- ${OVERRIDE_ENV}
+ environment:
+ - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
diff --git a/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env b/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env
index fa20cf6cca..7a0b2ae399 100644
--- a/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env
+++ b/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env
@@ -37,3 +37,9 @@ druid_coordinator_period_indexingPeriod=PT180000S
# 2x indexing period so that kill period is valid
druid_coordinator_kill_period=PT360000S
druid_coordinator_period=PT1S
+
+# The frequency with which the coordinator polls the database
+# for changes. The DB population code has to wait at least this
+# long for the coordinator to notice changes.
+druid_manager_segments_pollDuration=PT5S
+druid_coordinator_period=PT10S
diff --git a/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml b/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml
deleted file mode 100644
index 3b024104b7..0000000000
--- a/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml
+++ /dev/null
@@ -1,155 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -------------------------------------------------------------------------
-
-# Cluster for the Google Cluster Storage (GCS) deep storage test.
-#
-# Required env vars:
-#
-# GOOGLE_BUCKET
-# GOOGLE_PREFIX
-# GOOGLE_APPLICATION_CREDENTIALS - must point to a file that holds the Google
-# credentials. Mounted into each Druid container.
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-google-extensions
- - druid_storage_type=google
- - druid_google_bucket=${GOOGLE_BUCKET}
- - druid_google_prefix=${GOOGLE_PREFIX}
- - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
- # The frequency with which the coordinator polls the database
- # for changes. The DB population code has to wait at least this
- # long for the coordinator to notice changes.
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- volumes:
- # Mount credentials file
- - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
- depends_on:
- - zookeeper
- - metadata
-
- overlord:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-google-extensions
- - druid_storage_type=google
- - druid_google_bucket=${GOOGLE_BUCKET}
- - druid_google_prefix=${GOOGLE_PREFIX}
- - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
- volumes:
- # Mount credentials file
- - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-google-extensions
- - druid_storage_type=google
- - druid_google_bucket=${GOOGLE_BUCKET}
- - druid_google_prefix=${GOOGLE_PREFIX}
- - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
- volumes:
- # Mount credentials file
- - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-google-extensions
- - druid_storage_type=google
- - druid_google_bucket=${GOOGLE_BUCKET}
- - druid_google_prefix=${GOOGLE_PREFIX}
- - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
- volumes:
- # Mount credentials file
- - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
- depends_on:
- - zookeeper
-
- historical:
- extends:
- file: ../Common/druid.yaml
- service: historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-google-extensions
- - druid_storage_type=google
- - druid_google_bucket=${GOOGLE_BUCKET}
- - druid_google_prefix=${GOOGLE_PREFIX}
- - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
- volumes:
- # Mount credentials file
- - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
- depends_on:
- - zookeeper
-
- indexer:
- extends:
- file: ../Common/druid.yaml
- service: indexer
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_test_loadList=druid-google-extensions
- - druid_storage_type=google
- - druid_google_bucket=${GOOGLE_BUCKET}
- - druid_google_prefix=${GOOGLE_PREFIX}
- - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
- volumes:
- # Mount credentials file
- - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
- # Test data
- - ../data:/resources
- depends_on:
- - zookeeper
diff --git a/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml b/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml
deleted file mode 100644
index bcecf4d9ee..0000000000
--- a/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator-one:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator-one
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - DRUID_INSTANCE=one
- # The frequency with which the coordinator polls the database
- # for changes. The DB population code has to wait at least this
- # long for the coordinator to notice changes.
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- - druid_host=coordinator-one
- depends_on:
- - zookeeper
- - metadata
-
- # The second Coordinator (and Overlord) cannot extend
- # The base service: they need distinct ports.
- coordinator-two:
- image: ${DRUID_IT_IMAGE_NAME}
- container_name: coordinator-two
- networks:
- druid-it-net:
- ipv4_address: 172.172.172.120
- ports:
- - 18081:8081
- - 18281:8281
- - 15006:8000
- volumes:
- - ${SHARED_DIR}:/shared
- env_file:
- - ../Common/environment-configs/common.env
- - ../Common/environment-configs/coordinator.env
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - DRUID_INSTANCE=two
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- - druid_host=coordinator-two
- depends_on:
- - zookeeper
- - metadata
-
- overlord-one:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord-one
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - DRUID_INSTANCE=one
- - druid_host=overlord-one
- depends_on:
- - zookeeper
- - metadata
-
- overlord-two:
- image: ${DRUID_IT_IMAGE_NAME}
- container_name: overlord-two
- networks:
- druid-it-net:
- ipv4_address: 172.172.172.110
- ports:
- - 18090:8090
- - 18290:8290
- - 15009:8000
- volumes:
- - ${SHARED_DIR}:/shared
- env_file:
- - ../Common/environment-configs/common.env
- - ../Common/environment-configs/overlord.env
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - DRUID_INSTANCE=two
- - druid_host=overlord-two
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- # The custom node role has no base definition. Also, there is
- # no environment file: the needed environment settings are
- # given here.
- custom-node-role:
- image: ${DRUID_IT_IMAGE_NAME}
- container_name: custom-node-role
- networks:
- druid-it-net:
- ipv4_address: 172.172.172.90
- ports:
- - 50011:50011
- - 9301:9301
- - 9501:9501
- - 5010:8000
- volumes:
- - ${SHARED_DIR}:/shared
- env_file:
- - ../Common/environment-configs/common.env
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - DRUID_SERVICE=custom-node-role
- - SERVICE_DRUID_JAVA_OPTS=-Xmx64m -Xms64m
- - druid_host=custom-node-role
- - druid_auth_basic_common_cacheDirectory=/tmp/authCache/custom_node_role
- - druid_server_https_crlPath=/tls/revocations.crl
diff --git a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml b/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml
deleted file mode 100644
index da658b25db..0000000000
--- a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- depends_on:
- - zookeeper
- - metadata
-
- overlord:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- historical:
- extends:
- file: ../Common/druid.yaml
- service: historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - zookeeper
-
- indexer:
- extends:
- file: ../Common/druid.yaml
- service: indexer
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_msq_intermediate_storage_enable=true
- - druid_msq_intermediate_storage_type=local
- - druid_msq_intermediate_storage_basePath=/shared/durablestorage/
- volumes:
- # Test data
- - ../../resources:/resources
- depends_on:
- - zookeeper
diff --git a/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml b/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml
deleted file mode 100644
index b7a3d74559..0000000000
--- a/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -------------------------------------------------------------------------
-
-# Cluster for the S3 deep storage test.
-#
-# Required env vars:
-#
-# AWS_REGION
-# AWS_ACCESS_KEY_ID
-# AWS_SECRET_ACCESS_KEY
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
-
-services:
- zookeeper:
- extends:
- file: ../Common/dependencies.yaml
- service: zookeeper
-
- metadata:
- extends:
- file: ../Common/dependencies.yaml
- service: metadata
-
- coordinator:
- extends:
- file: ../Common/druid.yaml
- service: coordinator
- container_name: coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- # The frequency with which the coordinator polls the database
- # for changes. The DB population code has to wait at least this
- # long for the coordinator to notice changes.
- - druid_manager_segments_pollDuration=PT5S
- - druid_coordinator_period=PT10S
- - AWS_REGION=${AWS_REGION}
- - druid_s3_accessKey=${AWS_ACCESS_KEY_ID}
- - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY}
- depends_on:
- - zookeeper
- - metadata
-
- overlord:
- extends:
- file: ../Common/druid.yaml
- service: overlord
- container_name: overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - AWS_REGION=${AWS_REGION}
- - druid_s3_accessKey=${AWS_ACCESS_KEY_ID}
- - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY}
- depends_on:
- - zookeeper
- - metadata
-
- broker:
- extends:
- file: ../Common/druid.yaml
- service: broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - AWS_REGION=${AWS_REGION}
- - druid_s3_accessKey=${AWS_ACCESS_KEY_ID}
- - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY}
- depends_on:
- - zookeeper
-
- router:
- extends:
- file: ../Common/druid.yaml
- service: router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - AWS_REGION=${AWS_REGION}
- - druid_s3_accessKey=${AWS_ACCESS_KEY_ID}
- - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY}
- depends_on:
- - zookeeper
-
- historical:
- extends:
- file: ../Common/druid.yaml
- service: historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - AWS_REGION=${AWS_REGION}
- - druid_s3_accessKey=${AWS_ACCESS_KEY_ID}
- - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY}
- depends_on:
- - zookeeper
-
- indexer:
- extends:
- file: ../Common/druid.yaml
- service: indexer
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_storage_type=s3
- - druid_storage_bucket=${DRUID_CLOUD_BUCKET}
- # Using DRUID_CLOUD_PATH env as baseKey as well.
- - druid_storage_baseKey=${DRUID_CLOUD_PATH}
- - druid_s3_accessKey=${AWS_ACCESS_KEY_ID}
- - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY}
- - AWS_REGION=${AWS_REGION}
- volumes:
- # Test data
- - ../data:/resources
- depends_on:
- - zookeeper
diff --git a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java
index 82d3171879..9e2b0ec727 100644
--- a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java
+++ b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java
@@ -32,6 +32,7 @@ import org.apache.druid.java.util.http.client.response.StatusResponseHandler;
import org.apache.druid.java.util.http.client.response.StatusResponseHolder;
import org.apache.druid.server.DruidNode;
import org.apache.druid.testing.guice.TestClient;
+import org.apache.druid.testsEx.config.ClusterConfig;
import org.apache.druid.testsEx.config.ResolvedConfig;
import org.apache.druid.testsEx.config.ResolvedDruidService;
import org.apache.druid.testsEx.config.ResolvedService.ResolvedInstance;
@@ -335,8 +336,8 @@ public class DruidClusterClient
*/
public void validate()
{
- RE exception = new RE("Just building for the stack trace");
- log.info(exception, "Starting cluster validation");
+ log.info("Starting cluster validation");
+ log.info("This cluster uses " + (ClusterConfig.isIndexer() ? "Indexer" : "Middle Manager"));
for (ResolvedDruidService service : config.requireDruid().values()) {
for (ResolvedInstance instance : service.requireInstances()) {
validateInstance(service, instance);
diff --git a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java
index 7c030d9ab4..e8d277a41b 100644
--- a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java
+++ b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java
@@ -170,6 +170,11 @@ public class ClusterConfig
return new ResolvedConfig(clusterName, resolveIncludes(), configTags);
}
+ public static boolean isIndexer()
+ {
+ return "indexer".equals(System.getenv("USE_INDEXER"));
+ }
+
/**
* Create the set of configuration tags for this run. At present, the only options
* are "middleManager" or "indexer" corresponding to the value of the
diff --git a/integration-tests-ex/cases/templates/AzureDeepStorage.py b/integration-tests-ex/cases/templates/AzureDeepStorage.py
new file mode 100644
index 0000000000..3c893832a3
--- /dev/null
+++ b/integration-tests-ex/cases/templates/AzureDeepStorage.py
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from template import BaseTemplate, generate
+
+class Template(BaseTemplate):
+
+ def gen_header_comment(self):
+ self.emit('''
+# Cluster for the Azure deep storage test.
+#
+# Required env vars:
+#
+# AZURE_ACCOUNT
+# AZURE_KEY
+# AZURE_CONTAINER
+
+''')
+
+ def extend_druid_service(self, service):
+ self.add_env(service, 'druid_test_loadList', 'druid-azure-extensions')
+ self.add_property(service, 'druid.storage.type', 'azure')
+ self.add_property(service, 'druid.azure.account', '${AZURE_ACCOUNT}')
+ self.add_property(service, 'druid.azure.key', '${AZURE_KEY}')
+ self.add_property(service, 'druid.azure.container', '${AZURE_CONTAINER}')
+
+ # This test uses different data than the default.
+ def define_data_dir(self, service):
+ self.add_volume(service, '../data', '/resources')
+
+generate(__file__, Template())
diff --git a/integration-tests-ex/cases/templates/BatchIndex.py b/integration-tests-ex/cases/templates/BatchIndex.py
new file mode 100644
index 0000000000..27f8acdbff
--- /dev/null
+++ b/integration-tests-ex/cases/templates/BatchIndex.py
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from template import BaseTemplate, generate
+
+generate(__file__, BaseTemplate())
diff --git a/integration-tests-ex/cases/templates/GcsDeepStorage.py b/integration-tests-ex/cases/templates/GcsDeepStorage.py
new file mode 100644
index 0000000000..7f91ce021e
--- /dev/null
+++ b/integration-tests-ex/cases/templates/GcsDeepStorage.py
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from template import BaseTemplate, generate
+
+class Template(BaseTemplate):
+
+ def gen_header_comment(self):
+ self.emit('''
+# Cluster for the Google Cloud Storage (GCS) deep storage test.
+#
+# Required env vars:
+#
+# GOOGLE_BUCKET
+# GOOGLE_PREFIX
+# GOOGLE_APPLICATION_CREDENTIALS - must point to a file that holds the Google
+# credentials. Mounted into each Druid container.
+
+''')
+
+ def extend_druid_service(self, service):
+ self.add_env(service, 'druid_test_loadList', 'druid-google-extensions')
+ self.add_property(service, 'druid.storage.type', 'google')
+ self.add_property(service, 'druid.google.bucket', '${GOOGLE_BUCKET}')
+ self.add_property(service, 'druid.google.prefix', '${GOOGLE_PREFIX}')
+ self.add_env(service, 'GOOGLE_APPLICATION_CREDENTIALS', '/resources/credentials.json')
+ self.add_volume(service, '${GOOGLE_APPLICATION_CREDENTIALS}', '/resources/credentials.json')
+
+ # This test uses different data than the default.
+ def define_data_dir(self, service):
+ self.add_volume(service, '../data', '/resources')
+
+generate(__file__, Template())
diff --git a/integration-tests-ex/cases/templates/HighAvailability.py b/integration-tests-ex/cases/templates/HighAvailability.py
new file mode 100644
index 0000000000..b00fc0fa1f
--- /dev/null
+++ b/integration-tests-ex/cases/templates/HighAvailability.py
@@ -0,0 +1,85 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from template import BaseTemplate, generate
+from template import COORDINATOR, ZOO_KEEPER, METADATA, OVERLORD
+
+# The second Coordinator (and Overlord) cannot extend
+# The base service: they need distinct ports.
+class Template(BaseTemplate):
+
+ def define_coordinator(self):
+ self.define_coordinator_one()
+ self.define_coordinator_two()
+
+ def define_coordinator_one(self):
+ service_name = COORDINATOR + '-one'
+ service = self.define_master_service(service_name, COORDINATOR)
+ service['container_name'] = service_name
+ self.add_env(service, 'DRUID_INSTANCE', 'one')
+ self.add_env(service, 'druid_host', service_name)
+ service['container_name'] = service_name
+
+ def define_coordinator_two(self):
+ service_name = COORDINATOR + '-two'
+ service = self.define_full_service(service_name, COORDINATOR, 120)
+ service['container_name'] = service_name
+ self.add_env(service, 'DRUID_INSTANCE', 'two')
+ self.add_env(service, 'druid_host', service_name)
+ service['ports'] = [ '18081:8081', '18281:8281', '15006:8000' ]
+ self.add_depends(service, [ ZOO_KEEPER, METADATA ] )
+
+ def define_overlord(self):
+ self.define_overlord_one()
+ self.define_overlord_two()
+
+ def define_overlord_one(self):
+ service_name = OVERLORD + '-one'
+ service = self.define_master_service(service_name, OVERLORD)
+ service['container_name'] = service_name
+ self.add_env(service, 'DRUID_INSTANCE', 'one')
+ self.add_env(service, 'druid_host', service_name)
+
+ def define_overlord_two(self):
+ service_name = OVERLORD + '-two'
+ service = self.define_full_service(service_name, OVERLORD, 110)
+ service['container_name'] = service_name
+ self.add_env(service, 'DRUID_INSTANCE', 'two')
+ self.add_env(service, 'druid_host', service_name)
+ service['ports'] = [ '18090:8090', '18290:8290', '15009:8000' ]
+ self.add_depends(service, [ ZOO_KEEPER, METADATA ] )
+
+ # No indexer in this cluster
+ def define_indexer(self):
+ pass
+
+ # No historical in this cluster
+ def define_historical(self):
+ pass
+
+ # The custom node role has no base definition. Also, there is
+ # no environment file: the needed environment settings are
+ # given here.
+ def define_custom_services(self):
+ service_name = 'custom-node-role'
+ service = self.define_full_service(service_name, None, 90)
+ service['container_name'] = service_name
+ self.add_env(service, 'DRUID_SERVICE', service_name)
+ self.add_env(service, 'SERVICE_DRUID_JAVA_OPTS', '-Xmx64m -Xms64m')
+ self.add_env(service, 'druid_host', service_name)
+ service['ports'] = [ '50011:50011', '9301:9301', '9501:9501', '5010:8000' ]
+ self.add_depends(service, [ ZOO_KEEPER ] )
+
+generate(__file__, Template())
diff --git a/integration-tests-ex/cases/templates/MultiStageQuery.py b/integration-tests-ex/cases/templates/MultiStageQuery.py
new file mode 100644
index 0000000000..bb88aa6de2
--- /dev/null
+++ b/integration-tests-ex/cases/templates/MultiStageQuery.py
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from template import BaseTemplate, generate
+
+class Template(BaseTemplate):
+
+ def define_indexer(self):
+ service = super().define_indexer()
+ self.add_property(service, 'druid.msq.intermediate.storage.enable', 'true')
+ self.add_property(service, 'druid.msq.intermediate.storage.type', 'local')
+ self.add_property(service, 'druid.msq.intermediate.storage.basePath', '/shared/durablestorage/')
+
+generate(__file__, Template())
diff --git a/integration-tests-ex/cases/templates/S3DeepStorage.py b/integration-tests-ex/cases/templates/S3DeepStorage.py
new file mode 100644
index 0000000000..9fb85ca6a7
--- /dev/null
+++ b/integration-tests-ex/cases/templates/S3DeepStorage.py
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from template import BaseTemplate, generate
+
+class Template(BaseTemplate):
+
+ def gen_header_comment(self):
+ self.emit('''
+# Cluster for the S3 deep storage test.
+#
+# Required env vars:
+#
+# DRUID_CLOUD_BUCKET
+# DRUID_CLOUD_PATH
+# AWS_REGION
+# AWS_ACCESS_KEY_ID
+# AWS_SECRET_ACCESS_KEY
+
+''')
+
+ def extend_druid_service(self, service):
+ self.add_property(service, 'druid.storage.type', 's3')
+ self.add_property(service, 'druid.s3.accessKey', '${AWS_ACCESS_KEY_ID}')
+ self.add_property(service, 'druid.s3.secretKey', '${AWS_SECRET_ACCESS_KEY}')
+ self.add_property(service, 'druid.storage.bucket', '${DRUID_CLOUD_BUCKET}')
+ self.add_property(service, 'druid.storage.baseKey', '${DRUID_CLOUD_PATH}')
+ self.add_env(service, 'AWS_REGION', '${AWS_REGION}')
+
+ # This test uses different data than the default.
+ def define_data_dir(self, service):
+ self.add_volume(service, '../data', '/resources')
+
+generate(__file__, Template())
diff --git a/integration-tests-ex/cases/templates/template.py
b/integration-tests-ex/cases/templates/template.py
new file mode 100644
index 0000000000..1be24ab032
--- /dev/null
+++ b/integration-tests-ex/cases/templates/template.py
@@ -0,0 +1,430 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Generates a docker-compose.yaml file from a test-specific template. Each
+test template either uses the base template directly, or extends the template
+to customize bits of the cluster.
+
+Since the cluster is defined as YAML, the cluster definition is built up
+internally as a Python data structure made up of maps, arrays and scalars.
+PyYAML does the grunt work of converting the data structure to the YAML file.
+'''
+
+import yaml, os, os.path
+from pathlib import Path
+
+# Constants used frequently in the template.
+
+DRUID_NETWORK = 'druid-it-net'
+DRUID_SUBNET = '172.172.172'
+ZOO_KEEPER = 'zookeeper'
+METADATA = 'metadata'
+COORDINATOR = 'coordinator'
+OVERLORD = 'overlord'
+ROUTER = 'router'
+BROKER = 'broker'
+HISTORICAL = 'historical'
+INDEXER = 'indexer'
+MIDDLE_MANAGER = 'middlemanager'
+
+def generate(template_path, template):
+ '''
+ Main routine to generate a docker-compose file from a script with the
+ given template_path, using the template class given. The template path is
+ a convenient way to locate directories in the file system using information
+ that Python itself provides.
+ '''
+
+    # Compute the cluster (test category) name from the template path, which
+    # we assume to be <module>/templates/<category>.py
+ template_path = Path(template_path)
+ cluster = template_path.stem
+
+    # Move up to the module (that is, the cases folder) relative to the template file.
+ module_dir = Path(__file__).parent.parent
+
+    # The target location for the output file is
+    # <module>/target/cluster/<cluster>/docker-compose.yaml
+    target_dir = module_dir.joinpath("target")
+    target_file = target_dir.joinpath('cluster', cluster, 'docker-compose.yaml')
+
+    # Defer back to the template class to create the output into the docker-compose.yaml file.
+ with target_file.open("w") as f:
+ template.generate_file(f, cluster)
+ f.close()
+
+class BaseTemplate:
+
+ def __init__(self):
+        # Cluster is the object tree for the docker-compose.yaml file for our test cluster.
+        # The tree is a map of objects, each of which is a map of values. The values are
+        # typically scalars, maps or arrays. These are represented generically in Python.
+ self.cluster = {}
+
+ def generate_file(self, out_file, cluster):
+ '''
+        Generates the docker-compose.yaml file contents as a header plus a YAML-serialized
+        form of the cluster object tree. The file is meant to be generated, used and discarded.
+        As a result, we don't worry about generating line-by-line comments: those should
+        appear in the template.
+ '''
+ self.cluster_name = cluster
+ self.define_cluster()
+ self.out_file = out_file
+ self.generate()
+
+ def define_cluster(self):
+ '''
+ Overall method to define the test cluster.
+ '''
+ self.define_network()
+ self.define_support_services()
+ self.define_druid_services()
+ self.define_custom_services()
+
+ def define_support_services(self):
+ '''
+        Define support services which run as containers but are not provided by Druid.
+ '''
+ self.define_zk()
+ self.define_metadata()
+
+ def define_druid_services(self):
+ '''
+        Define the set of Druid services. Override this method to provide ad-hoc
+        services unique to a test. If the test creates multiple versions of a
+ service, provide that by overriding the individual service method.
+ '''
+ self.define_coordinator()
+ self.define_overlord()
+ self.define_broker()
+ self.define_router()
+ self.define_historical()
+ self.define_indexer()
+
+ def define_custom_services(self):
+ '''
+ Override to define additional services for the cluster.
+ '''
+ pass
+
+ def generate(self):
+ '''
+ Emit output to the target file.
+ '''
+ self.gen_header()
+ self.gen_header_comment()
+ self.gen_body()
+
+ def emit(self, text):
+ '''
+ Emits text to the target file. Used for header comments.
+ '''
+ # Chop off the newline that occurs when ''' is on a separate line
+ if len(text) > 0 and text[0] == '\n':
+ text = text[1:]
+ self.out_file.write(text)
+
+ def gen_header(self):
+ '''
+ Emit the standard file header.
+ '''
+ self.emit('''
+# THIS FILE IS GENERATED -- DO NOT EDIT!
+#
+# Instead, edit the template from which this file was generated.
+# Template: templates/{}.py
+
+'''.format(self.cluster_name))
+
+ def gen_header_comment(self):
+ '''
+ Override to generate a custom header comment after the standard header.
+ '''
+ pass
+
+ def gen_body(self):
+ '''
+        Convert the cluster tree into YAML using the PyYAML library.
+ '''
+ try:
+ # Version 5.1 or later: sort the keys in the order we created them.
+ # This makes doing diffs easier when making changes.
+ yaml.dump(self.cluster, self.out_file, sort_keys=False)
+ except TypeError:
+ # For builds that use pyyaml older than 5.1. Keys will be emitted
+ # in random order.
+ yaml.dump(self.cluster, self.out_file)
+
+ def define_network(self):
+ self.cluster['networks'] = {
+ 'druid-it-net': {
+ 'name': DRUID_NETWORK,
+ 'ipam': {
+ 'config': [
+ {'subnet': DRUID_SUBNET + '.0/24'}
+ ]
+ }
+ }
+ }
+
+ def add_service(self, name, service):
+ '''
+ Add a service to the 'services' key in the cluster tree.
+ '''
+ services = self.cluster.setdefault('services', {})
+ services[name] = service
+
+ def add_volume(self, service, local, container):
+ '''
+ Adds a volume to a service.
+ '''
+ volumes = service.setdefault('volumes', [])
+ volumes.append(local + ':' + container)
+
+ def add_env(self, service, var, value):
+ '''
+ Adds an environment variable to a service.
+ '''
+ vars = service.setdefault('environment', [])
+ vars.append(var + '=' + value)
+
+ def add_property(self, service, prop, value):
+ '''
+ Sets a property for a service. The property is of the same form as the
+ .properties file: druid.some.property.
+        This method converts the property to the env var form so you don't have to.
+ '''
+ var = prop.replace('.', '_')
+ self.add_env(service, var, value)
+
+ def add_env_file(self, service, env_file):
+ '''
+ Add an environment file to a service.
+ '''
+ env_files = service.setdefault('env_file', [])
+ env_files.append(env_file)
+
+ def add_env_config(self, service, base_name):
+ '''
+ Add to a service one of the standard environment config files in
+ the Common/environment-configs directory
+ '''
+        self.add_env_file(service, '../Common/environment-configs/' + base_name + '.env')
+
+ def add_port(self, service, local, container):
+ '''
+ Add a port mapping to the service
+ '''
+ ports = service.setdefault('ports', [])
+ ports.append(local + ':' + container)
+
+ def define_external_service(self, name) -> dict:
+ '''
+ Defines a support service external to Druid as a reference to a service
+ defined in dependencies.yaml.
+ '''
+ service = {'extends': {
+ 'file': '../Common/dependencies.yaml',
+ 'service': name
+ }}
+ self.add_service(name, service)
+ return service
+
+ def define_zk(self) -> dict:
+ '''
+ Define the ZooKeeper service. Returns the service.
+ '''
+ return self.define_external_service(ZOO_KEEPER)
+
+ def define_metadata(self) -> dict:
+ '''
+ Defines the metadata (MySQL) service. Returns the service
+ '''
+ return self.define_external_service(METADATA)
+
+ def define_druid_service(self, name, base) -> dict:
+ '''
+ Defines a Druid service as a reference to the base definition in
+ the druid.yaml file. Used when referencing, and extending, a standard
+ service definition. Cannot be used for a second instance of a Druid
+ service: such services have to be defined from scratch since they
+ need unique port mappings and container names.
+ '''
+ service = {}
+ if base is not None:
+ service['extends'] = {
+ 'file': '../Common/druid.yaml',
+ 'service': base
+ }
+ self.extend_druid_service(service)
+ self.add_service(name, service)
+ return service
+
+ def extend_druid_service(self, service):
+ '''
+ Override this to add options to all Druid services.
+ '''
+ pass
+
+ def add_depends(self, service, items):
+ '''
+ Adds a service dependency to a service.
+ '''
+ if items is not None and len(items) > 0:
+ depends = service.setdefault('depends_on', [])
+ depends += items
+
+ def define_master_service(self, name, base) -> dict:
+ '''
+ Defines a "master" service: one which depends on the metadata service.
+ '''
+ service = self.define_druid_service(name, base)
+ self.add_depends(service, [ZOO_KEEPER, METADATA])
+ return service
+
+ def define_std_master_service(self, name) -> dict:
+ '''
+ Defines a "standard" master service in which the service name is
+ the same as the service defined in druid.yaml.
+ '''
+ return self.define_master_service(name, name)
+
+ def define_coordinator(self) -> dict:
+ '''
+ Defines a coordinator based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_master_service(COORDINATOR)
+
+ def define_overlord(self) -> dict:
+ '''
+ Defines an overlord based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_master_service(OVERLORD)
+
+ def define_worker_service(self, name, base) -> dict:
+ '''
+ Defines a Druid "worker" service: one that depends only on ZooKeeper.
+ '''
+ service = self.define_druid_service(name, base)
+ self.add_depends(service, [ZOO_KEEPER])
+ return service
+
+ def define_std_worker_service(self, name) -> dict:
+ '''
+        Define a worker service in which the service name for this cluster is the
+        same as the service name in druid.yaml.
+ '''
+ return self.define_worker_service(name, name)
+
+ def define_broker(self) -> dict:
+ '''
+ Defines a broker based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_worker_service(BROKER)
+
+ def define_router(self) -> dict:
+ '''
+ Defines a router based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_worker_service(ROUTER)
+
+ def define_historical(self) -> dict:
+ '''
+ Defines a historical based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_worker_service(HISTORICAL)
+
+ def define_std_indexer(self, base) -> dict:
+ '''
+ Defines a standard indexer service in which the service name in this
+ cluster is the same as the definition in druid.yaml. The service mounts
+ the standard data directory.
+ '''
+ service = self.define_worker_service(INDEXER, base)
+ self.define_data_dir(service)
+ return service
+
+ def define_data_dir(self, service):
+ '''
+        Define the input data directory mounted into the selected indexer service.
+ '''
+ self.add_volume(service, '${MODULE_DIR}/resources', '/resources')
+
+ def define_indexer_service(self) -> dict:
+ '''
+ Defines an indexer based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_indexer(INDEXER)
+
+ def define_middle_manager_service(self) -> dict:
+ '''
+ Defines a middle manager based on the standard definition. Override to
+ customize environment variables, mounts, etc.
+ '''
+ return self.define_std_indexer(MIDDLE_MANAGER)
+
+ def get_indexer_option(self) -> str:
+ '''
+        Choose which "indexer" to use: middle manager or indexer (the specific service),
+        based on the USE_INDEXER environment variable. Defaults to middle manager.
+ '''
+ value = os.environ.get('USE_INDEXER')
+ if value is None:
+ value = MIDDLE_MANAGER
+ return value
+
+ def define_indexer(self):
+ '''
+        Defines the cluster's indexer (generic term) service as either indexer (the
+        specific service) or middle manager, depending on the USE_INDEXER environment
+        variable.
+ '''
+ value = self.get_indexer_option()
+ key = value.lower()
+ if key == INDEXER:
+ return self.define_indexer_service()
+ if key == MIDDLE_MANAGER:
+ return self.define_middle_manager_service()
+ raise Exception("Invalid USE_INDEXER value: [" + value + ']')
+
+ def define_full_service(self, name, base, host_node):
+ '''
+ Create a clone of a service as defined in druid.yaml. Use this when
+ creating a second instance of a service, since the second must use
+ distinct host IP and ports.
+ '''
+ service = {
+ 'image': '${DRUID_IT_IMAGE_NAME}',
+ 'networks': {
+ DRUID_NETWORK : {
+ 'ipv4_address': DRUID_SUBNET + '.' + str(host_node)
+ }
+ },
+ 'volumes' : [ '${SHARED_DIR}:/shared' ]
+ }
+ self.add_env_config(service, 'common')
+ if base is not None:
+ self.add_env_config(service, base)
+ self.add_env_file(service, '${OVERRIDE_ENV}')
+        self.add_env(service, 'DRUID_INTEGRATION_TEST_GROUP', '${DRUID_INTEGRATION_TEST_GROUP}')
+ self.add_service(name, service)
+ return service
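
For reference, the sketch below shows how a hypothetical category template might combine the helpers above. It is not part of this commit; the property name, the `historical-two` service name, host node 95, port 8284, and the assumption that a `historical.env` file exists under `Common/environment-configs` are all illustrative.

```python
from template import BaseTemplate, generate, ZOO_KEEPER, HISTORICAL

class Template(BaseTemplate):

    # Add an option to the standard Druid services. add_property converts
    # the property name to its environment-variable form (dots become
    # underscores), here druid_server_http_numThreads=50.
    def extend_druid_service(self, service):
        self.add_property(service, 'druid.server.http.numThreads', '50')

    # A second instance of a standard service has to be cloned with
    # define_full_service, since it needs its own IP, ports and name.
    def define_custom_services(self):
        service = self.define_full_service('historical-two', HISTORICAL, 95)
        service['container_name'] = 'historical-two'
        self.add_env(service, 'DRUID_SERVICE', HISTORICAL)
        self.add_port(service, '8284', '8083')
        self.add_depends(service, [ZOO_KEEPER])

generate(__file__, Template())
```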
diff --git a/integration-tests-ex/docs/compose.md
b/integration-tests-ex/docs/compose.md
index 01b41896ad..c66279e816 100644
--- a/integration-tests-ex/docs/compose.md
+++ b/integration-tests-ex/docs/compose.md
@@ -82,6 +82,13 @@ files that define properties as environment variables. All
are located in
* `<service>.env` - Properties unique to one service. This is the test
equivalent to
the `service/runtime.properties` files.
+### MySQL Driver
+
+Tests can use any MySQL-compatible driver, typically MySQL or MariaDB; the tests
+use the MySQL driver by default. Choose a different driver by setting the
+`MYSQL_DRIVER_CLASSNAME` environment variable when running tests. The variable
+selects the driver both in the Druid server running in a container and in the
+test "clients".
+
### Special Environment Variables
Druid properties can be a bit awkward and verbose in a test environment. A
number of
@@ -236,3 +243,54 @@ To define a test cluster, do the following:
* If you need multiple instances of the same service, extend that service
twice, and define distinct names and port numbers.
* Add any test-specific environment configuration required.
+
+## Generating `docker-compose.yaml` Files
+
+Each test has somewhat different needs for its test cluster. Yet, there is a
+great deal of consistency across test clusters and across services. If we create
+the files by hand, the result is a large amount of copy/paste redundancy, with
+all the problems that copy/paste implies.
+
+As an alternative, the framework can generate the `docker-compose.yaml` file
+using a simple Python-based template mechanism. To use it:
+
+* Omit the test cluster directory: `cluster/<category>`.
+* Instead, create a template file: `templates/<category>.py`.
+* The minimal file appears below:
+
+```python
+from template import BaseTemplate, generate
+
+generate(__file__, BaseTemplate())
+```
+
+The above will generate a "generic" cluster: one of each kind of service, with
+either a Middle Manager or Indexer depending on the `USE_INDEXER`
+env var.
+
+You customize your specific cluster by creating a test-specific template class
+which overrides the various methods that build up the cluster. Because the
+template is Python, we first build the cluster as a set of Python dictionaries
+and arrays, then let [PyYAML](https://pyyaml.org/wiki/PyYAMLDocumentation)
+convert those objects to a YAML file. Many methods exist to help you populate
+the configuration tree. See any of the existing template files, or the sketch
+after the list below, for examples.
+
+For example, you can:
+
+* Add test-specific environment config to one, some or all services.
+* Add or remove services.
+* Create multiples of selected services.
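
For instance, a template along these lines (a sketch only, not part of this commit; the `historical-test.env` file is hypothetical) adds configuration to just the historical service and omits the indexer entirely:

```python
from template import BaseTemplate, generate

class Template(BaseTemplate):

    # Add extra configuration to a single service.
    def define_historical(self):
        service = super().define_historical()
        self.add_env_config(service, 'historical-test')  # hypothetical env file
        return service

    # Drop a service this test never uses.
    def define_indexer(self):
        pass

generate(__file__, Template())
```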
+
+The advantage is that, as Druid evolves and we change the basics, those changes
+are automatically propagated to all test clusters.
+
+Once you've created your template, the test framework re-generates the
+`docker-compose.yaml` file on each run to reflect any per-run customization.
+The generated file is found in `target/cluster/<category>/docker-compose.yaml`.
+As with all generated files, resist the temptation to edit the generated file;
+change the template instead.
+
+The `target/cluster/<category>` folder is temporary; the script copies over the
+`Common` directory as well.
diff --git a/integration-tests-ex/docs/docker.md
b/integration-tests-ex/docs/docker.md
index afbc106468..e030b45e55 100644
--- a/integration-tests-ex/docs/docker.md
+++ b/integration-tests-ex/docs/docker.md
@@ -299,4 +299,3 @@ simple-client-sslcontext
```
If more are needed, they should be added during the image build.
-
diff --git a/integration-tests-ex/docs/druid-config.md
b/integration-tests-ex/docs/druid-config.md
index 5d715f43ca..82cdc7e199 100644
--- a/integration-tests-ex/docs/druid-config.md
+++ b/integration-tests-ex/docs/druid-config.md
@@ -42,7 +42,7 @@ variables. Thus there are two kinds:
## Configuration Flow
-We use `docker-compose` to gather up the variables. From most specific
+We use `docker compose` to gather up the variables. From most specific
(highest priority) to most general, configuration comes from:
* An environment variable set by the script which launches Docker Compose.
diff --git a/integration-tests-ex/docs/guide.md
b/integration-tests-ex/docs/guide.md
index 0646af202d..f39b37a070 100644
--- a/integration-tests-ex/docs/guide.md
+++ b/integration-tests-ex/docs/guide.md
@@ -246,9 +246,31 @@ Indexer. To run on Indexer:
* In the environment, `export USE_INDEXER=indexer`. (Use `middleManager`
otherwise. If the variable is not set, `middleManager` is the default.)
+
+Then, there are two ways to handle indexer-specific configuration: the
+crude-but-effective way and the fancy way.
+
+#### Using Two Docker-Compose Files
+
+The crude way involves much copy/paste and results in two files which must be
+maintained in sync:
+
* The `cluster/<category>/docker-compose.yaml` file should be for the Middle Manager.
  Create a separate file called `cluster/<category>/docker-compose-indexer.yaml` to
  define the Indexer-based cluster.
+
+#### Generated Docker-Compose File
+
+The fancy way is to use the `docker-compose.yaml` generation template described
+elsewhere in this documentation. In that case, the script automatically generates
+either the Middle Manager or the Indexer service, depending on the environment
+variable mentioned above.
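
For instance (a sketch only, not part of this commit; the property and value are illustrative), a template can customize just the Indexer flavor while leaving the Middle Manager path untouched:

```python
from template import BaseTemplate, generate

class Template(BaseTemplate):

    # Used only when USE_INDEXER=indexer; the Middle Manager definition
    # from druid.yaml is used unchanged.
    def define_indexer_service(self):
        service = super().define_indexer_service()
        self.add_property(service, 'druid.worker.capacity', '4')
        return service

generate(__file__, Template())
```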
+
+#### Client Configuration
+
+The client will choose Middle Manager or Indexer automatically if you set the
+`USE_INDEXER` environment variable in your IDE. (When run via the build
+process, the environment variable is already set.)
+
* The test `src/test/resources/cluster/<category>/docker.yaml` file should contain a
  conditional entry to select either the Middle Manager or Indexer. Example:
@@ -265,3 +287,23 @@ Indexer. To run on Indexer:
Now, the test will run on Indexer if the above environment variable is set,
Middle Manager
otherwise.
+
+#### Disable Individual Tests
+
+You may have a test that can run only on Middle Manager or Indexer. The
+crude-but-effective way to handle this is:
+
+```
+ @Test
+ public void myMMOnlyTest()
+ {
+ if (ClusterConfig.isIndexer()) {
+ return; // Runs only on MM
+ }
+ // The MM-only test code here
+ }
+```
+
+It would be possible to define an annotation, managed by the `DruidTestRunner`,
+if this becomes something we need to do often.
+
diff --git a/integration-tests-ex/image/build-image.sh
b/integration-tests-ex/image/build-image.sh
index 0b37f24777..4a31a78419 100755
--- a/integration-tests-ex/image/build-image.sh
+++ b/integration-tests-ex/image/build-image.sh
@@ -42,7 +42,6 @@ export MYSQL_IMAGE_VERSION=$MYSQL_IMAGE_VERSION
export CONFLUENT_VERSION=$CONFLUENT_VERSION
export MARIADB_VERSION=$MARIADB_VERSION
export HADOOP_VERSION=$HADOOP_VERSION
-export MYSQL_DRIVER_CLASSNAME=$MYSQL_DRIVER_CLASSNAME
export DRUID_IT_IMAGE_NAME=$DRUID_IT_IMAGE_NAME
EOF
diff --git a/it.sh b/it.sh
index e15e9413f9..b03b7acb57 100755
--- a/it.sh
+++ b/it.sh
@@ -34,23 +34,26 @@ Usage: $0 cmd [category]
ci
build Druid and the distribution for CI pipelines
build
- build Druid and the distribution
+ Build Druid and the distribution
dist
- build the Druid distribution (only)
+ Build the Druid distribution (only)
tools
- build druid-it-tools
+ Build druid-it-tools
image
- build the test image
+ Build the test image
up <category>
- start the cluster for category
+ Start the cluster for category
down <category>
- stop the cluster for category
+ Stop the cluster for category
test <category>
- start the cluster, run the test for category, and stop the cluster
+ Start the cluster, run the test for category, and stop the cluster
tail <category>
- show the last 20 lines of each container log
+ Show the last 20 lines of each container log
+ gen
+ Generate docker-compose.yaml files (done automatically on up)
+ run one IT in Travis (build dist, image, run test, tail logs)
github <category>
- run one IT in Github Workflows (run test, tail logs)
+ Run one IT in Github Workflows (run test, tail logs)
prune
prune Docker volumes
@@ -100,7 +103,7 @@ function tail_logs
function build_override {
mkdir -p target
- OVERRIDE_FILE="override.env"
+ OVERRIDE_FILE="$(pwd)/target/override.env"
rm -f "$OVERRIDE_FILE"
touch "$OVERRIDE_FILE"
@@ -128,8 +131,7 @@ function build_override {
# environment into the container.
# Reuse the OVERRIDE_ENV variable to pass the full list to Docker compose
- target_dir=`pwd`
- export OVERRIDE_ENV="$target_dir/$OVERRIDE_FILE"
+ export OVERRIDE_ENV="$OVERRIDE_FILE"
}
function prepare_category {
@@ -143,7 +145,6 @@ function prepare_category {
function prepare_docker {
cd $DRUID_DEV/integration-tests-ex/cases
build_override
- verify_env_vars
}
function require_env_var {
@@ -176,6 +177,8 @@ function verify_env_vars {
fi
;;
"S3DeepStorage")
+ require_env_var DRUID_CLOUD_BUCKET
+ require_env_var DRUID_CLOUD_PATH
require_env_var AWS_REGION
require_env_var AWS_ACCESS_KEY_ID
require_env_var AWS_SECRET_ACCESS_KEY
@@ -183,6 +186,11 @@ function verify_env_vars {
esac
}
+if [ $# = 0 ]; then
+ usage
+ exit 1
+fi
+
CMD=$1
shift
MAVEN_IGNORE="-P skip-static-checks,skip-tests -Dmaven.javadoc.skip=true"
@@ -207,9 +215,17 @@ case $CMD in
cd $DRUID_DEV/integration-tests-ex/image
mvn install -P test-image $MAVEN_IGNORE
;;
+ "gen")
+ # Generate the docker-compose.yaml files. Mostly for debugging
+ # since the up command does generation implicitly.
+ prepare_category $1
+ prepare_docker
+ ./cluster.sh gen $CATEGORY
+ ;;
"up" )
prepare_category $1
prepare_docker
+ verify_env_vars
./cluster.sh up $CATEGORY
;;
"down" )
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]