Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2022-04-19 09:58:56
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1941 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Tue Apr 19 09:58:56 2022 rev:243 rq:970675 version:4.4.0+20220418.cbf7a09e

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2022-03-21 20:12:26.460465160 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1941/crmsh.changes    2022-04-19 09:59:54.403692360 +0200
@@ -1,0 +2,10 @@
+Mon Apr 18 06:48:42 UTC 2022 - xli...@suse.com
+
+- Update to version 4.4.0+20220418.cbf7a09e:
+  * Dev: README: update README
+  * Dev: remove unused files
+  * Dev: behave: Change behave test files based on new added "run-functional-tests"
+  * Dev: run-functional-tests: Add file run-functional-tests
+  * Dev: Dockerfile: Update Dockerfile used by crmsh CI for master branch
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.4.0+20220321.8cf6a9d1.tar.bz2

New:
----
  crmsh-4.4.0+20220418.cbf7a09e.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.i1e7fq/_old  2022-04-19 09:59:54.903693010 +0200
+++ /var/tmp/diff_new_pack.i1e7fq/_new  2022-04-19 09:59:54.907693015 +0200
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.4.0+20220321.8cf6a9d1
+Version:        4.4.0+20220418.cbf7a09e
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.i1e7fq/_old  2022-04-19 09:59:54.947693067 +0200
+++ /var/tmp/diff_new_pack.i1e7fq/_new  2022-04-19 09:59:54.947693067 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">8cf6a9d13af6496fdd384c18c54680ceb354b72d</param>
+  <param name="changesrevision">682c8132161630cc4c2af4bc249c40f93d995bae</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-4.4.0+20220321.8cf6a9d1.tar.bz2 -> crmsh-4.4.0+20220418.cbf7a09e.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/.github/workflows/crmsh-ci.yml new/crmsh-4.4.0+20220418.cbf7a09e/.github/workflows/crmsh-ci.yml
--- old/crmsh-4.4.0+20220321.8cf6a9d1/.github/workflows/crmsh-ci.yml    2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/.github/workflows/crmsh-ci.yml    2022-04-18 08:32:05.000000000 +0200
@@ -11,7 +11,7 @@
     branches: [ master ]
 
 env:
-  DOCKER_SCRIPT: ./test/docker_scripts.sh
+  DOCKER_SCRIPT: ./test/run-functional-tests
   FOLDER: /package
   PACKAGE_NAME: crmsh
   OBS_USER: ${{ secrets.OBS_USER }}
@@ -50,8 +50,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT crm_report before_install
-        $DOCKER_SCRIPT crm_report run bugs
+        $DOCKER_SCRIPT 6
 
   functional_test_bootstrap_bugs:
     runs-on: ubuntu-latest
@@ -62,8 +61,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT bootstrap before_install
-        $DOCKER_SCRIPT bootstrap run bugs
+        $DOCKER_SCRIPT 1
 
   functional_test_bootstrap_common:
     runs-on: ubuntu-latest
@@ -74,8 +72,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT bootstrap before_install
-        $DOCKER_SCRIPT bootstrap run init_join_remove
+        $DOCKER_SCRIPT 2
 
   functional_test_bootstrap_options:
     runs-on: ubuntu-latest
@@ -86,8 +83,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT bootstrap before_install
-        $DOCKER_SCRIPT bootstrap run options
+        $DOCKER_SCRIPT 3
 
   functional_test_qdevice_setup_remove:
     runs-on: ubuntu-latest
@@ -98,8 +94,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT qdevice before_install
-        $DOCKER_SCRIPT qdevice run setup_remove
+        $DOCKER_SCRIPT 9
 
   functional_test_qdevice_options:
     runs-on: ubuntu-latest
@@ -110,8 +105,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT qdevice before_install
-        $DOCKER_SCRIPT qdevice run options
+        $DOCKER_SCRIPT 8
 
   functional_test_qdevice_validate:
     runs-on: ubuntu-latest
@@ -122,8 +116,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT qdevice before_install
-        $DOCKER_SCRIPT qdevice run validate
+        $DOCKER_SCRIPT 11
 
   functional_test_qdevice_user_case:
     runs-on: ubuntu-latest
@@ -134,8 +127,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT qdevice before_install
-        $DOCKER_SCRIPT qdevice run usercase
+        $DOCKER_SCRIPT 10
 
   functional_test_resource_subcommand:
     runs-on: ubuntu-latest
@@ -146,8 +138,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT resource before_install
-        $DOCKER_SCRIPT resource run
+        $DOCKER_SCRIPT 12 13
 
   functional_test_configure_sublevel:
     runs-on: ubuntu-latest
@@ -158,8 +149,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT configure before_install
-        $DOCKER_SCRIPT configure run bugs
+        $DOCKER_SCRIPT 4
 
   functional_test_constraints_bugs:
     runs-on: ubuntu-latest
@@ -170,8 +160,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT constraints before_install
-        $DOCKER_SCRIPT constraints run bugs
+        $DOCKER_SCRIPT 5
 
   functional_test_geo_cluster:
     runs-on: ubuntu-latest
@@ -182,8 +171,7 @@
       run:  |
         echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee 
/etc/docker/daemon.json
         sudo systemctl restart docker.service
-        $DOCKER_SCRIPT geo before_install
-        $DOCKER_SCRIPT geo run setup
+        $DOCKER_SCRIPT 7
 
   original_regression_test:
     runs-on: ubuntu-latest
@@ -192,8 +180,7 @@
     - uses: actions/checkout@v2
     - name: original regression test
       run:  |
-        $DOCKER_SCRIPT original before_install
-        $DOCKER_SCRIPT original run
+        $DOCKER_SCRIPT 14
 
   delivery:
     needs: [unit_test,
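
The hunks above change the CI jobs from docker_scripts.sh's "<type> before_install" / "<type> run" invocations to single numeric test case indices understood by run-functional-tests; index 14 maps to the original regression test, and the resource job passes two indices, presumably one per resource feature file. The index-to-feature mapping can be listed locally (a sketch, assuming a crmsh.git checkout):

# ./test/run-functional-tests -l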
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/Dockerfile new/crmsh-4.4.0+20220418.cbf7a09e/Dockerfile
--- old/crmsh-4.4.0+20220321.8cf6a9d1/Dockerfile        2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/Dockerfile        2022-04-18 08:32:05.000000000 +0200
@@ -1,33 +1,22 @@
-FROM opensuse/leap:15.2
+FROM opensuse/tumbleweed
 MAINTAINER Xin Liang <xli...@suse.com>
 
 ARG ssh_prv_key
 ARG ssh_pub_key
-# Above is for passwordless ssh
-# docker build -t haleap --build-arg ssh_prv_key="$(cat /root/.ssh/id_rsa)" --build-arg ssh_pub_key="$(cat /root/.ssh/id_rsa.pub)" .
-
-ENV container docker
-
-RUN zypper -n install systemd; zypper clean ; \
-(cd /usr/lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
-rm -f /usr/lib/systemd/system/multi-user.target.wants/*;\
-rm -f /etc/systemd/system/*.wants/*;\
-rm -f /usr/lib/systemd/system/local-fs.target.wants/*; \
-rm -f /usr/lib/systemd/system/sockets.target.wants/*udev*; \
-rm -f /usr/lib/systemd/system/sockets.target.wants/*initctl*; \
-rm -f /usr/lib/systemd/system/basic.target.wants/*;\
-rm -f /usr/lib/systemd/system/anaconda.target.wants/*;
+# docker build -t hatbw --build-arg ssh_prv_key="$(cat /root/.ssh/id_rsa)" --build-arg ssh_pub_key="$(cat /root/.ssh/id_rsa.pub)" .
+# docker login
+# docker tag hatbw liangxin1300/hatbw
+# docker push liangxin1300/hatbw
+
+RUN zypper ref
+RUN zypper -n install systemd
+RUN zypper -n install make autoconf automake vim which libxslt-tools mailx iproute2 iputils bzip2 openssh tar file glibc-locale-base firewalld libopenssl1_1 dos2unix iptables
+RUN zypper -n install python3 python3-lxml python3-python-dateutil python3-parallax python3-setuptools python3-PyYAML python3-curses python3-behave
+RUN zypper -n install csync2 libglue-devel corosync corosync-qdevice pacemaker booth corosync-qnetd
 
 RUN mkdir -p /root/.ssh && chmod 0700 /root/.ssh
 RUN echo "$ssh_prv_key" > /root/.ssh/id_rsa && chmod 600 /root/.ssh/id_rsa
 RUN echo "$ssh_pub_key" > /root/.ssh/id_rsa.pub && chmod 600 
/root/.ssh/id_rsa.pub
 RUN echo "$ssh_pub_key" > /root/.ssh/authorized_keys && chmod 600 
/root/.ssh/authorized_keys
 
-RUN zypper -n install make autoconf automake vim which libxslt-tools mailx iproute2 iputils bzip2 openssh tar file
-RUN zypper -n install python3 python3-lxml python3-python-dateutil python3-parallax python3-setuptools python3-PyYAML python3-curses python3-pip
-RUN zypper -n install csync2 libglue-devel corosync corosync-qdevice pacemaker
-RUN pip install --upgrade pip
-RUN pip install behave tox
-
-VOLUME [ "/sys/fs/cgroup" ]
 CMD ["/usr/lib/systemd/systemd", "--system"]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/README.md new/crmsh-4.4.0+20220418.cbf7a09e/README.md
--- old/crmsh-4.4.0+20220321.8cf6a9d1/README.md 2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/README.md 2022-04-18 08:32:05.000000000 +0200
@@ -51,13 +51,28 @@
 - `pip install tox`
 - In root directory of crmsh project, run `tox`
 
-[In github action](https://github.com/ClusterLabs/crmsh/actions/workflows/crmsh-ci.yml)
+#### Functional tests
+In local:
+- In root directory of crmsh project, run `./test/run-functional-tests [OPTIONS]|[TESTCASE INDEX]`
+
+```
+# ./test/run-functional-tests -h
+Usage: run-functional-tests [OPTIONS]|[TESTCASE INDEX]
+run-functional-tests is a tool for developers to setup the cluster in containers to run functional tests.
+The container image is based on Tumbleweed with preinstalled packages of the cluster stack include pacemaker/corosync/crmsh and many others.
+Users can make the code change under crmsh.git including test cases. This tool will pick up the code change and "make install" to all running containers.
+
+OPTIONS:
+  -h, --help           Show this help message and exit
+  -l                   List existing functional test cases and exit
+  -n NUM               Only setup a cluster with NUM nodes(containers)
+  -x                   Don't config corosync on containers(with -n option)
+  -d                   Cleanup the cluster containers
+```
 
-To run the regression tests in a docker container, use the `test/containerized-regression-tests.sh` script. This relies on having access to `docker` to pull down the base image and run the regression test suite. The docker base image used is defined in the `Dockerfile` included in the repository.
+The docker base image used is defined in the `Dockerfile` included in the repository.
+
+[In github action](https://github.com/ClusterLabs/crmsh/actions/workflows/crmsh-ci.yml)
 
 ## Manifest
 
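For reference, a typical local workflow with the new tool, pieced together from the help text above (the exact sequence is illustrative):

# ./test/run-functional-tests -l      # list test case indices
# ./test/run-functional-tests -n 2    # set up a running 2-node cluster in containers
# ./test/run-functional-tests 1 2     # run test cases 1 and 2
# ./test/run-functional-tests -d      # clean up the containers and networks
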
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/data-manifest new/crmsh-4.4.0+20220418.cbf7a09e/data-manifest
--- old/crmsh-4.4.0+20220321.8cf6a9d1/data-manifest     2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/data-manifest     2022-04-18 08:32:05.000000000 +0200
@@ -59,11 +59,9 @@
 test/cibtests/004.input
 test/cib-tests.sh
 test/cibtests/shadow.base
-test/containerized-regression-tests.sh
 test/crm-interface
 test/defaults
 test/descriptions
-test/docker_scripts.sh
 test/evaltest.sh
 test/features/bootstrap_bugs.feature
 test/features/bootstrap_init_join_remove.feature
@@ -91,8 +89,7 @@
 test/profile-history.sh
 test/README.regression
 test/regression.sh
-test/run-in-container.sh
-test/run-in-travis.sh
+test/run-functional-tests
 test/testcases/acl
 test/testcases/acl.excl
 test/testcases/acl.exp
@@ -142,7 +139,6 @@
 test/testcases/shadow
 test/testcases/shadow.exp
 test/testcases/xmlonly.sh
-test/travis-tests.sh
 test/unittests/bug-862577_corosync.conf
 test/unittests/corosync.conf.1
 test/unittests/corosync.conf.2
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/containerized-regression-tests.sh new/crmsh-4.4.0+20220418.cbf7a09e/test/containerized-regression-tests.sh
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/containerized-regression-tests.sh    2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/containerized-regression-tests.sh    1970-01-01 01:00:00.000000000 +0100
@@ -1,8 +0,0 @@
-#!/bin/sh
-docker pull krig/crmsh:latest
-
-if [ "$1" = "--unit-tests" ]; then
-       docker run -t -v "$(pwd):/app" krig/crmsh /bin/sh -c "cd /app; 
./test/unit-tests-in-container.sh $(id -un) $(id -gn) $(id -u) $(id -g)"
-else
-       docker run -t -v "$(pwd):/app" krig/crmsh /bin/sh -c "cd /app; 
./test/run-in-container.sh $(id -un) $(id -gn) $(id -u) $(id -g)"
-fi
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/docker_scripts.sh new/crmsh-4.4.0+20220418.cbf7a09e/test/docker_scripts.sh
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/docker_scripts.sh    2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/docker_scripts.sh    1970-01-01 01:00:00.000000000 +0100
@@ -1,83 +0,0 @@
-#!/bin/bash
-Docker_image='liangxin1300/hatbw'
-HA_packages='pacemaker corosync corosync-qdevice'
-TEST_TYPE='bootstrap qdevice crm_report geo'
-
-etc_hosts_content=`cat <<EOF
-10.10.10.2 hanode1
-10.10.10.3 hanode2
-10.10.10.4 hanode3
-10.10.10.5 hanode4
-10.10.10.6 hanode5
-20.20.20.7 qnetd-node
-10.10.10.8 node-without-ssh
-EOF`
-
-deploy_node() {
-  node_name=$1
-  echo "##### Deploy $node_name start"
-
-  docker run -d --name=$node_name --hostname $node_name \
-             --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v "$(pwd):/app" --shm-size="1g" ${Docker_image}
-  docker network connect second_net $node_name
-  docker network connect third_net $node_name
-  docker exec -t $node_name /bin/sh -c "echo \"$etc_hosts_content\" | grep -v $node_name >> /etc/hosts"
-  # https://unix.stackexchange.com/questions/335189/system-refuses-ssh-and-stuck-on-booting-up-after-systemd-installation
-  #docker exec -t $node_name /bin/sh -c "systemctl start sshd.service; systemctl start systemd-user-sessions.service"
-
-  if [ "$node_name" == "qnetd-node" ];then
-    docker exec -t $node_name /bin/sh -c "zypper ref;zypper -n in corosync-qnetd"
-  elif [ "$node_name" == "node-without-ssh" ];then
-    docker exec -t $node_name /bin/sh -c "systemctl stop sshd.service"
-  else
-    docker exec -t $node_name /bin/sh -c "cd /app; ./test/run-in-travis.sh build"
-  fi
-  docker exec -t $node_name /bin/sh -c "rm -rf /run/nologin"
-  echo "##### Deploy $node_name finished"
-  echo
-}
-
-before() {
-  docker pull ${Docker_image}
-  docker network create --subnet 10.10.10.0/24 --ipv6 --subnet 2001:db8:10::/64 second_net
-  docker network create --subnet 20.20.20.0/24 --ipv6 --subnet 2001:db8:20::/64 third_net
-
-  deploy_node hanode1
-  deploy_node hanode2
-  deploy_node hanode3
-  deploy_node hanode4
-  deploy_node hanode5
-  if [ "$1" == "qdevice" ];then
-    deploy_node qnetd-node
-    deploy_node node-without-ssh
-  fi
-}
-
-run() {
-  docker exec -t hanode1 /bin/sh -c "cd /app; ./test/run-in-travis.sh $1 $2"
-}
-
-usage() {
-  echo "Usage: ./test/`basename $0` <`echo ${TEST_TYPE// /|}`>"
-}
-
-
-# $1 could be "bootstrap", "crm_report", "qdevice" etc.
-# $2 could be "before_install" or "run"
-# $3 could be suffix of feature file
-case "$1/$2" in
-  */before_install)
-    before $1
-    ;;
-  */run)
-    run $1 $3
-    ;;
-  *)
-    if [ "$#" -eq 0 ] || ! [[ $TEST_TYPE =~ (^|[[:space:]])$1($|[[:space:]]) 
]];then
-      usage
-      exit 1
-    fi
-    before $1
-    run $1
-    ;;
-esac
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/bootstrap_bugs.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/bootstrap_bugs.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/bootstrap_bugs.feature      2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/bootstrap_bugs.feature      2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: Regression test for bootstrap bugs
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2 hanode3
 
   @clean
   Scenario: Set placement-strategy value as "default"(bsc#1129462)
@@ -55,7 +56,7 @@
     When    Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
     When    Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
-    Then    Expected "10.10.10.3" in stdout
+    Then    Expected "@hanode2.ip.0" in stdout
     #And     Service "hawk.service" is "started" on "hanode2"
     When    Run "crm cluster remove hanode2 -y" on "hanode1"
     Then    Online nodes are "hanode1"
@@ -63,7 +64,7 @@
     # verify bsc#1175708
     #And     Service "hawk.service" is "stopped" on "hanode2"
     When    Run "crm corosync get nodelist.node.ring0_addr" on "hanode1"
-    Then    Expected "10.10.10.3" not in stdout
+    Then    Expected "@hanode2.ip.0" not in stdout
 
   @clean
   Scenario: Multi nodes join in parallel(bsc#1175976)
@@ -101,8 +102,8 @@
   Scenario: Change host name in /etc/hosts as alias(bsc#1183654)
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
-    When    Run "echo '10.10.10.2 HANODE1' >> /etc/hosts" on "hanode1"
-    When    Run "echo '10.10.10.3 HANODE2' >> /etc/hosts" on "hanode2"
+    When    Run "echo '@hanode1.ip.0 HANODE1' >> /etc/hosts" on "hanode1"
+    When    Run "echo '@hanode2.ip.0 HANODE2' >> /etc/hosts" on "hanode2"
     When    Run "crm cluster init -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     When    Run "crm cluster join -c HANODE1 -y" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/bootstrap_init_join_remove.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/bootstrap_init_join_remove.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/bootstrap_init_join_remove.feature  2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/bootstrap_init_join_remove.feature  2022-04-18 08:32:05.000000000 +0200
@@ -3,6 +3,7 @@
 
   Test crmsh bootstrap init/join/remove process
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
 
   Background: Setup a two nodes cluster
     Given   Cluster service is "stopped" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/bootstrap_options.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/bootstrap_options.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/bootstrap_options.feature   2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/bootstrap_options.feature   2022-04-18 08:32:05.000000000 +0200
@@ -10,6 +10,7 @@
       "-u":      Configure corosync to communicate over unicast
       "-U":      Configure corosync to communicate over multicast
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
 
   @clean
   Scenario: Check help output
@@ -43,33 +44,33 @@
   @clean
   Scenario: Bind specific network interface using "-i" option
     Given   Cluster service is "stopped" on "hanode1"
-    And     IP "10.10.10.2" is belong to "eth1"
+    And     IP "@hanode1.ip.0" is belong to "eth1"
     When    Run "crm cluster init -i eth1 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
-    And     IP "10.10.10.2" is used by corosync on "hanode1"
+    And     IP "@hanode1.ip.0" is used by corosync on "hanode1"
     And     Show corosync ring status
 
   @clean
   Scenario: Using multiple network interface using "-M" option
     Given   Cluster service is "stopped" on "hanode1"
-    And     IP "172.17.0.2" is belong to "eth0"
-    And     IP "10.10.10.2" is belong to "eth1"
+    And     IP "@hanode1.ip.default" is belong to "eth0"
+    And     IP "@hanode1.ip.0" is belong to "eth1"
     When    Run "crm cluster init -M -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
-    And     IP "172.17.0.2" is used by corosync on "hanode1"
-    And     IP "10.10.10.2" is used by corosync on "hanode1"
+    And     IP "@hanode1.ip.default" is used by corosync on "hanode1"
+    And     IP "@hanode1.ip.0" is used by corosync on "hanode1"
     And     Show corosync ring status
     And     Corosync working on "unicast" mode
 
   @clean
   Scenario: Using multiple network interface using "-i" option
     Given   Cluster service is "stopped" on "hanode1"
-    And     IP "172.17.0.2" is belong to "eth0"
-    And     IP "10.10.10.2" is belong to "eth1"
+    And     IP "@hanode1.ip.default" is belong to "eth0"
+    And     IP "@hanode1.ip.0" is belong to "eth1"
     When    Run "crm cluster init -i eth0 -i eth1 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
-    And     IP "172.17.0.2" is used by corosync on "hanode1"
-    And     IP "10.10.10.2" is used by corosync on "hanode1"
+    And     IP "@hanode1.ip.default" is used by corosync on "hanode1"
+    And     IP "@hanode1.ip.0" is used by corosync on "hanode1"
     And     Show corosync ring status
 
   @clean
@@ -77,12 +78,12 @@
     Given   Cluster service is "stopped" on "hanode1"
     When    Try "crm cluster init -A xxx -y"
     Then    Except "ERROR: cluster.init: 'xxx' does not appear to be an IPv4 
or IPv6 address"
-    When    Try "crm cluster init -A 10.10.10.2 -y"
-    Then    Except "ERROR: cluster.init: Address already in use: 10.10.10.2"
-    When    Run "crm cluster init -n hatest -A 10.10.10.123 -y" on "hanode1"
+    When    Try "crm cluster init -A @hanode1.ip.0 -y"
+    Then    Except "ERROR: cluster.init: Address already in use: @hanode1.ip.0"
+    When    Run "crm cluster init -n hatest -A @vip.0 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Cluster name is "hatest"
-    And     Cluster virtual IP is "10.10.10.123"
+    And     Cluster virtual IP is "@vip.0"
     And     Show cluster status on "hanode1"
 
   @clean
@@ -91,7 +92,7 @@
     When    Run "crm cluster init -u -y -i eth0" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Cluster is using udpu transport mode
-    And     IP "172.17.0.2" is used by corosync on "hanode1"
+    And     IP "@hanode1.ip.default" is used by corosync on "hanode1"
     And     Show corosync ring status
     And     Corosync working on "unicast" mode
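
The @-placeholders introduced above and in the other feature files follow the patterns @<node>.ip[6].<index|default> and @vip.<index>. At run time, adjust_test_case() in the new run-functional-tests (see the script further down) resolves them with "docker container inspect" before behave runs; @vip.N reuses the node's first-network address with the last octet replaced by 123+N. The same lookup can be reproduced by hand (a sketch, assuming a running hanode1 container attached to the ha_network_first network):

# what @hanode1.ip.0 expands to
docker container inspect hanode1 -f '{{.NetworkSettings.Networks.ha_network_first.IPAddress}}'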
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/configure_bugs.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/configure_bugs.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/configure_bugs.feature      2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/configure_bugs.feature      2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: Functional test for configure sub level
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
 
   @clean
   Scenario: Replace sensitive data by default(bsc#1163581)
@@ -20,15 +21,15 @@
     And     Show crm configure
 
     # mask password and ip address
-    When    Run "crm configure primitive ip2 IPaddr2 params ip=10.10.10.124" 
on "hanode1"
+    When    Run "crm configure primitive ip2 IPaddr2 params ip=@vip.0" on 
"hanode1"
     And     Run "sed -i 's/; \[core\]/[core]/' /etc/crm/crm.conf" on "hanode1"
     And     Run "sed -i 's/; obscure_pattern = .*$/obscure_pattern = 
passw*|ip/g' /etc/crm/crm.conf" on "hanode1"
-    And     Try "crm configure show|grep -E "10.10.10.124|qwertyui""
+    And     Try "crm configure show|grep -E "@vip.0|qwertyui""
     Then    Expected return code is "1"
     And     Show crm configure
 
     # mask password and ip address with another pattern
     When    Run "sed -i 's/obscure_pattern = .*$/obscure_pattern = passw* 
ip/g' /etc/crm/crm.conf" on "hanode1"
-    And     Try "crm configure show|grep -E "10.10.10.124|qwertyui""
+    And     Try "crm configure show|grep -E "@vip.0|qwertyui""
     Then    Expected return code is "1"
     And     Show crm configure
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/constraints_bugs.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/constraints_bugs.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/constraints_bugs.feature    2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/constraints_bugs.feature    2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: Verify constraints(order/colocation/location) bug
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
 
   Background: Setup a two nodes cluster
     Given   Cluster service is "stopped" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/crm_report_bugs.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/crm_report_bugs.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/crm_report_bugs.feature     2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/crm_report_bugs.feature     2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: crm report functional test for verifying bugs
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
 
   Background: Setup a two nodes cluster
     Given   Cluster service is "stopped" on "hanode1"
@@ -72,12 +73,12 @@
     When    Run "rm -rf report.tar.bz2 report" on "hanode1"
 
     # mask password and ip address by using crm.conf
-    When    Run "crm configure primitive ip2 IPaddr2 params ip=10.10.10.124" 
on "hanode1"
+    When    Run "crm configure primitive ip2 IPaddr2 params ip=@vip.0" on 
"hanode1"
     And     Run "sed -i 's/; \[report\]/[report]/' /etc/crm/crm.conf" on 
"hanode1"
     And     Run "sed -i 's/; sanitize_rule = .*$/sanitize_rule = 
passw.*|ip.*:raw/g' /etc/crm/crm.conf" on "hanode1"
     And     Run "crm report report" on "hanode1"
     And     Run "tar jxf report.tar.bz2" on "hanode1"
-    And     Try "grep -R -E "10.10.10.124|qwertyui" report"
+    And     Try "grep -R -E "@vip.0|qwertyui" report"
     # No password here
     Then    Expected return code is "1"
     When    Run "rm -rf report.tar.bz2 report" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/geo_setup.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/geo_setup.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/geo_setup.feature   2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/geo_setup.feature   2022-04-18 08:32:05.000000000 +0200
@@ -3,6 +3,7 @@
 
   Test geo cluster setup using bootstrap
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2 hanode3
 
   @clean
   Scenario: GEO cluster setup
@@ -10,14 +11,14 @@
     And     Cluster service is "stopped" on "hanode2"
     When    Run "crm cluster init -y -n cluster1" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
-    When    Run "crm configure primitive vip IPaddr2 params ip=10.10.10.123" 
on "hanode1"
+    When    Run "crm configure primitive vip IPaddr2 params ip=@vip.0" on 
"hanode1"
 
     When    Run "crm cluster init -y -n cluster2" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
-    When    Run "crm configure primitive vip IPaddr2 params ip=10.10.10.124" 
on "hanode2"
+    When    Run "crm configure primitive vip IPaddr2 params ip=@vip.1" on 
"hanode2"
 
-    When    Run "crm cluster geo_init -y --clusters "cluster1=10.10.10.123 
cluster2=10.10.10.124" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
-    When    Run "crm cluster geo_join -y --cluster-node hanode1 --clusters 
"cluster1=10.10.10.123 cluster2=10.10.10.124"" on "hanode2"
+    When    Run "crm cluster geo_init -y --clusters "cluster1=@vip.0 
cluster2=@vip.1" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
+    When    Run "crm cluster geo_join -y --cluster-node hanode1 --clusters 
"cluster1=@vip.0 cluster2=@vip.1"" on "hanode2"
 
     Given   Service "booth@booth" is "stopped" on "hanode3"
     When    Run "crm cluster geo_init_arbitrator -y --cluster-node hanode1" on 
"hanode3"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_options.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_options.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_options.feature     2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_options.feature     2022-04-18 08:32:05.000000000 +0200
@@ -7,6 +7,7 @@
       "--qdevice-tls":         Whether using TLS on 
QDevice/QNetd(on/off/required, default:on)
       "--qdevice-heuristics":  COMMAND to run with absolute path. For multiple 
commands, use ";" to separate
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2 qnetd-node
 
   @clean
   Scenario: Use "--qdevice-algo" to change qnetd decision algorithm to "lms"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_setup_remove.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_setup_remove.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_setup_remove.feature        2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_setup_remove.feature        2022-04-18 08:32:05.000000000 +0200
@@ -3,6 +3,7 @@
 
   Test corosync qdevice/qnetd setup/remove process
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2 hanode3 hanode4 qnetd-node
 
   Background: Cluster and qdevice service are stopped
     Given   Cluster service is "stopped" on "hanode1"
@@ -115,7 +116,7 @@
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
     And     Online nodes are "hanode1 hanode2"
-    When    Run "crm cluster init qdevice --qnetd-hostname 2001:db8:10::7 -y" 
on "hanode1"
+    When    Run "crm cluster init qdevice --qnetd-hostname @qnetd-node.ip6.0 
-y" on "hanode1"
     Then    Show corosync qdevice configuration
     And     Service "corosync-qdevice" is "started" on "hanode2"
     And     Service "corosync-qdevice" is "started" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_usercase.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_usercase.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_usercase.feature    2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_usercase.feature    2022-04-18 08:32:05.000000000 +0200
@@ -11,6 +11,7 @@
   6. Check whether hanode1 has quorum, while hanode2 doesn't
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2 qnetd-node
 
   Background: Cluster and qdevice service are stopped
     Given   Cluster service is "stopped" on "hanode1"
@@ -69,8 +70,8 @@
     When    Run "ssh root@hanode2 corosync-quorumtool -s" on "hanode1"
     Then    Expected "Quorate:          Yes" in stdout
     # Use iptables command to simulate split-brain
-    When    Run "iptables -I INPUT -s 172.17.0.3 -j DROP; iptables -I OUTPUT 
-d 172.17.0.3 -j DROP" on "hanode1"
-    And     Run "iptables -I INPUT -s 172.17.0.2 -j DROP; iptables -I OUTPUT 
-d 172.17.0.2 -j DROP" on "hanode2"
+    When    Run "iptables -I INPUT -s @hanode2.ip.default -j DROP; iptables -I 
OUTPUT -d @hanode2.ip.default -j DROP" on "hanode1"
+    And     Run "iptables -I INPUT -s @hanode1.ip.default -j DROP; iptables -I 
OUTPUT -d @hanode1.ip.default -j DROP" on "hanode2"
     # Check whether hanode1 has quorum, while hanode2 doesn't
     And     Run "sleep 20" on "hanode1"
     When    Run "crm corosync status quorum" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_validate.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_validate.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/qdevice_validate.feature    2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/qdevice_validate.feature    2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: corosync qdevice/qnetd options validate
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2 qnetd-node node-without-ssh
 
   @clean
   Scenario: Option "--qnetd-hostname" use the same node
@@ -10,7 +11,7 @@
 
   @clean
   Scenario: Option "--qnetd-hostname" use hanode1's IP
-    When    Try "crm cluster init --qnetd-hostname=10.10.10.2"
+    When    Try "crm cluster init --qnetd-hostname=@hanode1.ip.0"
     Then    Except "ERROR: cluster.init: host for qnetd must be a remote one"
 
   @clean
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/resource_failcount.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/resource_failcount.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/resource_failcount.feature  2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/resource_failcount.feature  2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: Use "crm resource failcount" to manage failcounts
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1
 
   Background: Setup one node cluster and configure a Dummy resource
     Given     Cluster service is "stopped" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/resource_set.feature new/crmsh-4.4.0+20220418.cbf7a09e/test/features/resource_set.feature
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/features/resource_set.feature        2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/features/resource_set.feature        2022-04-18 08:32:05.000000000 +0200
@@ -2,6 +2,7 @@
 Feature: Use "crm configure set" to update attributes and operations
 
   Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
 
   Background: Setup cluster and configure some resources
     Given     Cluster service is "stopped" on "hanode1"
@@ -11,9 +12,9 @@
     Then      Cluster service is "started" on "hanode2"
     When      Run "crm configure primitive d Dummy op monitor interval=3s" on 
"hanode1"
     Then      Resource "d" type "Dummy" is "Started"
-    When      Run "crm configure primitive vip IPaddr2 params ip=10.10.10.123 
op monitor interval=3s" on "hanode1"
+    When      Run "crm configure primitive vip IPaddr2 params ip=@vip.0 op 
monitor interval=3s" on "hanode1"
     Then      Resource "vip" type "IPaddr2" is "Started"
-    And       Cluster virtual IP is "10.10.10.123"
+    And       Cluster virtual IP is "@vip.0"
     When      Run "crm configure primitive s ocf:pacemaker:Stateful op monitor 
role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
     Then      Resource "s" type "Stateful" is "Started"
 
@@ -36,8 +37,8 @@
 
   @clean
   Scenario: Using configure.set to update resource parameters and operation values
-    When    Run "crm configure set vip.ip 10.10.10.124" on "hanode1"
-    Then    Cluster virtual IP is "10.10.10.124"
+    When    Run "crm configure set vip.ip @vip.0" on "hanode1"
+    Then    Cluster virtual IP is "@vip.0"
     When    Run "crm configure set d.monitor.on-fail ignore" on "hanode1"
     And     Run "crm configure show d" on "hanode1"
     Then    Expected "on-fail=ignore" in stdout
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/run-functional-tests new/crmsh-4.4.0+20220418.cbf7a09e/test/run-functional-tests
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/run-functional-tests 1970-01-01 01:00:00.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/run-functional-tests 2022-04-18 08:32:05.000000000 +0200
@@ -0,0 +1,420 @@
+#!/bin/bash
+
+DOCKER_IMAGE=${DOCKER_IMAGE:-"liangxin1300/hatbw"}
+DOCKER_SERVICE="docker.service"
+COROSYNC_CONF="/etc/corosync/corosync.conf"
+COROSYNC_AUTH="/etc/corosync/authkey"
+HA_NETWORK_FIRST="ha_network_first"
+HA_NETWORK_SECOND="ha_network_second"
+declare -a HA_NETWORK_ARRAY
+declare -a HA_NETWORK_V6_ARRAY
+HA_NETWORK_ARRAY[0]=$HA_NETWORK_FIRST
+HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND
+HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64"
+HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
+BEHAVE_CASE_DIR="$(dirname $0)/features/"
+BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
+read -r -d '' COROSYNC_CONF_TEMPLATE << EOM
+totem {
+        version: 2
+        cluster_name: hacluster
+        clear_node_high_bit: yes
+        interface {
+                ringnumber: 0
+                mcastport: 5405
+                ttl: 1
+        }
+
+        transport: udpu
+        crypto_hash: sha1
+        crypto_cipher: aes256
+        token: 5000
+        join: 60
+        max_messages: 20
+        token_retransmits_before_loss_const: 10
+}
+
+logging {
+        fileline: off
+        to_stderr: no
+        to_logfile: no
+        logfile: /var/log/cluster/corosync.log
+        to_syslog: yes
+        debug: off
+        timestamp: on
+        logger_subsys {
+                subsys: QUORUM
+                debug: off
+        }
+
+}
+
+nodelist {
+}
+
+quorum {
+
+        # Enable and configure quorum subsystem (default: off)
+        # see also corosync.conf.5 and votequorum.5
+        provider: corosync_votequorum
+}
+EOM
+
+
+fatal() {
+       error $*
+       exit 1
+}
+
+
+error() {
+       echo "ERROR: $*"
+}
+
+
+warning() {
+       echo "WARNING: $*"
+}
+
+
+info() {
+       echo "INFO: $*"
+}
+
+
+is_number() {
+       num=$1
+       test ! -z "$num" && test "$num" -eq "$num" 2> /dev/null && test "$num" -gt 0 2> /dev/null
+}
+
+
+check_docker_env() {
+       # check if docker available
+       systemctl list-unit-files $DOCKER_SERVICE &> /dev/null
+       if [ "$?" -ne 0 ];then
+               fatal "$DOCKER_SERVICE is not available"
+       fi
+       # check if docker.service started
+       systemctl is-active $DOCKER_SERVICE &> /dev/null
+       if [ "$?" -ne 0 ];then
+               fatal "$DOCKER_SERVICE is not active"
+       fi
+       # check if docker cgroup driver is systemd
+       docker info 2> /dev/null|grep -q "Cgroup Driver: systemd"
+       if [ "$?" -ne 0 ];then
+               warning "docker cgroup driver suggest to be \"systemd\""
+       fi
+
+       [ "$1" == "cleanup" ] && return
+       # check if ha network already exists
+       for network in ${HA_NETWORK_ARRAY[@]};do
+               docker network ls|grep -q "$network"
+               if [ "$?" -eq 0 ];then
+                       fatal "HA specific network \"$network\" already exists"
+               fi
+       done
+}
+
+
+get_test_case_array() {
+       test -d $BEHAVE_CASE_DIR || fatal "Cannot find '$BEHAVE_CASE_DIR'"
+       ls $BEHAVE_CASE_DIR|grep "\.feature"|grep -Ev "$BEHAVE_CASE_EXCLUDE"
+}
+
+
+echo_test_cases() {
+       case_arry=`get_test_case_array`
+       echo "Index|File Name|Description"
+       index=1
+        for f in ${case_arry[@]};do
+               desc=`awk -F: '/Feature/{print $2}' $BEHAVE_CASE_DIR/$f`
+               printf "%3s    %-40s %-60s\n" $index $f "$desc"
+               index=$(($index+1))
+       done
+       printf "%3s    %-40s  %-60s\n" $index "regression test" "Original regression test"
+}
+
+
+usage_and_exit() {
+       prog=`basename $0`
+       cat <<END
+Usage: $prog [OPTIONS]|[TESTCASE INDEX]
+$prog is a tool for developers to setup the cluster in containers to run functional tests.
+The container image is based on Tumbleweed with preinstalled packages of the cluster stack include pacemaker/corosync/crmsh and many others.
+Users can make the code change under crmsh.git including test cases. This tool will pick up the code change and "make install" to all running containers.
+
+OPTIONS:
+  -h, --help           Show this help message and exit
+  -l                   List existing functional test cases and exit
+  -n NUM               Only setup a cluster with NUM nodes(containers)
+  -x                   Don't config corosync on containers(with -n option)
+  -d                   Cleanup the cluster containers
+
+EXAMPLES:
+To launch 2 nodes with the running cluster with the very basic corosync.conf
+# crmsh.git/test/run-functional-tests -n 2
+
+To launch 2 nodes without the cluster stack running to play with "crm cluster init/join"
+# crmsh.git/test/run-functional-tests -n 2 -x
+
+To list the existing test cases. Users could add his own new test cases.
+# crmsh.git/test/run-functional-tests -l
+
+To run a single or a number of functional test cases
+# crmsh.git/test/run-functional-tests 1
+# crmsh.git/test/run-functional-tests 1 2 3
+
+To clean up the all containers which are generated by this tool
+# crmsh.git/test/run-functional-tests -d
+END
+       exit 1
+}
+
+
+docker_exec() {
+       name=$1
+       cmd=$2
+       docker exec -t $name /bin/sh -c "$cmd"
+}
+
+
+deploy_ha_node() {
+       node_name=$1
+       project_path=$(dirname $(dirname `realpath $0`))
+       docker_options="-d --name $node_name -h $node_name --privileged --shm-size 1g -v $project_path:/app"
+       #TODO run autogen and configure locally to save build time inside containers
+       build_cmd="cd /app;./autogen.sh && ./configure --prefix /usr && make install && make install-crmconfDATA prefix="
+
+       info "Deploying \"$node_name\"..."
+       docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
+       for network in ${HA_NETWORK_ARRAY[@]};do
+               docker network connect $network $node_name
+       done
+
+       if [ "$node_name" == "node-without-ssh" ];then
+               docker_exec $node_name "systemctl stop sshd.service"
+       fi
+       if [ "$node_name" != "qnetd-node" ];then
+               docker_exec $node_name "rpm -e corosync-qnetd"
+       fi
+       docker_exec $node_name "rm -rf /run/nologin"
+       docker_exec $node_name "echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"
+       info "Building crmsh codes on \"$node_name\"..."
+       docker_exec $node_name "$build_cmd" 1> /dev/null || \
+               fatal "Building failed!"
+}
+
+
+create_node() {
+       info "Loading docker image $DOCKER_IMAGE..."
+        docker pull $DOCKER_IMAGE &> /dev/null
+
+       for index in ${!HA_NETWORK_ARRAY[@]};do
+               network=${HA_NETWORK_ARRAY[$index]}
+               info "Create ha specific docker network \"$network\"..."
+               docker network create --ipv6 --subnet ${HA_NETWORK_V6_ARRAY[$index]} $network &> /dev/null
+       done
+
+       info "Setup cluster..."
+       for node in $*;do
+               deploy_ha_node $node
+       done
+}
+
+
+config_cluster() {
+       node_num=$#
+       insert_str=""
+       container_ip_array=(`docker network inspect $HA_NETWORK_ARRAY -f '{{range .Containers}}{{printf "%s " .IPv4Address}}{{end}}'`)
+
+       for i in $(seq $node_num -1 1);do
+               ip=`echo ${container_ip_array[$((i-1))]}|awk -F/ '{print $1}'`
+               insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
+       done
+       corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))
+       if [ $node_num -eq 2 ];then
+               corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
+       fi
+
+       for node in $*;do
+               if [ $node == $1 ];then
+                       docker_exec $1 "echo \"$corosync_conf_str\" >> $COROSYNC_CONF"
+                       docker_exec $1 "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
+               else
+                       docker_exec $1 "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
+               fi
+       done
+}
+
+
+start_cluster() {
+       for node in $*;do
+               docker_exec $node "crm cluster enable && crm cluster start"
+       done
+}
+
+
+container_already_exists() {
+       docker ps -a|grep -q "$1"
+       if [ "$?" -eq 0 ];then
+               fatal "Container \"$1\" already running"
+       fi
+}
+
+
+setup_cluster() {
+       hanodes_arry=()
+       is_number $1
+       if [ "$?" -eq 0 ];then
+               for i in $(seq 1 $1);do
+                       hanodes_arry+=("hanode$i")
+               done
+       else
+               hanodes_arry=($*)
+       fi
+
+       create_node ${hanodes_arry[@]}
+       [ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
+       config_cluster ${hanodes_arry[@]}
+       start_cluster ${hanodes_arry[@]}
+}
+
+
+cleanup_cluster() {
+       exist_network_array=()
+       for network in ${HA_NETWORK_ARRAY[@]};do
+               docker network ls|grep -q $network && exist_network_array+=($network)
+       done
+       if [ ${#exist_network_array[@]} -eq 0 ];then
+               info "Already cleaned up"
+               return 0
+       fi
+
+       container_array=(`docker network inspect $exist_network_array -f '{{range .Containers}}{{printf "%s " .Name}}{{end}}'`)
+       for node in ${container_array[@]};do
+               info "Cleanup container \"$node\"..."
+               docker container stop $node &> /dev/null
+               docker container rm $node &> /dev/null
+       done
+
+       for network in ${exist_network_array[@]};do
+               info "Cleanup ha specific docker network \"$network\"..."
+               docker network rm $network &> /dev/null
+       done
+}
+
+
+adjust_test_case() {
+       node_name=$1
+       replace_arry=(`docker_exec $node_name "grep -o -E '@(hanode[0-9]+|qnetd-node)\.ip[6]?\.(default|[0-9])' $2|sort -u|dos2unix"`)
+       for item in ${replace_arry[@]};do
+               item_str=${item##@}
+               node=`echo $item_str|cut -d "." -f 1`
+               ip_version=`echo $item_str|cut -d "." -f 2|tr -d "\r"`
+               ip_search_str="IPAddress"
+               if [ "$ip_version" == "ip6" ];then
+                       ip_search_str="GlobalIPv6Address"
+               fi
+               index=`echo $item_str|cut -d "." -f 3|tr -d "\r"`
+               if [ "$index" == "default" ];then
+                       ip=`docker container inspect $node -f "{{range .NetworkSettings.Networks}}{{printf \"%s \" .$ip_search_str}}{{end}}"|awk '{print $1}'|tr -d "\r"`
+               else
+                       ip=`docker container inspect $node -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[$index]}.$ip_search_str}}"|tr -d "\r"`
+               fi
+               item=`echo $item|tr -d "\r"`
+               docker_exec $node_name "sed -i s/$item/$ip/g $2"
+       done
+
+       vip_replace_array=(`docker_exec $node_name "grep -o -E '@vip\.[0-9]' $2|sort -u|dos2unix"`)
+       for item in ${vip_replace_array[@]};do
+               index=`echo $item|cut -d "." -f 2|tr -d "\r"`
+               suffix=$((123+index))
+               ip=`docker container inspect $node_name -f "{{.NetworkSettings.Networks.${HA_NETWORK_ARRAY[0]}.IPAddress}}"|tr -d "\r"`
+               vip=`echo $ip|sed "s/\.[0-9][0-9]*$/\.$suffix/g"|tr -d "\r"`
+               item=`echo $item|tr -d "\r"`
+               docker_exec $node_name "sed -i s/$item/$vip/g $2"
+       done
+}
+
+
+run_origin_regression_test() {
+       CONFIG_COROSYNC_FLAG=0
+       setup_cluster "hanode1"
+       docker_exec "hanode1" "sh /usr/share/crmsh/tests/regression.sh"
+       return $?
+}
+
+
+CONFIG_COROSYNC_FLAG=1
+SETUP_N_NODES_CLUSTER=0
+options=$(getopt -l "help" -o "hldxn:" -- "$@")
+eval set -- "$options"
+while true;do
+case $1 in
+-h|--help) usage_and_exit;;
+-l)
+       echo_test_cases
+       exit 0
+       ;;
+-d)
+       check_docker_env cleanup
+       cleanup_cluster
+       exit $?
+       ;;
+-x)
+       CONFIG_COROSYNC_FLAG=0
+       shift
+       ;;
+-n)
+       check_docker_env
+       shift
+       is_number $1 || fatal "-n option need a number larger than 0"
+       SETUP_N_NODES_CLUSTER=$1
+       shift
+       ;;
+--)
+       shift
+       break
+       ;;
+esac
+done
+
+if [ $SETUP_N_NODES_CLUSTER -ge 1 ];then
+       setup_cluster $SETUP_N_NODES_CLUSTER
+       exit $?
+fi
+
+if [ "$#" -eq 0 ];then
+       usage_and_exit
+fi
+
+for case_num in $*;do
+       echo_test_cases|grep -E "\s+$case_num\s" &> /dev/null
+       if [ "$?" -ne 0 ];then
+               error "\"$case_num\" is an invalid index"
+               echo_test_cases
+               exit 1
+       fi
+done
+
+for case_num in $*;do
+       if [ "$case_num" -ne $1 ];then
+               check_docker_env cleanup
+               cleanup_cluster
+               echo
+       fi
+       check_docker_env
+       test_case_array=(`get_test_case_array`)
+       if [ $case_num -gt ${#test_case_array[*]} ];then
+               run_origin_regression_test || exit 1
+               continue
+       fi
+       case_file=$BEHAVE_CASE_DIR/${test_case_array[$((case_num-1))]}
+       case_file_in_container="/app/test/features/`basename $case_file`"
+       node_arry=(`awk -F: '/Need nodes/{print $2}' $case_file`)
+       CONFIG_COROSYNC_FLAG=0
+       setup_cluster ${node_arry[@]}
+       adjust_test_case ${node_arry[0]} $case_file_in_container
+       docker_exec ${node_arry[0]} "behave $case_file_in_container || exit 1" || exit 1
+       echo
+done
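
To make config_cluster() above concrete: for a 2-node cluster whose containers were assigned, say, 172.20.0.2 and 172.20.0.3 on the first HA network (addresses are illustrative), the sed calls expand the template's empty nodelist block to roughly:

nodelist {
        node {
                ring0_addr: 172.20.0.3
                nodeid: 2
        }
        node {
                ring0_addr: 172.20.0.2
                nodeid: 1
        }
}

and, since node_num is 2, additionally append "two_node: 1" under the corosync_votequorum provider in the quorum section.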
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/run-in-container.sh new/crmsh-4.4.0+20220418.cbf7a09e/test/run-in-container.sh
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/run-in-container.sh  2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/run-in-container.sh  1970-01-01 01:00:00.000000000 +0100
@@ -1,42 +0,0 @@
-#!/bin/sh
-
-oname=$1
-ogroup=$2
-ouid=$3
-ogid=$4
-
-cat /etc/group | awk '{ FS = ":" } { print $3 }' | grep -q "$ogid" || groupadd -g "$ogid"
-id -u $oname >/dev/null 2>&1 || useradd -u $ouid -g $ogid $oname
-
-preamble() {
-       systemctl start dbus
-}
-
-unit_tests() {
-       echo "** Unit tests"
-       su $oname -c "./test/run"
-}
-
-configure() {
-       echo "** Autogen / Configure"
-       su $oname -c "./autogen.sh"
-       su $oname -c "./configure --prefix /usr"
-}
-
-make_install() {
-       echo "** Make / Install"
-       make install
-}
-
-regression_tests() {
-       echo "** Regression tests"
-       sh /usr/share/crmsh/tests/regression.sh
-}
-
-preamble
-unit_tests
-configure
-make_install
-regression_tests
-
-chown $oname:$ogroup /app/crmtestout/*
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.0+20220321.8cf6a9d1/test/run-in-travis.sh new/crmsh-4.4.0+20220418.cbf7a09e/test/run-in-travis.sh
--- old/crmsh-4.4.0+20220321.8cf6a9d1/test/run-in-travis.sh     2022-03-21 09:03:21.000000000 +0100
+++ new/crmsh-4.4.0+20220418.cbf7a09e/test/run-in-travis.sh     1970-01-01 01:00:00.000000000 +0100
@@ -1,37 +0,0 @@
-#!/bin/sh
-configure() {
-       echo "** Autogen / Configure"
-       ./autogen.sh
-       ./configure --prefix /usr
-}
-
-make_install() {
-       echo "** Make / Install"
-       make install
-       make install-crmconfDATA prefix=
-}
-
-regression_tests() {
-       echo "** Regression tests"
-       sh /usr/share/crmsh/tests/regression.sh
-}
-
-functional_tests() {
-       echo "**  $1 process tests using python-behave"
-        SUFFIX="${2:-*}"
-        behave --no-logcapture --tags "@$1" --tags "~@wip" /usr/share/crmsh/tests/features/$1_$SUFFIX.feature
-}
-
-case "$1" in
-       build)
-               configure
-               make_install
-               exit $?;;
-       bootstrap|qdevice|crm_report|resource|geo|configure|constraints|ocfs2)
-               functional_tests $1 $2
-               exit $?;;
-       *|original)
-               configure
-               make_install
-               regression_tests;;
-esac
