Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked 
in at 2021-03-15 10:56:19
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.2401 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Mon Mar 15 10:56:19 2021 rev:203 rq:879018 version:4.3.0+git.20210311.c2e8856c

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2021-02-19 
23:44:59.919347706 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.2401/crmsh.changes    2021-03-15 
10:56:19.869349531 +0100
@@ -1,0 +2,21 @@
+Mon Mar 15 02:25:26 UTC 2021 - xli...@suse.com
+
+- Update to version 4.3.0+git.20210311.c2e8856c:
+  * Dev: behave: change functional test about lock directory
+  * Dev: unittest: change lock directory as constant in ut code
+  * Dev: lock: change lock directory under /run
+  * Dev: unittest: adjust unittest for diskless SBD warning codes
+  * Fix: bootstrap: raise warning when configuring diskless SBD with node's 
count less than 3(bsc#1181907)
+  * Dev: unittest: Adjust unit test since qdevice code changed
+  * Dev: behave: Add functional test for qdevice add/remove on a single node 
cluster
+  * Fix: bootstrap: Adjust qdevice configure/remove process to avoid race 
condition due to quorum lost(bsc#1181415)
+  * Dev: utils: remove unused utils.cluster_stack and its related codes
+  * Dev: cibconfig: remove related code about detecting crm_diff support 
--no-version
+  * Fix: ui_configure: raise error when params not exist(bsc#1180126)
+  * Dev: doc: remove doc for crm node status
+  * Dev: ui_node: remove status subcommand
+  * Fix: hb_report: walk through hb_report process under 
hacluster(CVE-2020-35459, bsc#1179999; CVE-2021-3020, bsc#1180571)
+  * Dev: unittest: add unit test for authorized ssh access for hacluster
+  * Fix: bootstrap: setup authorized ssh access for hacluster(CVE-2020-35459, 
bsc#1179999; CVE-2021-3020, bsc#1180571)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.3.0+git.20210219.811c32f0.tar.bz2

New:
----
  crmsh-4.3.0+git.20210311.c2e8856c.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.Qsy4dL/_old  2021-03-15 10:56:20.561350593 +0100
+++ /var/tmp/diff_new_pack.Qsy4dL/_new  2021-03-15 10:56:20.565350600 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.3.0+git.20210219.811c32f0
+Version:        4.3.0+git.20210311.c2e8856c
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.Qsy4dL/_old  2021-03-15 10:56:20.617350680 +0100
+++ /var/tmp/diff_new_pack.Qsy4dL/_new  2021-03-15 10:56:20.617350680 +0100
@@ -9,6 +9,6 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param 
name="changesrevision">811c32f0044997e5be46cf2f705ded005dfff590</param>
+  <param 
name="changesrevision">e0bb141ccc848feedda7cab741fc035f62f19bc5</param>
 </service>
 </servicedata>
\ No newline at end of file

++++++ crmsh-4.3.0+git.20210219.811c32f0.tar.bz2 -> 
crmsh-4.3.0+git.20210311.c2e8856c.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/bootstrap.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/bootstrap.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/bootstrap.py    2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/bootstrap.py    2021-03-11 
08:39:55.000000000 +0100
@@ -22,6 +22,7 @@
 from string import Template
 from lxml import etree
 from pathlib import Path
+from enum import Enum
 from . import config
 from . import utils
 from . import xmlutil
@@ -31,6 +32,7 @@
 from . import clidisplay
 from . import term
 from . import lock
+from . import userdir
 
 
 LOG_FILE = "/var/log/crmsh/ha-cluster-bootstrap.log"
@@ -42,13 +44,18 @@
 SYSCONFIG_FW_CLUSTER = "/etc/sysconfig/SuSEfirewall2.d/services/cluster"
 PCMK_REMOTE_AUTH = "/etc/pacemaker/authkey"
 COROSYNC_CONF_ORIG = tmpfiles.create()[1]
-RSA_PRIVATE_KEY = "/root/.ssh/id_rsa"
-RSA_PUBLIC_KEY = "/root/.ssh/id_rsa.pub"
-AUTHORIZED_KEYS_FILE = "/root/.ssh/authorized_keys"
 SERVICES_STOP_LIST = ["corosync-qdevice.service", "corosync.service", 
"hawk.service"]
+USER_LIST = ["root", "hacluster"]
+QDEVICE_ADD = "add"
+QDEVICE_REMOVE = "remove"
 
 INIT_STAGES = ("ssh", "ssh_remote", "csync2", "csync2_remote", "corosync", 
"storage", "sbd", "cluster", "vgfs", "admin", "qdevice")
 
+class QdevicePolicy(Enum):
+    QDEVICE_RELOAD = 0
+    QDEVICE_RESTART = 1
+    QDEVICE_RESTART_LATER = 2
+
 
 class Context(object):
     """
@@ -80,6 +87,7 @@
         self.qdevice_heuristics = None
         self.qdevice_heuristics_mode = None
         self.qdevice_rm_flag = None
+        self.qdevice_reload_policy = QdevicePolicy.QDEVICE_RESTART
         self.shared_device = None
         self.ocfs2_device = None
         self.cluster_node = None
@@ -95,6 +103,7 @@
         self.args = None
         self.ui_context = None
         self.interfaces_inst = None
+        self.with_other_user = True
         self.default_nic_list = []
         self.default_ip_list = []
         self.local_ip_list = []
@@ -339,6 +348,8 @@
   are a good choice.  Note that all data on the partition you
   specify here will be destroyed.
 """
+    DISKLESS_SBD_WARNING = """Diskless SBD requires cluster with three or more 
nodes.
+If you want to use diskless SBD for two-nodes cluster, should be combined with 
QDevice."""
 
     def __init__(self, sbd_devices=None, diskless_sbd=False):
         """
@@ -516,6 +527,8 @@
         if not self._sbd_devices and not self.diskless_sbd:
             invoke("systemctl disable sbd.service")
             return
+        if self.diskless_sbd:
+            warn(self.DISKLESS_SBD_WARNING)
         status_long("Initializing {}SBD...".format("diskless " if 
self.diskless_sbd else ""))
         self._initialize_sbd()
         self._update_configuration()
@@ -554,6 +567,10 @@
         dev_list = self._get_sbd_device_from_config()
         if dev_list:
             self._verify_sbd_device(dev_list, [peer_host])
+        else:
+            vote_dict = utils.get_quorum_votes_dict(peer_host)
+            if int(vote_dict['Expected']) < 2:
+                warn(self.DISKLESS_SBD_WARNING)
         status("Got {}SBD configuration".format("" if dev_list else "diskless 
"))
         invoke("systemctl enable sbd.service")
 
@@ -1134,22 +1151,66 @@
     Configure passwordless SSH.
     """
     utils.start_service("sshd.service", enable=True)
-    configure_local_ssh_key()
+    for user in USER_LIST:
+        configure_local_ssh_key(user)
+
+
+def key_files(user):
+    """
+    Find home directory for user and return key files with abspath
+    """
+    keyfile_dict = {}
+    home_dir = userdir.gethomedir(user)
+    keyfile_dict['private'] = "{}/.ssh/id_rsa".format(home_dir)
+    keyfile_dict['public'] = "{}/.ssh/id_rsa.pub".format(home_dir)
+    keyfile_dict['authorized'] = "{}/.ssh/authorized_keys".format(home_dir)
+    return keyfile_dict
+
+
+def is_nologin(user):
+    """
+    Check if user's shell is /sbin/nologin
+    """
+    with open("/etc/passwd") as f:
+        return re.search("{}:.*:/sbin/nologin".format(user), f.read())
+
+
+def change_user_shell(user):
+    """
+    To change user's login shell
+    """
+    if user != "root" and is_nologin(user):
+        if not _context.yes_to_all:
+            status("""
+User {} will be changed the login shell as /bin/bash, and
+be setted up authorized ssh access among cluster nodes""".format(user))
+            if not confirm("Continue?"):
+                _context.with_other_user = False
+                return
+        invoke("usermod -s /bin/bash {}".format(user))
 
 
-def configure_local_ssh_key():
+def configure_local_ssh_key(user="root"):
     """
     Configure ssh rsa key locally
 
-    If /root/.ssh/id_rsa not exist, generate a new one
-    Add /root/.ssh/id_rsa.pub to /root/.ssh/authorized_keys anyway, make sure 
itself authorized
+    If <home_dir>/.ssh/id_rsa not exist, generate a new one
+    Add <home_dir>/.ssh/id_rsa.pub to <home_dir>/.ssh/authorized_keys anyway, 
make sure itself authorized
     """
-    if not os.path.exists(RSA_PRIVATE_KEY):
-        status("Generating SSH key")
-        invoke("ssh-keygen -q -f {} -C 'Cluster Internal on {}' -N 
''".format(RSA_PRIVATE_KEY, utils.this_node()))
-    if not os.path.exists(AUTHORIZED_KEYS_FILE):
-        open(AUTHORIZED_KEYS_FILE, 'w').close()
-    append_unique(RSA_PUBLIC_KEY, AUTHORIZED_KEYS_FILE)
+    change_user_shell(user)
+
+    private_key, public_key, authorized_file = key_files(user).values()
+    if not os.path.exists(private_key):
+        status("Generating SSH key for {}".format(user))
+        cmd = "ssh-keygen -q -f {} -C 'Cluster Internal on {}' -N 
''".format(private_key, utils.this_node())
+        cmd = utils.add_su(cmd, user)
+        rc, _, err = invoke(cmd)
+        if not rc:
+            error("Failed to generate ssh key for {}: {}".format(user, err))
+
+    if not os.path.exists(authorized_file):
+        open(authorized_file, 'w').close()
+    append_unique(public_key, authorized_file)
 
 
 def init_ssh_remote():
@@ -1784,6 +1845,34 @@
     wait_for_resource("Configuring virtual IP ({})".format(adminaddr), 
"admin-ip")
 
 
+def evaluate_qdevice_quorum_effect(mode):
+    """
+    While adding/removing qdevice, get current expected votes and actual total 
votes,
+    to calculate after adding/removing qdevice, whether cluster has quorum
+    return different policy
+    """
+    quorum_votes_dict = utils.get_quorum_votes_dict()
+    expected_votes = int(quorum_votes_dict["Expected"])
+    actual_votes = int(quorum_votes_dict["Total"])
+    if mode == QDEVICE_ADD:
+        expected_votes += 1
+    elif mode == QDEVICE_REMOVE:
+        actual_votes -= 1
+
+    if utils.is_quorate(expected_votes, actual_votes):
+        # safe to use reload
+        return QdevicePolicy.QDEVICE_RELOAD
+    elif utils.has_resource_running():
+        # will lose quorum, and with RA running
+        # no reload, no restart cluster service
+        # just leave a warning
+        return QdevicePolicy.QDEVICE_RESTART_LATER
+    else:
+        # will lose quorum, without RA running
+        # safe to restart cluster service
+        return QdevicePolicy.QDEVICE_RESTART
+
+
 def init_qdevice():
     """
     Setup qdevice and qnetd service
@@ -1793,6 +1882,10 @@
         utils.disable_service("corosync-qdevice.service")
         return
 
+    if _context.stage == "qdevice":
+        utils.check_all_nodes_reachable()
+        _context.qdevice_reload_policy = 
evaluate_qdevice_quorum_effect(QDEVICE_ADD)
+
     status("""
 Configure Qdevice/Qnetd:""")
     qdevice_inst = _context.qdevice_inst
@@ -1830,8 +1923,15 @@
 
     status("Enable corosync-qdevice.service in cluster")
     utils.cluster_run_cmd("systemctl enable corosync-qdevice")
-    status("Starting corosync-qdevice.service in cluster")
-    utils.cluster_run_cmd("systemctl start corosync-qdevice")
+    if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+        status("Starting corosync-qdevice.service in cluster")
+        utils.cluster_run_cmd("systemctl restart corosync-qdevice")
+    elif _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RESTART:
+        status("Restarting cluster service")
+        utils.cluster_run_cmd("crm cluster restart")
+        wait_for_cluster()
+    else:
+        warn("To use qdevice service, need to restart cluster service manually 
on each node")
 
     status("Enable corosync-qnetd.service on {}".format(qnetd_addr))
     qdevice_inst.enable_qnetd()
@@ -1851,7 +1951,8 @@
         corosync.add_nodelist_from_cmaptool()
     status_long("Update configuration")
     update_expected_votes()
-    utils.cluster_run_cmd("crm corosync reload")
+    if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+        utils.cluster_run_cmd("crm corosync reload")
     status_done()
 
 
@@ -1871,8 +1972,9 @@
         error("No existing IP/hostname specified (use -c option)")
 
     utils.start_service("sshd.service", enable=True)
-    configure_local_ssh_key()
-    swap_public_ssh_key(seed_host)
+    for user in USER_LIST:
+        configure_local_ssh_key(user)
+        swap_public_ssh_key(seed_host, user)
 
     # This makes sure the seed host has its own SSH keys in its own
     # authorized_keys file (again, to help with the case where the
@@ -1883,30 +1985,34 @@
         error("Can't invoke crm cluster init -i {} ssh_remote on {}: 
{}".format(_context.default_nic_list[0], seed_host, err))
 
 
-def swap_public_ssh_key(remote_node):
+def swap_public_ssh_key(remote_node, user="root"):
     """
     Swap public ssh key between remote_node and local
     """
+    if user != "root" and not _context.with_other_user:
+        return
+
+    _, public_key, authorized_file = key_files(user).values()
     # Detect whether need password to login to remote_node
-    if utils.check_ssh_passwd_need(remote_node):
+    if utils.check_ssh_passwd_need(remote_node, user):
         # If no passwordless configured, paste /root/.ssh/id_rsa.pub to 
remote_node's /root/.ssh/authorized_keys
-        status("Configuring SSH passwordless with root@{}".format(remote_node))
+        status("Configuring SSH passwordless with {}@{}".format(user, 
remote_node))
         # After this, login to remote_node is passwordless
-        append_to_remote_file(RSA_PUBLIC_KEY, remote_node, 
AUTHORIZED_KEYS_FILE)
+        append_to_remote_file(public_key, remote_node, authorized_file)
 
     try:
         # Fetch public key file from remote_node
-        public_key_file_remote = fetch_public_key_from_remote_node(remote_node)
+        public_key_file_remote = 
fetch_public_key_from_remote_node(remote_node, user)
     except ValueError as err:
         warn(err)
         return
     # Append public key file from remote_node to local's 
/root/.ssh/authorized_keys
     # After this, login from remote_node is passwordless
     # Should do this step even passwordless is True, to make sure we got 
two-way passwordless
-    append_unique(public_key_file_remote, AUTHORIZED_KEYS_FILE)
+    append_unique(public_key_file_remote, authorized_file)
 
 
-def fetch_public_key_from_remote_node(node):
+def fetch_public_key_from_remote_node(node, user="root"):
     """
     Fetch public key file from remote node
     Return a temp file contains public key
@@ -1915,8 +2021,9 @@
 
     # For dsa, might need to add PubkeyAcceptedKeyTypes=+ssh-dss to config 
file, see
     # 
https://superuser.com/questions/1016989/ssh-dsa-keys-no-longer-work-for-password-less-authentication
+    home_dir = userdir.gethomedir(user)
     for key in ("id_rsa", "id_ecdsa", "id_ed25519", "id_dsa"):
-        public_key_file = "/root/.ssh/{}.pub".format(key)
+        public_key_file = "{}/.ssh/{}.pub".format(home_dir, key)
         cmd = "ssh -oStrictHostKeyChecking=no root@{} 'test -f 
{}'".format(node, public_key_file)
         if not invokerc(cmd):
             continue
@@ -2128,7 +2235,8 @@
 
     # Swap ssh public key between join node and other cluster nodes
     for node in cluster_nodes_list:
-        swap_public_ssh_key(node)
+        for user in USER_LIST:
+            swap_public_ssh_key(node, user)
 
 
 def join_cluster(seed_host):
@@ -2487,7 +2595,7 @@
     check_tty()
 
     corosync_active = utils.service_is_active("corosync.service")
-    if corosync_active:
+    if corosync_active and _context.stage != "ssh":
         error("Abort: Cluster is currently active. Run this command on a node 
joining the cluster.")
 
     if not check_prereqs("join"):
@@ -2545,10 +2653,14 @@
     if not confirm("Removing QDevice service and configuration from cluster: 
Are you sure?"):
         return
 
+    utils.check_all_nodes_reachable()
+    _context.qdevice_reload_policy = 
evaluate_qdevice_quorum_effect(QDEVICE_REMOVE)
+
     status("Disable corosync-qdevice.service")
     invoke("crm cluster run 'systemctl disable corosync-qdevice'")
-    status("Stopping corosync-qdevice.service")
-    invoke("crm cluster run 'systemctl stop corosync-qdevice'")
+    if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+        status("Stopping corosync-qdevice.service")
+        invoke("crm cluster run 'systemctl stop corosync-qdevice'")
 
     status_long("Removing QDevice configuration from cluster")
     qnetd_host = corosync.get_value('quorum.device.net.host')
@@ -2556,8 +2668,15 @@
     qdevice_inst.remove_qdevice_config()
     qdevice_inst.remove_qdevice_db()
     update_expected_votes()
-    invoke("crm cluster run 'crm corosync reload'")
     status_done()
+    if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+        invoke("crm cluster run 'crm corosync reload'")
+    elif _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RESTART:
+        status("Restarting cluster service")
+        utils.cluster_run_cmd("crm cluster restart")
+        wait_for_cluster()
+    else:
+        warn("To remove qdevice service, need to restart cluster service 
manually on each node")
 
 
 def bootstrap_remove(context):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/cibconfig.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/cibconfig.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/cibconfig.py    2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/cibconfig.py    2021-03-11 
08:39:55.000000000 +0100
@@ -2391,7 +2391,7 @@
         self.last_commit_time = 0
         # internal (just not to produce silly messages)
         self._no_constraint_rm_msg = False
-        self._crm_diff_cmd = None
+        self._crm_diff_cmd = "crm_diff --no-version"
 
     def is_cib_sane(self):
         # try to initialize
@@ -2531,17 +2531,6 @@
         schema.init_schema(self.cib_elem)
         return True
 
-    #
-    # create a doc from the list of objects
-    # (used by CibObjectSetRaw)
-    #
-    def bump_epoch(self):
-        try:
-            self.cib_attrs["epoch"] = str(int(self.cib_attrs["epoch"])+1)
-        except:
-            self.cib_attrs["epoch"] = "1"
-        common_debug("Bump epoch to %s" % (self.cib_attrs["epoch"]))
-
     def _get_cib_attributes(self, cib):
         for attr in list(cib.keys()):
             self.cib_attrs[attr] = cib.get(attr)
@@ -2669,20 +2658,8 @@
         if current_cib is None:
             return False
 
-        # check if crm_diff supports --no-version
-        if self._crm_diff_cmd is None:
-            rc, out = utils.get_stdout("crm_diff --help")
-            if "--no-version" in out:
-                self._crm_diff_cmd = 'crm_diff --no-version'
-            else:
-                self._crm_diff_cmd = 'crm_diff'
-
         self._copy_cib_attributes(current_cib, self.cib_orig)
         current_cib = None  # don't need that anymore
-        # only bump epoch if we don't have support for --no-version
-        if not self._crm_diff_cmd.endswith('--no-version'):
-            # now increase the epoch by 1
-            self.bump_epoch()
         self._set_cib_attributes(self.cib_elem)
         cib_s = xml_tostring(self.cib_orig, pretty_print=True)
         tmpf = str2tmp(cib_s, suffix=".xml")
@@ -2705,13 +2682,7 @@
         elif not cib_diff:
             common_err("crm_diff apparently failed to produce the diff 
(rc=%d)" % rc)
             return False
-        if not self._crm_diff_cmd.endswith('--no-version'):
-            # skip the version information for source and target
-            # if we dont have support for --no-version
-            e = etree.fromstring(cib_diff)
-            for tag in e.xpath("./version/*[self::target or self::source]"):
-                tag.attrib.clear()
-            cib_diff = xml_tostring(e)
+
         # for v1 diffs, fall back to non-patching if
         # any containers are modified, else strip the digest
         if "<diff" in cib_diff and "digest=" in cib_diff:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/corosync.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/corosync.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/corosync.py     2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/corosync.py     2021-03-11 
08:39:55.000000000 +0100
@@ -19,10 +19,6 @@
     return os.getenv('COROSYNC_MAIN_CONFIG_FILE', 
'/etc/corosync/corosync.conf')
 
 
-def is_corosync_stack():
-    return utils.cluster_stack() == 'corosync'
-
-
 def check_tools():
     return all(utils.is_program(p)
                for p in ['corosync-cfgtool', 'corosync-quorumtool', 
'corosync-cmapctl'])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/lock.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/lock.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/lock.py 2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/lock.py 2021-03-11 
08:39:55.000000000 +0100
@@ -28,7 +28,7 @@
     A base class define a lock mechanism used to exclude other nodes
     """
 
-    LOCK_DIR = "/tmp/.crmsh_lock_directory"
+    LOCK_DIR = "/run/.crmsh_lock_directory"
     MKDIR_CMD = "mkdir {}".format(LOCK_DIR)
     RM_CMD = "rm -rf {}".format(LOCK_DIR)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ra.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ra.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ra.py   2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ra.py   2021-03-11 
08:39:55.000000000 +0100
@@ -520,21 +520,21 @@
         rc = 0
         d = {}
         for nvp in nvpairs:
-            if 'name' in nvp.attrib and 'value' in nvp.attrib:
+            if 'name' in nvp.attrib:
                 d[nvp.get('name')] = nvp.get('value')
         if not existence_only:
             for p in reqd_params_list():
                 if unreq_param(p):
                     continue
                 if p not in d:
-                    common_err("%s: required parameter %s not defined" % 
(ident, p))
+                    common_err("{}: required parameter \"{}\" not 
defined".format(ident, p))
                     rc |= utils.get_check_rc()
         for p in d:
             if p.startswith("$"):
                 # these are special, non-RA parameters
                 continue
             if p not in self.params():
-                common_err("%s: parameter %s does not exist" % (ident, p))
+                common_err("{}: parameter \"{}\" is not known".format(ident, 
p))
                 rc |= utils.get_check_rc()
         return rc
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_cluster.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_cluster.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_cluster.py   2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_cluster.py   2021-03-11 
08:39:55.000000000 +0100
@@ -113,7 +113,7 @@
             if not utils.service_is_active("corosync.service"):
                 err_buf.info("Cluster services already stopped")
                 return
-            if utils.is_qdevice_configured():
+            if utils.service_is_active("corosync-qdevice"):
                 utils.stop_service("corosync-qdevice")
             utils.stop_service("corosync")
             err_buf.info("Cluster services stopped")
@@ -589,26 +589,21 @@
         '''
         Quick cluster health status. Corosync status, DRBD status...
         '''
-
-        stack = utils.cluster_stack()
-        if not stack:
-            err_buf.error("No supported cluster stack found (tried 
heartbeat|openais|corosync)")
-        if utils.cluster_stack() == 'corosync':
-            print("Name: {}\n".format(get_cluster_name()))
-            print("Services:")
-            for svc in ["corosync", "pacemaker"]:
-                info = utils.service_info(svc)
-                if info:
-                    print("%-16s %s" % (svc, info))
-                else:
-                    print("%-16s unknown" % (svc))
-
-            rc, outp = utils.get_stdout(['corosync-cfgtool', '-s'], 
shell=False)
-            if rc == 0:
-                print("")
-                print(outp)
+        print("Name: {}\n".format(get_cluster_name()))
+        print("Services:")
+        for svc in ["corosync", "pacemaker"]:
+            info = utils.service_info(svc)
+            if info:
+                print("%-16s %s" % (svc, info))
             else:
-                print("Failed to get corosync status")
+                print("%-16s unknown" % (svc))
+
+        rc, outp = utils.get_stdout(['corosync-cfgtool', '-s'], shell=False)
+        if rc == 0:
+            print("")
+            print(outp)
+        else:
+            print("Failed to get corosync status")
 
     @command.completers_repeating(compl.choice(['10', '60', '600']))
     def do_wait_for_startup(self, context, timeout='10'):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_configure.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_configure.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_configure.py 2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_configure.py 2021-03-11 
08:39:55.000000000 +0100
@@ -327,7 +327,11 @@
     if last_keyw is None:
         return []
 
-    return completers_set[last_keyw](agent, args) + keywords
+    complete_results = completers_set[last_keyw](agent, args)
+    if len(args) > 4 and '=' in args[-2]: # args[-1] will be the space
+        return complete_results + keywords
+
+    return complete_results
 
 
 def container_helptxt(params, helptxt, topic):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_corosync.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_corosync.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_corosync.py  2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_corosync.py  2021-03-11 
08:39:55.000000000 +0100
@@ -52,10 +52,6 @@
     name = "corosync"
 
     def requires(self):
-        stack = utils.cluster_stack()
-        if len(stack) > 0 and stack != 'corosync':
-            err_buf.warning("Unsupported cluster stack %s detected." % (stack))
-            return False
         return corosync.check_tools()
 
     @command.completers(completers.choice(['ring', 'quorum', 'qnetd']))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_node.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_node.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/ui_node.py      2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/ui_node.py      2021-03-11 
08:39:55.000000000 +0100
@@ -250,13 +250,6 @@
                 return False
         return True
 
-    @command.completers(compl.nodes)
-    def do_status(self, context, node=None):
-        'usage: status [<node>]'
-        a = node and ('--xpath "//nodes/node[@uname=\'%s\']"' % node) or \
-            '-o nodes'
-        return utils.ext_cmd("%s %s" % (xmlutil.cib_dump, a)) == 0
-
     @command.alias('list')
     @command.completers(compl.nodes)
     def do_show(self, context, node=None):
@@ -387,21 +380,18 @@
     def _call_delnode(self, node):
         "Remove node (how depends on cluster stack)"
         rc = True
-        if utils.cluster_stack() == "heartbeat":
-            cmd = (self.hb_delnode % node)
-        else:
-            ec, s = utils.get_stdout("%s -p" % self.crm_node)
-            if not s:
-                common_err('%s -p could not list any nodes (rc=%d)' %
-                           (self.crm_node, ec))
+        ec, s = utils.get_stdout("%s -p" % self.crm_node)
+        if not s:
+            common_err('%s -p could not list any nodes (rc=%d)' %
+                       (self.crm_node, ec))
+            rc = False
+        else:
+            partition_l = s.split()
+            if node in partition_l:
+                common_err("according to %s, node %s is still active" %
+                           (self.crm_node, node))
                 rc = False
-            else:
-                partition_l = s.split()
-                if node in partition_l:
-                    common_err("according to %s, node %s is still active" %
-                               (self.crm_node, node))
-                    rc = False
-            cmd = "%s --force -R %s" % (self.crm_node, node)
+        cmd = "%s --force -R %s" % (self.crm_node, node)
         if not rc:
             if config.core.force:
                 common_info('proceeding with node %s removal' % node)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/utils.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/utils.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/crmsh/utils.py        2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/crmsh/utils.py        2021-03-11 
08:39:55.000000000 +0100
@@ -350,6 +350,15 @@
     return cmd
 
 
+def add_su(cmd, user):
+    """
+    Wrapped cmd with su -c "<cmd>" <user>
+    """
+    if user == "root":
+        return cmd
+    return "su -c \"{}\" {}".format(cmd, user)
+
+
 def chown(path, user, group):
     if isinstance(user, int):
         uid = user
@@ -1094,17 +1103,6 @@
     traceback.print_stack(sf)
 
 
-@memoize
-def cluster_stack():
-    if is_process("heartbeat:.[m]aster"):
-        return "heartbeat"
-    elif is_process("[a]isexec"):
-        return "openais"
-    elif os.path.exists("/etc/corosync/corosync.conf") or 
is_program('corosync-cfgtool'):
-        return "corosync"
-    return ""
-
-
 def edit_file(fname):
     'Edit a file.'
     if not fname:
@@ -2091,12 +2089,13 @@
     return re.findall(r'id\s*=\s*(.*)', out)
 
 
-def check_ssh_passwd_need(host):
+def check_ssh_passwd_need(host, user="root"):
     """
     Check whether access to host need password
     """
     ssh_options = "-o StrictHostKeyChecking=no -o EscapeChar=none -o 
ConnectTimeout=15"
     ssh_cmd = "ssh {} -T -o Batchmode=yes {} true".format(ssh_options, host)
+    ssh_cmd = add_su(ssh_cmd, user)
     rc, _, _ = get_stdout_stderr(ssh_cmd)
     return rc != 0
 
@@ -2639,4 +2638,48 @@
     rc, _, err = get_stdout_stderr("ping -c 1 {}".format(node))
     if rc != 0:
         raise ValueError("host \"{}\" is unreachable: {}".format(node, err))
+
+
+def is_quorate(expected_votes, actual_votes):
+    """
+    Given expected votes and actual votes, calculate if is quorated
+    """
+    return int(actual_votes)/int(expected_votes) > 0.5
+
+
+def get_stdout_or_raise_error(cmd, remote=None, success_val=0):
+    """
+    Common function to get stdout from cmd or raise exception
+    """
+    if remote:
+        cmd = "ssh -o StrictHostKeyChecking=no root@{} \"{}\"".format(remote, 
cmd)
+    rc, out, err = get_stdout_stderr(cmd)
+    if rc != success_val:
+        raise ValueError("Failed to run \"{}\": {}".format(cmd, err))
+    return out
+
+
+def get_quorum_votes_dict(remote=None):
+    """
+    Return a dictionary which contain expect votes and total votes
+    """
+    out = get_stdout_or_raise_error("corosync-quorumtool -s", remote=remote)
+    return dict(re.findall("(Expected|Total) votes:\s+(\d+)", out))
+
+
+def has_resource_running():
+    """
+    Check if any RA is running
+    """
+    out = get_stdout_or_raise_error("crm_mon -1")
+    return re.search("No active resources", out) is None
+
+
+def check_all_nodes_reachable():
+    """
+    Check if all cluster nodes are reachable
+    """
+    out = get_stdout_or_raise_error("crm_node -l")
+    for node in re.findall("\d+ (.*) \w+", out):
+        ping_node(node)
 # vim:ts=4:sw=4:et:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.0+git.20210219.811c32f0/doc/crm.8.adoc 
new/crmsh-4.3.0+git.20210311.c2e8856c/doc/crm.8.adoc
--- old/crmsh-4.3.0+git.20210219.811c32f0/doc/crm.8.adoc        2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/doc/crm.8.adoc        2021-03-11 
08:39:55.000000000 +0100
@@ -2539,18 +2539,6 @@
 standby bob reboot
 ...............
 
-
-[[cmdhelp_node_status,show nodes' status as XML]]
-==== `status`
-
-Show nodes' status as XML. If the node parameter is omitted then
-all nodes are shown.
-
-Usage:
-...............
-status [<node>]
-...............
-
 [[cmdhelp_node_status-attr,manage status attributes]]
 ==== `status-attr`
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/hb_report/hb_report.in 
new/crmsh-4.3.0+git.20210311.c2e8856c/hb_report/hb_report.in
--- old/crmsh-4.3.0+git.20210219.811c32f0/hb_report/hb_report.in        
2021-02-19 02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/hb_report/hb_report.in        
2021-03-11 08:39:55.000000000 +0100
@@ -82,7 +82,10 @@
     if constants.CTS:
         pass  # TODO
     else:
-        getstampproc = utillib.find_getstampproc(constants.HA_LOG)
+        try:
+            getstampproc = utillib.find_getstampproc(constants.HA_LOG)
+        except PermissionError:
+            return
         if getstampproc:
             constants.GET_STAMP_FUNC = getstampproc
             if utillib.dump_logset(constants.HA_LOG, constants.FROM_TIME, 
constants.TO_TIME, outf):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/hb_report/utillib.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/hb_report/utillib.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/hb_report/utillib.py  2021-02-19 
02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/hb_report/utillib.py  2021-03-11 
08:39:55.000000000 +0100
@@ -1521,14 +1521,12 @@
 
 def start_slave_collector(node, arg_str):
     if node == constants.WE:
-        cmd = r"hb_report __slave".format(os.getcwd())
+        cmd = r"/usr/sbin/hb_report __slave".format(os.getcwd())
         for item in arg_str.split():
             cmd += " {}".format(str(item))
         _, out = crmutils.get_stdout(cmd)
     else:
-        cmd = r'ssh {} {} "{} hb_report __slave"'.\
-              format(constants.SSH_OPTS, node,
-                     constants.SUDO, os.getcwd())
+        cmd = r'ssh {} {} "/usr/sbin/hb_report 
__slave"'.format(constants.SSH_OPTS, node, os.getcwd())
         for item in arg_str.split():
             cmd += " {}".format(str(item))
         code, out, err = crmutils.get_stdout_stderr(cmd)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/bootstrap_bugs.feature 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/bootstrap_bugs.feature
--- old/crmsh-4.3.0+git.20210219.811c32f0/test/features/bootstrap_bugs.feature  
2021-02-19 02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/bootstrap_bugs.feature  
2021-03-11 08:39:55.000000000 +0100
@@ -92,7 +92,7 @@
     Then    Cluster service is "started" on "hanode2"
     # Try to simulate the join process hanging on hanode2 or hanode2 died
     # Just leave the lock directory unremoved
-    When    Run "mkdir /tmp/.crmsh_lock_directory" on "hanode1"
+    When    Run "mkdir /run/.crmsh_lock_directory" on "hanode1"
     When    Try "crm cluster join -c hanode1 -y" on "hanode3"
-    Then    Except "ERROR: cluster.join: Timed out after 120 seconds. Cannot 
continue since the lock directory exists at the node 
(hanode1:/tmp/.crmsh_lock_directory)"
-    When    Run "rm -rf /tmp/.crmsh_lock_directory" on "hanode1"
+    Then    Except "ERROR: cluster.join: Timed out after 120 seconds. Cannot 
continue since the lock directory exists at the node 
(hanode1:/run/.crmsh_lock_directory)"
+    When    Run "rm -rf /run/.crmsh_lock_directory" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/qdevice_setup_remove.feature
 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/qdevice_setup_remove.feature
--- 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/qdevice_setup_remove.feature
    2021-02-19 02:48:25.000000000 +0100
+++ 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/qdevice_setup_remove.feature
    2021-03-11 08:39:55.000000000 +0100
@@ -14,6 +14,8 @@
   Scenario: Setup qdevice/qnetd during init/join process
     When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
+    # for bsc#1181415
+    Then    Expected "Restarting cluster service" in stdout
     And     Service "corosync-qdevice" is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
@@ -35,6 +37,8 @@
     When    Run "echo "# This is a test for bsc#1166684" >> 
/etc/corosync/corosync.conf" on "hanode1"
     When    Run "scp /etc/corosync/corosync.conf root@hanode2:/etc/corosync" 
on "hanode1"
     When    Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on 
"hanode1"
+    # for bsc#1181415
+    Then    Expected "Starting corosync-qdevice.service in cluster" in stdout
     Then    Service "corosync-qdevice" is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode2"
     And     Service "corosync-qnetd" is "started" on "qnetd-node"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/qdevice_validate.feature 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/qdevice_validate.feature
--- 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/qdevice_validate.feature    
    2021-02-19 02:48:25.000000000 +0100
+++ 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/qdevice_validate.feature    
    2021-03-11 08:39:55.000000000 +0100
@@ -108,3 +108,39 @@
       usage: init [options] [STAGE]
       crm: error: Option --qnetd-hostname is required if want to configure 
qdevice
       """
+
+  @clean
+  Scenario: Setup qdevice on a single node cluster with RA running(bsc#1181415)
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "stopped" on "hanode1"
+    When    Run "crm configure primitive d Dummy op monitor interval=3s" on 
"hanode1"
+    When    Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on 
"hanode1"
+    Then    Expected "WARNING: To use qdevice service, need to restart cluster 
service manually on each node" in stdout
+    And     Service "corosync-qdevice" is "stopped" on "hanode1"
+    When    Run "crm cluster restart" on "hanode1"
+    Then    Service "corosync-qdevice" is "started" on "hanode1"
+
+  @clean
+  Scenario: Remove qdevice from a single node cluster(bsc#1181415)
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "started" on "hanode1"
+    When    Run "crm cluster remove --qdevice -y" on "hanode1"
+    Then    Expected "Restarting cluster service" in stdout
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "stopped" on "hanode1"
+
+  @clean
+  Scenario: Remove qdevice from a single node cluster which has RA 
running(bsc#1181415)
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "started" on "hanode1"
+    When    Run "crm configure primitive d Dummy op monitor interval=3s" on 
"hanode1"
+    When    Run "crm cluster remove --qdevice -y" on "hanode1"
+    Then    Expected "WARNING: To remove qdevice service, need to restart 
cluster service manually on each node" in stdout
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "started" on "hanode1"
+    When    Run "crm cluster restart" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "stopped" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/steps/step_implenment.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/steps/step_implenment.py
--- 
old/crmsh-4.3.0+git.20210219.811c32f0/test/features/steps/step_implenment.py    
    2021-02-19 02:48:25.000000000 +0100
+++ 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/features/steps/step_implenment.py    
    2021-03-11 08:39:55.000000000 +0100
@@ -47,6 +47,16 @@
     context.stdout = out
 
 
+@then('Print stdout')
+def step_impl(context):
+    context.logger.info("\n{}".format(context.stdout))
+
+
+@then('Print stderr')
+def step_impl(context):
+    context.logger.info("\n{}".format(context.command_error_output))
+
+
 @when('Try "{cmd}" on "{addr}"')
 def step_impl(context, cmd, addr):
     run_command_local_or_remote(context, cmd, addr, err_record=True)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_bootstrap.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_bootstrap.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_bootstrap.py      
2021-02-19 02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_bootstrap.py      
2021-03-11 08:39:55.000000000 +0100
@@ -581,6 +581,7 @@
         mock_watchdog_inst.init_watchdog.assert_called_once_with()
         mock_invoke.assert_called_once_with("systemctl disable sbd.service")
 
+    @mock.patch('crmsh.bootstrap.warn')
     @mock.patch('crmsh.bootstrap.status_done')
     @mock.patch('crmsh.bootstrap.SBDManager._update_configuration')
     @mock.patch('crmsh.bootstrap.SBDManager._initialize_sbd')
@@ -588,7 +589,7 @@
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device')
     @mock.patch('crmsh.bootstrap.Watchdog')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, 
mock_status, mock_initialize, mock_update, mock_status_done):
+    def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, 
mock_status, mock_initialize, mock_update, mock_status_done, mock_warn):
         bootstrap._context = mock.Mock(watchdog=None)
         mock_package.return_value = True
         mock_watchdog_inst = mock.Mock()
@@ -604,6 +605,7 @@
         mock_status_done.assert_called_once_with()
         mock_watchdog.assert_called_once_with(_input=None)
         mock_watchdog_inst.init_watchdog.assert_called_once_with()
+        
mock_warn.assert_called_once_with(bootstrap.SBDManager.DISKLESS_SBD_WARNING)
 
     @mock.patch('crmsh.utils.package_is_installed')
     def test_configure_sbd_resource_not_installed(self, mock_package):
@@ -739,6 +741,38 @@
         mock_watchdog.assert_called_once_with(peer_host="node1")
         mock_watchdog_inst.join_watchdog.assert_called_once_with()
 
+    @mock.patch('crmsh.bootstrap.status')
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device_from_config')
+    @mock.patch('crmsh.bootstrap.Watchdog')
+    @mock.patch('crmsh.bootstrap.invoke')
+    @mock.patch('crmsh.utils.service_is_enabled')
+    @mock.patch('os.path.exists')
+    @mock.patch('crmsh.utils.package_is_installed')
+    def test_join_sbd_diskless(self, mock_package, mock_exists, mock_enabled, 
mock_invoke, mock_watchdog, mock_get_device, mock_quorum_votes, mock_warn, 
mock_status):
+        mock_package.return_value = True
+        mock_exists.return_value = True
+        mock_enabled.return_value = True
+        mock_get_device.return_value = []
+        mock_watchdog_inst = mock.Mock()
+        mock_watchdog.return_value = mock_watchdog_inst
+        mock_watchdog_inst.join_watchdog = mock.Mock()
+        mock_quorum_votes.return_value = {'Expected': '1', 'Total': '1'}
+
+        self.sbd_inst.join_sbd("node1")
+
+        mock_package.assert_called_once_with("sbd")
+        mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
+        mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+        mock_get_device.assert_called_once_with()
+        mock_quorum_votes.assert_called_once_with("node1")
+        
mock_warn.assert_called_once_with(bootstrap.SBDManager.DISKLESS_SBD_WARNING)
+        mock_enabled.assert_called_once_with("sbd.service", "node1")
+        mock_status.assert_called_once_with("Got diskless SBD configuration")
+        mock_watchdog.assert_called_once_with(peer_host="node1")
+        mock_watchdog_inst.join_watchdog.assert_called_once_with()
+
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device_from_config')
     def test_verify_sbd_device_classmethod_exception(self, mock_get_config):
         mock_get_config.return_value = []
@@ -839,30 +873,104 @@
     def test_init_ssh(self, mock_start_service, mock_config_ssh):
         bootstrap.init_ssh()
         mock_start_service.assert_called_once_with("sshd.service", enable=True)
-        mock_config_ssh.assert_called_once_with()
+        mock_config_ssh.assert_has_calls([
+            mock.call("root"),
+            mock.call("hacluster")
+            ])
+
+    @mock.patch('crmsh.userdir.gethomedir')
+    def test_key_files(self, mock_gethome):
+        mock_gethome.return_value = "/root"
+        expected_res = {"private": "/root/.ssh/id_rsa", "public": 
"/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
+        self.assertEqual(bootstrap.key_files("root"), expected_res)
+        mock_gethome.assert_called_once_with("root")
+
+    @mock.patch('builtins.open')
+    def test_is_nologin(self, mock_open_file):
+        data = "hacluster:x:90:90:heartbeat 
processes:/var/lib/heartbeat/cores/hacluster:/sbin/nologin"
+        mock_open_file.return_value = 
mock.mock_open(read_data=data).return_value
+        assert bootstrap.is_nologin("hacluster") is not None
+        mock_open_file.assert_called_once_with("/etc/passwd")
+
+    @mock.patch('crmsh.bootstrap.confirm')
+    @mock.patch('crmsh.bootstrap.status')
+    @mock.patch('crmsh.bootstrap.is_nologin')
+    def test_change_user_shell_return(self, mock_nologin, mock_status, 
mock_confirm):
+        bootstrap._context = mock.Mock(yes_to_all=False)
+        mock_nologin.return_value = True
+        mock_confirm.return_value = False
+
+        bootstrap.change_user_shell("hacluster")
+
+        mock_nologin.assert_called_once_with("hacluster")
+        mock_confirm.assert_called_once_with("Continue?")
+
+    @mock.patch('crmsh.bootstrap.invoke')
+    @mock.patch('crmsh.bootstrap.is_nologin')
+    def test_change_user_shell_return(self, mock_nologin, mock_invoke):
+        bootstrap._context = mock.Mock(yes_to_all=True)
+        mock_nologin.return_value = True
+
+        bootstrap.change_user_shell("hacluster")
+
+        mock_nologin.assert_called_once_with("hacluster")
+        mock_invoke.assert_called_once_with("usermod -s /bin/bash hacluster")
+
+    @mock.patch('crmsh.utils.this_node')
+    @mock.patch('crmsh.bootstrap.error')
+    @mock.patch('crmsh.bootstrap.invoke')
+    @mock.patch('crmsh.bootstrap.status')
+    @mock.patch('os.path.exists')
+    @mock.patch('crmsh.bootstrap.key_files')
+    @mock.patch('crmsh.bootstrap.change_user_shell')
+    def test_configure_local_ssh_key_error(self, mock_change_shell, 
mock_key_files, mock_exists, mock_status, mock_invoke, mock_error, 
mock_this_node):
+        mock_key_files.return_value = {"private": "/root/.ssh/id_rsa", 
"public": "/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
+        mock_exists.return_value = False
+        mock_invoke.return_value = (False, None, "error")
+        mock_this_node.return_value = "node1"
+        mock_error.side_effect = SystemExit
+
+        with self.assertRaises(SystemExit) as err:
+            bootstrap.configure_local_ssh_key("root")
+
+        mock_change_shell.assert_called_once_with("root")
+        mock_key_files.assert_called_once_with("root")
+        mock_exists.assert_called_once_with("/root/.ssh/id_rsa")
+        mock_status.assert_called_once_with("Generating SSH key for root")
+        mock_invoke.assert_called_once_with("ssh-keygen -q -f 
/root/.ssh/id_rsa -C 'Cluster Internal on node1' -N ''")
+        mock_error.assert_called_once_with("Failed to generate ssh key for 
root: error")
 
     @mock.patch('crmsh.bootstrap.append_unique')
     @mock.patch('builtins.open', create=True)
-    @mock.patch('crmsh.utils.this_node')
     @mock.patch('crmsh.bootstrap.invoke')
+    @mock.patch('crmsh.utils.add_su')
+    @mock.patch('crmsh.utils.this_node')
     @mock.patch('crmsh.bootstrap.status')
     @mock.patch('os.path.exists')
-    def test_configure_local_ssh_key(self, mock_exists, mock_status, 
mock_invoke,
-            mock_this_node, mock_open_file, mock_append):
+    @mock.patch('crmsh.bootstrap.key_files')
+    @mock.patch('crmsh.bootstrap.change_user_shell')
+    def test_configure_local_ssh_key(self, mock_change_shell, mock_key_files, 
mock_exists, mock_status, mock_this_node, mock_su, mock_invoke, mock_open_file, 
mock_append):
+        bootstrap._context = mock.Mock(yes_to_all=True)
+        mock_key_files.return_value = {"private": "/test/.ssh/id_rsa", 
"public": "/test/.ssh/id_rsa.pub", "authorized": "/test/.ssh/authorized_keys"}
         mock_exists.side_effect = [False, False]
         mock_this_node.return_value = "node1"
+        mock_invoke.return_value = (True, None, None)
+        mock_su.return_value = "cmd with su"
 
-        bootstrap.configure_local_ssh_key()
+        bootstrap.configure_local_ssh_key("test")
 
+        mock_change_shell.assert_called_once_with("test")
+        mock_key_files.assert_called_once_with("test")
         mock_exists.assert_has_calls([
-            mock.call(bootstrap.RSA_PRIVATE_KEY),
-            mock.call(bootstrap.AUTHORIZED_KEYS_FILE)
+            mock.call("/test/.ssh/id_rsa"),
+            mock.call("/test/.ssh/authorized_keys")
             ])
-        mock_status.assert_called_once_with("Generating SSH key")
-        mock_invoke.assert_called_once_with("ssh-keygen -q -f {} -C 'Cluster 
Internal on {}' -N ''".format(bootstrap.RSA_PRIVATE_KEY, 
mock_this_node.return_value))
+        mock_status.assert_called_once_with("Generating SSH key for test")
+        mock_invoke.assert_called_once_with("cmd with su")
+        mock_su.assert_called_once_with("ssh-keygen -q -f /test/.ssh/id_rsa -C 
'Cluster Internal on node1' -N ''", "test")
         mock_this_node.assert_called_once_with()
-        mock_open_file.assert_called_once_with(bootstrap.AUTHORIZED_KEYS_FILE, 
'w')
-        mock_append.assert_called_once_with(bootstrap.RSA_PUBLIC_KEY, 
bootstrap.AUTHORIZED_KEYS_FILE)
+        mock_open_file.assert_called_once_with("/test/.ssh/authorized_keys", 
'w')
+        mock_append.assert_called_once_with("/test/.ssh/id_rsa.pub", 
"/test/.ssh/authorized_keys")
 
     @mock.patch('crmsh.bootstrap.append')
     @mock.patch('crmsh.utils.check_file_content_included')
@@ -931,40 +1039,56 @@
         bootstrap.join_ssh("node1")
 
         mock_start_service.assert_called_once_with("sshd.service", enable=True)
-        mock_config_ssh.assert_called_once_with()
-        mock_swap.assert_called_once_with("node1")
+        mock_config_ssh.assert_has_calls([
+            mock.call("root"),
+            mock.call("hacluster")
+            ])
+        mock_swap.assert_has_calls([
+            mock.call("node1", "root"),
+            mock.call("node1", "hacluster")
+            ])
         mock_invoke.assert_called_once_with("ssh root@node1 crm cluster init 
-i eth1 ssh_remote")
         mock_error.assert_called_once_with("Can't invoke crm cluster init -i 
eth1 ssh_remote on node1: error")
 
+    def test_swap_public_ssh_key_return(self):
+        bootstrap._context = mock.Mock(with_other_user=False)
+        bootstrap.swap_public_ssh_key("node1", "hacluster")
+
     @mock.patch('crmsh.bootstrap.warn')
     @mock.patch('crmsh.bootstrap.fetch_public_key_from_remote_node')
     @mock.patch('crmsh.utils.check_ssh_passwd_need')
-    def test_swap_public_ssh_key_exception(self, mock_check_passwd, 
mock_fetch, mock_warn):
+    @mock.patch('crmsh.bootstrap.key_files')
+    def test_swap_public_ssh_key_exception(self, mock_key_files, 
mock_check_passwd, mock_fetch, mock_warn):
+        mock_key_files.return_value = {"private": "/root/.ssh/id_rsa", 
"public": "/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
         mock_check_passwd.return_value = False
         mock_fetch.side_effect = ValueError("No key exist")
 
         bootstrap.swap_public_ssh_key("node1")
 
+        mock_key_files.assert_called_once_with("root")
         mock_warn.assert_called_once_with(mock_fetch.side_effect)
-        mock_check_passwd.assert_called_once_with("node1")
-        mock_fetch.assert_called_once_with("node1")
+        mock_check_passwd.assert_called_once_with("node1", "root")
+        mock_fetch.assert_called_once_with("node1", "root")
 
     @mock.patch('crmsh.bootstrap.append_unique')
     @mock.patch('crmsh.bootstrap.fetch_public_key_from_remote_node')
     @mock.patch('crmsh.bootstrap.append_to_remote_file')
     @mock.patch('crmsh.bootstrap.status')
     @mock.patch('crmsh.utils.check_ssh_passwd_need')
-    def test_swap_public_ssh_key(self, mock_check_passwd, mock_status, 
mock_append_remote, mock_fetch, mock_append_unique):
+    @mock.patch('crmsh.bootstrap.key_files')
+    def test_swap_public_ssh_key(self, mock_key_files, mock_check_passwd, 
mock_status, mock_append_remote, mock_fetch, mock_append_unique):
+        mock_key_files.return_value = {"private": "/root/.ssh/id_rsa", 
"public": "/root/.ssh/id_rsa.pub", "authorized": "/root/.ssh/authorized_keys"}
         mock_check_passwd.return_value = True
         mock_fetch.return_value = "file1"
 
         bootstrap.swap_public_ssh_key("node1")
 
-        mock_check_passwd.assert_called_once_with("node1")
+        mock_key_files.assert_called_once_with("root")
+        mock_check_passwd.assert_called_once_with("node1", "root")
         mock_status.assert_called_once_with("Configuring SSH passwordless with 
root@node1")
-        mock_append_remote.assert_called_once_with(bootstrap.RSA_PUBLIC_KEY, 
"node1", bootstrap.AUTHORIZED_KEYS_FILE)
-        mock_fetch.assert_called_once_with("node1")
-        mock_append_unique.assert_called_once_with("file1", 
bootstrap.AUTHORIZED_KEYS_FILE)
+        mock_append_remote.assert_called_once_with("/root/.ssh/id_rsa.pub", 
"node1", "/root/.ssh/authorized_keys")
+        mock_fetch.assert_called_once_with("node1", "root")
+        mock_append_unique.assert_called_once_with("file1", 
"/root/.ssh/authorized_keys")
 
     @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.utils.get_stdout_stderr')
@@ -1014,7 +1138,10 @@
             mock.call("ssh -o StrictHostKeyChecking=no root@node1 crm_node 
-l"),
             mock.call("ssh -o StrictHostKeyChecking=no root@node1 hostname")
             ])
-        mock_swap.assert_called_once_with("node2")
+        mock_swap.assert_has_calls([
+            mock.call("node2", "root"),
+            mock.call("node2", "hacluster")
+            ])
 
     @mock.patch('builtins.open')
     @mock.patch('crmsh.bootstrap.append')
@@ -1293,8 +1420,8 @@
     @mock.patch('crmsh.corosync.QDevice.enable_qnetd')
     @mock.patch('crmsh.utils.cluster_run_cmd')
     @mock.patch('crmsh.bootstrap.status')
-    def test_start_qdevice_service(self, mock_status, mock_cluster_run, 
mock_enable_qnetd, mock_start_qnetd):
-        bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip)
+    def test_start_qdevice_service_reload(self, mock_status, mock_cluster_run, 
mock_enable_qnetd, mock_start_qnetd):
+        bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, 
qdevice_reload_policy=bootstrap.QdevicePolicy.QDEVICE_RELOAD)
 
         bootstrap.start_qdevice_service()
 
@@ -1306,11 +1433,57 @@
             ])
         mock_cluster_run.assert_has_calls([
             mock.call("systemctl enable corosync-qdevice"),
-            mock.call("systemctl start corosync-qdevice")
+            mock.call("systemctl restart corosync-qdevice")
             ])
         mock_enable_qnetd.assert_called_once_with()
         mock_start_qnetd.assert_called_once_with()
 
+    @mock.patch('crmsh.corosync.QDevice.start_qnetd')
+    @mock.patch('crmsh.corosync.QDevice.enable_qnetd')
+    @mock.patch('crmsh.bootstrap.wait_for_cluster')
+    @mock.patch('crmsh.utils.cluster_run_cmd')
+    @mock.patch('crmsh.bootstrap.status')
+    def test_start_qdevice_service_restart(self, mock_status, 
mock_cluster_run, mock_wait, mock_enable_qnetd, mock_start_qnetd):
+        bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, 
qdevice_reload_policy=bootstrap.QdevicePolicy.QDEVICE_RESTART)
+
+        bootstrap.start_qdevice_service()
+
+        mock_status.assert_has_calls([
+            mock.call("Enable corosync-qdevice.service in cluster"),
+            mock.call("Restarting cluster service"),
+            mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+            mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+            ])
+        mock_wait.assert_called_once_with()
+        mock_cluster_run.assert_has_calls([
+            mock.call("systemctl enable corosync-qdevice"),
+            mock.call("crm cluster restart")
+            ])
+        mock_enable_qnetd.assert_called_once_with()
+        mock_start_qnetd.assert_called_once_with()
+
+    @mock.patch('crmsh.corosync.QDevice.start_qnetd')
+    @mock.patch('crmsh.corosync.QDevice.enable_qnetd')
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.utils.cluster_run_cmd')
+    @mock.patch('crmsh.bootstrap.status')
+    def test_start_qdevice_service_warn(self, mock_status, mock_cluster_run, 
mock_warn, mock_enable_qnetd, mock_start_qnetd):
+        bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, 
qdevice_reload_policy=bootstrap.QdevicePolicy.QDEVICE_RESTART_LATER)
+
+        bootstrap.start_qdevice_service()
+
+        mock_status.assert_has_calls([
+            mock.call("Enable corosync-qdevice.service in cluster"),
+            mock.call("Enable corosync-qnetd.service on 10.10.10.123"),
+            mock.call("Starting corosync-qnetd.service on 10.10.10.123")
+            ])
+        mock_cluster_run.assert_has_calls([
+            mock.call("systemctl enable corosync-qdevice"),
+            ])
+        mock_warn.assert_called_once_with("To use qdevice service, need to 
restart cluster service manually on each node")
+        mock_enable_qnetd.assert_called_once_with()
+        mock_start_qnetd.assert_called_once_with()
+
     @mock.patch('crmsh.bootstrap.status_done')
     @mock.patch('crmsh.utils.cluster_run_cmd')
     @mock.patch('crmsh.bootstrap.update_expected_votes')
@@ -1321,7 +1494,7 @@
     @mock.patch('crmsh.corosync.QDevice.remove_qdevice_db')
     def test_config_qdevice(self, mock_remove_qdevice_db, 
mock_write_qdevice_config, mock_is_unicast,
             mock_add_nodelist, mock_status_long, mock_update_votes, 
mock_cluster_run, mock_status_done):
-        bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip)
+        bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, 
qdevice_reload_policy=bootstrap.QdevicePolicy.QDEVICE_RELOAD)
         mock_is_unicast.return_value = False
 
         bootstrap.config_qdevice()
@@ -1365,12 +1538,15 @@
     @mock.patch('crmsh.bootstrap.status_long')
     @mock.patch('crmsh.bootstrap.invoke')
     @mock.patch('crmsh.bootstrap.status')
+    @mock.patch('crmsh.bootstrap.evaluate_qdevice_quorum_effect')
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.bootstrap.confirm')
     @mock.patch('crmsh.utils.is_qdevice_configured')
-    def test_remove_qdevice(self, mock_qdevice_configured, mock_confirm, 
mock_status, mock_invoke,
-            mock_status_long, mock_get_value, mock_qdevice, mock_update_votes, 
mock_status_done):
+    def test_remove_qdevice_reload(self, mock_qdevice_configured, 
mock_confirm, mock_reachable, mock_evaluate,
+            mock_status, mock_invoke, mock_status_long, mock_get_value, 
mock_qdevice, mock_update_votes, mock_status_done):
         mock_qdevice_configured.return_value = True
         mock_confirm.return_value = True
+        mock_evaluate.return_value = bootstrap.QdevicePolicy.QDEVICE_RELOAD
         mock_get_value.return_value = "10.10.10.123"
         mock_qdevice_inst = mock.Mock()
         mock_qdevice.return_value = mock_qdevice_inst
@@ -1381,6 +1557,8 @@
 
         mock_qdevice_configured.assert_called_once_with()
         mock_confirm.assert_called_once_with("Removing QDevice service and 
configuration from cluster: Are you sure?")
+        mock_reachable.assert_called_once_with()
+        mock_evaluate.assert_called_once_with(bootstrap.QDEVICE_REMOVE)
         mock_status.assert_has_calls([
             mock.call("Disable corosync-qdevice.service"),
             mock.call("Stopping corosync-qdevice.service")
@@ -1454,6 +1632,42 @@
         self.assertEqual(res, True)
         mock_invoke.assert_called_once_with("cmd")
 
+    @mock.patch('crmsh.utils.is_quorate')
+    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    def test_evaluate_qdevice_quorum_effect_reload(self, mock_get_dict, 
mock_quorate):
+        mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+        mock_quorate.return_value = True
+        res = bootstrap.evaluate_qdevice_quorum_effect(bootstrap.QDEVICE_ADD)
+        self.assertEqual(res, bootstrap.QdevicePolicy.QDEVICE_RELOAD)
+        mock_get_dict.assert_called_once_with()
+        mock_quorate.assert_called_once_with(3, 2)
+
+    @mock.patch('crmsh.utils.has_resource_running')
+    @mock.patch('crmsh.utils.is_quorate')
+    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    def test_evaluate_qdevice_quorum_effect_restart_later(self, mock_get_dict, 
mock_quorate, mock_ra_running):
+        mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+        mock_quorate.return_value = False
+        mock_ra_running.return_value = True
+        res = 
bootstrap.evaluate_qdevice_quorum_effect(bootstrap.QDEVICE_REMOVE)
+        self.assertEqual(res, bootstrap.QdevicePolicy.QDEVICE_RESTART_LATER)
+        mock_get_dict.assert_called_once_with()
+        mock_quorate.assert_called_once_with(2, 1)
+        mock_ra_running.assert_called_once_with()
+
+    @mock.patch('crmsh.utils.has_resource_running')
+    @mock.patch('crmsh.utils.is_quorate')
+    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    def test_evaluate_qdevice_quorum_effect(self, mock_get_dict, mock_quorate, 
mock_ra_running):
+        mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
+        mock_quorate.return_value = False
+        mock_ra_running.return_value = False
+        res = 
bootstrap.evaluate_qdevice_quorum_effect(bootstrap.QDEVICE_REMOVE)
+        self.assertEqual(res, bootstrap.QdevicePolicy.QDEVICE_RESTART)
+        mock_get_dict.assert_called_once_with()
+        mock_quorate.assert_called_once_with(2, 1)
+        mock_ra_running.assert_called_once_with()
+
 
 class TestValidation(unittest.TestCase):
     """
@@ -1764,67 +1978,6 @@
         mock_remove.assert_called_once_with()
 
     @mock.patch('crmsh.bootstrap.error')
-    @mock.patch('crmsh.utils.is_qdevice_configured')
-    def test_remove_qdevice_not_configured(self, mock_configured, mock_error):
-        mock_configured.return_value = False
-        mock_error.side_effect = SystemExit
-
-        with self.assertRaises(SystemExit):
-            bootstrap.remove_qdevice()
-
-        mock_configured.assert_called_once_with()
-        mock_error.assert_called_once_with("No QDevice configuration in this 
cluster")
-
-    @mock.patch('crmsh.bootstrap.confirm')
-    @mock.patch('crmsh.utils.is_qdevice_configured')
-    def test_remove_qdevice_not_confrim(self, mock_configured, mock_confirm):
-        mock_configured.return_value = True
-        mock_confirm.return_value = False
-
-        bootstrap.remove_qdevice()
-
-        mock_configured.assert_called_once_with()
-        mock_confirm.assert_called_once_with("Removing QDevice service and 
configuration from cluster: Are you sure?")
-
-    @mock.patch('crmsh.bootstrap.status_done')
-    @mock.patch('crmsh.bootstrap.update_expected_votes')
-    @mock.patch('crmsh.corosync.QDevice')
-    @mock.patch('crmsh.corosync.get_value')
-    @mock.patch('crmsh.bootstrap.status_long')
-    @mock.patch('crmsh.bootstrap.invoke')
-    @mock.patch('crmsh.bootstrap.status')
-    @mock.patch('crmsh.bootstrap.confirm')
-    @mock.patch('crmsh.utils.is_qdevice_configured')
-    def test_remove_qdevice(self, mock_configured, mock_confirm, mock_status, 
mock_invoke,
-            mock_long, mock_get_value, mock_qdevice, mock_expected, mock_done):
-        mock_configured.return_value = True
-        mock_confirm.return_value = True
-        mock_get_value.return_value = "node1"
-        mock_qdevice_inst = mock.Mock()
-        mock_qdevice.return_value = mock_qdevice_inst
-
-        bootstrap.remove_qdevice()
-
-        mock_configured.assert_called_once_with()
-        mock_confirm.assert_called_once_with("Removing QDevice service and 
configuration from cluster: Are you sure?")
-        mock_status.assert_has_calls([
-            mock.call("Disable corosync-qdevice.service"),
-            mock.call("Stopping corosync-qdevice.service")
-            ])
-        mock_invoke.assert_has_calls([
-            mock.call("crm cluster run 'systemctl disable corosync-qdevice'"),
-            mock.call("crm cluster run 'systemctl stop corosync-qdevice'"),
-            mock.call("crm cluster run 'crm corosync reload'")
-            ])
-        mock_long.assert_called_once_with("Removing QDevice configuration from 
cluster")
-        mock_get_value.assert_called_once_with("quorum.device.net.host")
-        mock_qdevice.assert_called_once_with("node1")
-        mock_qdevice_inst.remove_qdevice_config.assert_called_once_with()
-        mock_qdevice_inst.remove_qdevice_db.assert_called_once_with()
-        mock_expected.assert_called_once_with()
-        mock_done.assert_called_once_with()
-
-    @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.utils.ext_cmd_nosudo')
     @mock.patch('crmsh.xmlutil.listnodes')
     def test_remove_self_other_nodes(self, mock_list, mock_ext, mock_error):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_lock.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_lock.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_lock.py   
2021-02-19 02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_lock.py   
2021-03-11 08:39:55.000000000 +0100
@@ -73,7 +73,7 @@
         mock_create.return_value = False
         with self.assertRaises(lock.ClaimLockError) as err:
             self.local_inst._lock_or_fail()
-        self.assertEqual("Failed to claim lock (the lock directory exists at 
/tmp/.crmsh_lock_directory)", str(err.exception))
+        self.assertEqual("Failed to claim lock (the lock directory exists at 
{})".format(lock.Lock.LOCK_DIR), str(err.exception))
         mock_create.assert_called_once_with()
 
     @mock.patch('crmsh.lock.Lock._run')
@@ -210,7 +210,7 @@
 
         with self.assertRaises(lock.ClaimLockError) as err:
             self.lock_inst._lock_or_wait()
-        self.assertEqual("Timed out after 120 seconds. Cannot continue since 
the lock directory exists at the node (node1:/tmp/.crmsh_lock_directory)", 
str(err.exception))
+        self.assertEqual("Timed out after 120 seconds. Cannot continue since 
the lock directory exists at the node (node1:{})".format(lock.Lock.LOCK_DIR), 
str(err.exception))
 
         mock_time.assert_has_calls([ mock.call(), mock.call()])
         mock_time_out.assert_has_calls([mock.call(), mock.call(), mock.call()])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_ui_cluster.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_ui_cluster.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_ui_cluster.py     
2021-02-19 02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_ui_cluster.py     
2021-03-11 08:39:55.000000000 +0100
@@ -69,17 +69,14 @@
         mock_info.assert_called_once_with("Cluster services already stopped")
 
     @mock.patch('crmsh.ui_cluster.err_buf.info')
-    @mock.patch('crmsh.utils.is_qdevice_configured')
     @mock.patch('crmsh.utils.stop_service')
     @mock.patch('crmsh.utils.service_is_active')
-    def test_do_stop(self, mock_active, mock_stop, mock_qdevice_configured, 
mock_info):
+    def test_do_stop(self, mock_active, mock_stop, mock_info):
         context_inst = mock.Mock()
-        mock_active.return_value = True
-        mock_qdevice_configured.return_value = True
+        mock_active.side_effect = [True, True]
 
         self.ui_cluster_inst.do_stop(context_inst)
 
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([mock.call("corosync.service"), 
mock.call("corosync-qdevice")])
         mock_stop.assert_has_calls([mock.call("corosync-qdevice"), 
mock.call("corosync")])
-        mock_qdevice_configured.assert_called_once_with()
         mock_info.assert_called_once_with("Cluster services stopped")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_utils.py 
new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_utils.py
--- old/crmsh-4.3.0+git.20210219.811c32f0/test/unittests/test_utils.py  
2021-02-19 02:48:25.000000000 +0100
+++ new/crmsh-4.3.0+git.20210311.c2e8856c/test/unittests/test_utils.py  
2021-03-11 08:39:55.000000000 +0100
@@ -1250,3 +1250,55 @@
         utils.ping_node("node_unreachable")
     assert str(err.value) == 'host "node_unreachable" is unreachable: error 
data'
     mock_run.assert_called_once_with("ping -c 1 node_unreachable")
+
+
+def test_is_quorate():
+    assert utils.is_quorate(3, 2) is True
+    assert utils.is_quorate(3, 1) is False
+
+
+@mock.patch("crmsh.utils.get_stdout_stderr")
+def test_get_stdout_or_raise_error_failed(mock_run):
+    mock_run.return_value = (1, None, "error data")
+    with pytest.raises(ValueError) as err:
+        utils.get_stdout_or_raise_error("cmd")
+    assert str(err.value) == 'Failed to run "cmd": error data'
+    mock_run.assert_called_once_with("cmd")
+
+
+@mock.patch("crmsh.utils.get_stdout_stderr")
+def test_get_stdout_or_raise_error(mock_run):
+    mock_run.return_value = (0, "output data", None)
+    res = utils.get_stdout_or_raise_error("cmd", remote="node1")
+    assert res == "output data"
+    mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no 
root@node1 \"cmd\"")
+
+
+@mock.patch("crmsh.utils.get_stdout_or_raise_error")
+def test_get_quorum_votes_dict(mock_run):
+    mock_run.return_value = """
+Votequorum information
+----------------------
+Expected votes:   1
+Highest expected: 1
+Total votes:      1
+Quorum:           1
+Flags:            Quorate
+    """
+    res = utils.get_quorum_votes_dict()
+    assert res == {'Expected': '1', 'Total': '1'}
+    mock_run.assert_called_once_with("corosync-quorumtool -s", remote=None)
+
+
+@mock.patch("crmsh.utils.get_stdout_or_raise_error")
+def test_has_resource_running(mock_run):
+    mock_run.return_value = """
+Node List:
+  * Online: [ 15sp2-1 ]
+
+Active Resources:
+  * No active resources
+    """
+    res = utils.has_resource_running()
+    assert res is False
+    mock_run.assert_called_once_with("crm_mon -1")

Reply via email to