Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked 
in at 2025-06-30 13:06:10
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.7067 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Mon Jun 30 13:06:10 2025 rev:377 rq:1289219 version:5.0.0+20250630.23be67df

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2025-06-23 15:07:24.003057911 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.7067/crmsh.changes    2025-06-30 13:07:24.836211543 +0200
@@ -1,0 +2,25 @@
+Mon Jun 30 08:39:37 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250630.23be67df:
+  * Fix: bootstrap: should fallback to default user when `core.hosts` is not available from the seed node (bsc#1245343)
+  * Fix: bootstrap: Refine qnetd passwordless configuration logic (bsc#1245387)
+  * Fix: log: Improve function confirm's logic (bsc#1245386)
+
+-------------------------------------------------------------------
+Sun Jun 29 08:46:20 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250629.3482516d:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: behave: Add functional test for previous commit
+  * Dev: bootstrap: Remove dead node from the cluster
+  * Dev: Prevent actions when offline nodes are unreachable
+  * Dev: xmlutil: Address circular import issue
+
+-------------------------------------------------------------------
+Thu Jun 26 07:49:12 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250626.4c49db91:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: bootstrap: Remove user@host item from /root/.config/crm/crm.conf when removing node
+
+-------------------------------------------------------------------

Old:
----
  crmsh-5.0.0+20250623.50ad8e8f.tar.bz2

New:
----
  crmsh-5.0.0+20250630.23be67df.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.BwFFDh/_old  2025-06-30 13:07:26.572283473 +0200
+++ /var/tmp/diff_new_pack.BwFFDh/_new  2025-06-30 13:07:26.572283473 +0200
@@ -41,7 +41,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20250623.50ad8e8f
+Version:        5.0.0+20250630.23be67df
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.BwFFDh/_old  2025-06-30 13:07:26.652286788 +0200
+++ /var/tmp/diff_new_pack.BwFFDh/_new  2025-06-30 13:07:26.680287948 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">50ad8e8fb153c925641f693a5f056f82e5a9c8a1</param>
+  <param name="changesrevision">23be67dfe981dba4f7cad256c4dc81ddbcf2744f</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-5.0.0+20250623.50ad8e8f.tar.bz2 -> crmsh-5.0.0+20250630.23be67df.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/bootstrap.py new/crmsh-5.0.0+20250630.23be67df/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/bootstrap.py        2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/bootstrap.py        2025-06-30 10:29:16.000000000 +0200
@@ -227,7 +227,7 @@
 
         if self.stage == "sbd":
             if self.cluster_is_running:
-                utils.check_all_nodes_reachable()
+                utils.check_all_nodes_reachable("setup SBD")
                 for node in utils.list_cluster_nodes():
                     if not utils.package_is_installed("sbd", node):
                         utils.fatal(SBDManager.SBD_NOT_INSTALLED_MSG + f" on {node}")
@@ -1630,6 +1630,7 @@
         return
 
     logger.info("""Configure Qdevice/Qnetd:""")
+    utils.check_all_nodes_reachable("setup Qdevice")
     cluster_node_list = utils.list_cluster_nodes()
     for node in cluster_node_list:
         if not ServiceManager().service_is_available("corosync-qdevice.service", node):
@@ -1895,16 +1896,22 @@
     init_node_hostname = out
     # Swap ssh public key between join node and other cluster nodes
     for node in (node for node in cluster_node_list if node != init_node_hostname):
-        remote_user_to_swap = utils.user_of(node)
-        remote_privileged_user = remote_user_to_swap
+        try:
+            remote_privileged_user = utils.user_of(node)
+        except UserNotFoundError:
+            remote_privileged_user = local_user
         result = ssh_copy_id_no_raise(local_user, remote_privileged_user, node, local_shell)
         if result.returncode != 0:
-            utils.fatal("Failed to login to remote host {}@{}".format(remote_user_to_swap, node))
+            utils.fatal("Failed to login to remote host {}@{}".format(remote_privileged_user, node))
+        else:
+            user_by_host.add(remote_privileged_user, node)
+            user_by_host.save_local()
         if utils.this_node() in cluster_node_list:
             nodes_including_self = cluster_node_list
         else:
             nodes_including_self = [utils.this_node()]
             nodes_including_self.extend(cluster_node_list)
+        # FIXME: 2 layers of loop is unnecessary?
         _merge_ssh_authorized_keys(shell, user_of_host.UserOfHost.instance(), nodes_including_self)
         if local_user != 'hacluster':
             change_user_shell('hacluster', node)
@@ -2004,10 +2011,11 @@
 
     shell = sh.cluster_shell()
 
-    if is_qdevice_configured and not _context.use_ssh_agent:
-        # trigger init_qnetd_remote on init node
-        cmd = f"crm cluster init qnetd_remote {utils.this_node()} -y"
-        shell.get_stdout_or_raise_error(cmd, seed_host)
+    if is_qdevice_configured:
+        if not _context.use_ssh_agent or not _keys_from_ssh_agent():
+            # trigger init_qnetd_remote on init node
+            cmd = f"crm cluster init qnetd_remote {utils.this_node()} -y"
+            shell.get_stdout_or_raise_error(cmd, seed_host)
 
     shutil.copy(corosync.conf(), COROSYNC_CONF_ORIG)
 
@@ -2167,14 +2175,15 @@
         shell.get_stdout_or_raise_error(cmd, remote)
 
 
-def remove_node_from_cluster(node):
+def remove_node_from_cluster(node, dead_node=False):
     """
     Remove node from running cluster and the corosync / pacemaker configuration.
     """
     node_ip = get_cluster_node_ip(node)
-    stop_services(SERVICES_STOP_LIST, remote_addr=node)
-    qdevice.QDevice.remove_qdevice_db([node])
-    rm_configuration_files(node)
+    if not dead_node:
+        stop_services(SERVICES_STOP_LIST, remote_addr=node)
+        qdevice.QDevice.remove_qdevice_db([node])
+        rm_configuration_files(node)
 
     # execute the command : crm node delete $HOSTNAME
     logger.info("Removing node %s from CIB", node)
@@ -2197,7 +2206,12 @@
 
     sh.cluster_shell().get_stdout_or_raise_error("corosync-cfgtool -R")
 
-    FirewallManager(peer=node).remove_service()
+    if not dead_node:
+        FirewallManager(peer=node).remove_service()
+
+    user_by_host = utils.HostUserConfig()
+    user_by_host.remove(node)
+    user_by_host.save_remote(utils.list_cluster_nodes())
 
 
 def ssh_stage_finished():
@@ -2394,6 +2408,7 @@
         try:
             with lock_inst.lock():
                 service_manager = ServiceManager()
+                utils.check_all_nodes_reachable("joining a node to the 
cluster", cluster_node)
                 _context.node_list_in_cluster = 
utils.fetch_cluster_node_list_from_node(cluster_node)
                 setup_passwordless_with_other_nodes(cluster_node)
                 _context.skip_csync2 = not 
service_manager.service_is_active(CSYNC2_SERVICE, cluster_node)
@@ -2435,7 +2450,7 @@
     if not confirm("Removing QDevice service and configuration from cluster: 
Are you sure?"):
         return
 
-    utils.check_all_nodes_reachable()
+    utils.check_all_nodes_reachable("removing QDevice from the cluster")
     qdevice_reload_policy = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
 
     logger.info("Disable corosync-qdevice.service")
@@ -2501,6 +2516,16 @@
 
     remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, _context.current_user)
 
+    try:
+        utils.check_all_nodes_reachable("removing a node from the cluster")
+    except utils.DeadNodeError as e:
+        if force_flag and cluster_node in e.dead_nodes:
+            remove_node_from_cluster(cluster_node, dead_node=True)
+            bootstrap_finished()
+            return
+        else:
+            raise
+
     if service_manager.service_is_active("pacemaker.service", cluster_node):
         cluster_node = get_node_canonical_hostname(cluster_node)
     else:
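
Net effect of the bootstrap.py changes: `crm cluster remove` now checks
reachability up front, and only with `--force` may it take a shortcut for a
node that is offline and unreachable, skipping every step that would need SSH
access to that node. A condensed sketch of the control flow, reusing names
from the hunks above (not a verbatim excerpt; error handling simplified):

    def remove_cluster_node(cluster_node: str, force_flag: bool):
        try:
            utils.check_all_nodes_reachable("removing a node from the cluster")
        except utils.DeadNodeError as e:
            if force_flag and cluster_node in e.dead_nodes:
                # Dead node: skip stopping services, qdevice DB and firewall
                # cleanup; only drop it from the CIB, corosync and core.hosts.
                remove_node_from_cluster(cluster_node, dead_node=True)
                bootstrap_finished()
                return
            raise
        ...  # normal removal path for reachable nodes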
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/completers.py new/crmsh-5.0.0+20250630.23be67df/crmsh/completers.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/completers.py       2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/completers.py       2025-06-30 10:29:16.000000000 +0200
@@ -70,7 +70,7 @@
 
 
 nodes = call(xmlutil.listnodes)
-online_nodes = call(lambda x: xmlutil.CrmMonXmlParser().get_node_list(x), "online")
-standby_nodes = call(lambda x: xmlutil.CrmMonXmlParser().get_node_list(x), "standby")
+online_nodes = call(lambda x: xmlutil.CrmMonXmlParser().get_node_list(standby=x), False)
+standby_nodes = call(lambda x: xmlutil.CrmMonXmlParser().get_node_list(standby=x), True)
 
 shadows = call(xmlutil.listshadows)
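
The completers change simply tracks the new `get_node_list` signature: callers
select nodes with keyword booleans instead of an attribute string. Roughly,
under the new API:

    parser = xmlutil.CrmMonXmlParser()
    active_nodes = parser.get_node_list()               # online, not standby (defaults)
    standby_nodes = parser.get_node_list(standby=True)  # online but standby
    offline_nodes = parser.get_node_list(online=False)  # offline cluster members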
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/log.py new/crmsh-5.0.0+20250630.23be67df/crmsh/log.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/log.py      2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/log.py      2025-06-30 10:29:16.000000000 +0200
@@ -431,8 +431,9 @@
         """
         while True:
             ans = self.wait_input("{} (y/n)? ".format(msg.strip("? ")))
-            if ans:
-                return ans.lower() == "y"
+            if not ans or ans.lower() not in ('y', 'n'):
+                continue
+            return ans.lower() == 'y'
 
     def syntax_err(self, s, token='', context='', msg=''):
         err = "syntax"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/qdevice.py new/crmsh-5.0.0+20250630.23be67df/crmsh/qdevice.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/qdevice.py  2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/qdevice.py  2025-06-30 10:29:16.000000000 +0200
@@ -630,7 +630,6 @@
         """
         Adjust SBD_WATCHDOG_TIMEOUT when configuring qdevice and diskless SBD
         """
-        utils.check_all_nodes_reachable()
         self.using_diskless_sbd = SBDUtils.is_using_diskless_sbd()
         # add qdevice after diskless sbd started
         if self.using_diskless_sbd:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/ui_sbd.py new/crmsh-5.0.0+20250630.23be67df/crmsh/ui_sbd.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/ui_sbd.py   2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/ui_sbd.py   2025-06-30 10:29:16.000000000 +0200
@@ -516,6 +516,8 @@
             if len(args) < 2:
                 raise self.SyntaxError("No device specified")
 
+            utils.check_all_nodes_reachable("configuring SBD device")
+
             logger.info("Configured sbd devices: %s", 
';'.join(self.device_list_from_config))
             if len(args) == 2 and ";" in args[1]:
                 device_list_from_args = args[1].split(";")
@@ -549,6 +551,8 @@
                 if not self._service_is_active(service):
                     return False
 
+            utils.check_all_nodes_reachable("configuring SBD")
+
             parameter_dict = self._parse_args(args)
             if sbd.SBDUtils.is_using_disk_based_sbd():
                 self._configure_diskbase(parameter_dict)
@@ -572,6 +576,8 @@
         if not self._service_is_active(constants.SBD_SERVICE):
             return False
 
+        utils.check_all_nodes_reachable("purging SBD")
+
         if args and args[0] == "crashdump":
             self._set_crashdump_option(delete=True)
             update_dict = self._set_crashdump_in_sysconfig(restore=True)
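
All three SBD entry points now share the same guard: verify every cluster node
is reachable before touching cluster-wide SBD state, so a dead node can no
longer leave a half-applied configuration. A hypothetical caller would see the
guard surface like this (how the UI layer reports the error is an assumption
here, not part of the patch):

    try:
        utils.check_all_nodes_reachable("purging SBD")
    except utils.DeadNodeError as e:
        logger.error("%s", e)  # names the dead nodes; suggests `crm cluster remove --force`
        return False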
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/utils.py new/crmsh-5.0.0+20250630.23be67df/crmsh/utils.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/utils.py    2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/utils.py    2025-06-30 10:29:16.000000000 +0200
@@ -42,6 +42,7 @@
 from . import options
 from . import term
 from . import log
+from . import xmlutil
 from .prun import prun
 from .sh import ShellUtils
 from .service_manager import ServiceManager
@@ -1722,7 +1723,6 @@
     '''
     Returns a list of nodes in the cluster.
     '''
-    from . import xmlutil
     rc, out, err = ShellUtils().get_stdout_stderr(constants.CIB_QUERY, no_reg=no_reg)
     # When cluster service running
     if rc == 0:
@@ -2460,12 +2460,34 @@
     return dict(re.findall(r"(Expected|Total) votes:\s+(\d+)", out))
 
 
-def check_all_nodes_reachable():
+class DeadNodeError(ValueError):
+    def __init__(self, msg: str, dead_nodes=None):
+        super().__init__(msg)
+        self.dead_nodes = dead_nodes or []
+
+
+def check_all_nodes_reachable(action_to_do: str, peer_node: str = None):
     """
     Check if all cluster nodes are reachable
     """
-    out = sh.cluster_shell().get_stdout_or_raise_error("crm_node -l")
-    for node in re.findall(r"\d+ (.*) \w+", out):
+    crm_mon_inst = xmlutil.CrmMonXmlParser(peer_node)
+    online_nodes = crm_mon_inst.get_node_list()
+    offline_nodes = crm_mon_inst.get_node_list(online=False)
+    dead_nodes = []
+    for node in offline_nodes:
+        try:
+            node_reachable_check(node)
+        except ValueError:
+            dead_nodes.append(node)
+    if dead_nodes:
+        # dead nodes bring risk to cluster, either bring them online or remove them
+        msg = f"""There are offline nodes also unreachable: {', '.join(dead_nodes)}.
+Please bring them online before {action_to_do}.
+Or use `crm cluster remove <offline_node> --force` to remove the offline node.
+        """
+        raise DeadNodeError(msg, dead_nodes)
+
+    for node in online_nodes:
         node_reachable_check(node)
 
 
@@ -3051,6 +3073,10 @@
     def get(self, host):
         return self._hosts_users[host]
 
+    def remove(self, host):
+        if host in self._hosts_users:
+            del self._hosts_users[host]
+
     def add(self, user, host):
         self._hosts_users[host] = user
 
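
The rewritten helper distinguishes three node states as seen by crm_mon:
online nodes must pass node_reachable_check, offline-but-reachable nodes are
tolerated, and offline-and-unreachable nodes abort the action with a
DeadNodeError carrying their names. A minimal usage sketch under those
semantics (hypothetical caller, not from the patch):

    try:
        utils.check_all_nodes_reachable("setup SBD")
    except utils.DeadNodeError as e:
        print(e)             # "There are offline nodes also unreachable: ..."
        print(e.dead_nodes)  # e.g. ['hanode2'], candidates for `crm cluster remove --force`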
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/xmlutil.py new/crmsh-5.0.0+20250630.23be67df/crmsh/xmlutil.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/crmsh/xmlutil.py  2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/crmsh/xmlutil.py  2025-06-30 10:29:16.000000000 +0200
@@ -18,10 +18,7 @@
 from . import constants
 from . import userdir
 from .sh import ShellUtils
-from .utils import add_sudo, str2file, str2tmp, get_boolean, handle_role_for_ocf_1_1, copy_local_file, rmfile
-from .utils import stdout2list, crm_msec, crm_time_cmp
-from .utils import olist, get_cib_in_use, get_tempdir, to_ascii, is_boolean_true
-from .utils import VerifyResult
+from . import utils
 from . import log
 
 
@@ -53,14 +50,14 @@
         # because xmlparse function requires the function descriptor not the plain text
         # and this would be so much work to redo it.
         # It's not too bad, but it's still a workaround and better be refactored, so FIXME!
-        copy_local_file(s, cib_tmp_copy)
+        utils.copy_local_file(s, cib_tmp_copy)
         f = open(cib_tmp_copy, 'r')
         logger.debug("{} successfully read the 
cib.xml".format(userdir.getuser()))
 
     cib_elem = xmlparse(f)
     f.close()
     if cib_tmp_copy != '':
-        rmfile(cib_tmp_copy)
+        utils.rmfile(cib_tmp_copy)
     if options.regression_tests and cib_elem is None:
         print("Failed to read CIB from file: %s" % (s))
     return cib_elem
@@ -91,7 +88,7 @@
 
 
 def sudocall(cmd):
-    cmd = add_sudo(cmd)
+    cmd = utils.add_sudo(cmd)
     if options.regression_tests:
         print(".EXT", cmd)
     p = subprocess.Popen(
@@ -102,7 +99,7 @@
     try:
         outp, errp = p.communicate()
         p.wait()
-        return p.returncode, to_ascii(outp), to_ascii(errp)
+        return p.returncode, utils.to_ascii(outp), utils.to_ascii(errp)
     except IOError as msg:
         logger.error("running %s: %s", cmd, msg)
         return None, None, None
@@ -111,7 +108,7 @@
 def cibdump2file(fname):
     _, outp, _ = sudocall(cib_dump)
     if outp is not None:
-        return str2file(outp, fname)
+        return utils.str2file(outp, fname)
     return None
 
 
@@ -119,7 +116,7 @@
     try:
         _, outp, _ = sudocall(cib_dump)
         if outp is not None:
-            return str2tmp(outp)
+            return utils.str2tmp(outp)
     except IOError as msg:
         logger.error(msg)
     return None
@@ -158,17 +155,17 @@
 
 
 def sanity_check_nvpairs(ident, node, attr_list):
-    rc = VerifyResult.SUCCESS
+    rc = utils.VerifyResult.SUCCESS
     for nvpair in node.iterchildren("nvpair"):
         n = nvpair.get("name")
         if n and n not in attr_list:
             logger.warning("%s: unknown attribute '%s'", ident, n)
-            rc |= VerifyResult.WARNING
+            rc |= utils.VerifyResult.WARNING
     return rc
 
 
 def sanity_check_meta(ident, node, attr_list):
-    rc = VerifyResult.SUCCESS
+    rc = utils.VerifyResult.SUCCESS
     if node is None or not attr_list:
         return rc
     for c in node.iterchildren():
@@ -397,7 +394,7 @@
 
 def is_live_cib():
     '''We working with the live cluster?'''
-    return not get_cib_in_use() and not os.getenv("CIB_file")
+    return not utils.get_cib_in_use() and not os.getenv("CIB_file")
 
 
 def is_crmuser():
@@ -413,14 +410,14 @@
     home = userdir.gethomedir(config.core.user)
     if home and home.startswith(os.path.sep):
         return os.path.join(home, ".cib")
-    return get_tempdir()
+    return utils.get_tempdir()
 
 
 def listshadows():
     d = cib_shadow_dir()
     if not os.path.isdir(d):
         return []
-    rc, l = stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\\.//'" % d)
+    rc, l = utils.stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\\.//'" % 
d)
     return l
 
 
@@ -564,7 +561,7 @@
 
 
 def is_ms_or_promotable_clone(node):
-    is_promotable_type = is_boolean_true(is_attr_set(node, "promotable"))
+    is_promotable_type = utils.is_boolean_true(is_attr_set(node, "promotable"))
     is_ms_type = node.tag in ("master", "ms")
     return is_ms_type or is_promotable_type
 
@@ -826,9 +823,9 @@
     interval = interval or "0"
     for op in matching_name:
         opint = op.get("interval")
-        if interval == "non-0" and crm_msec(opint) > 0:
+        if interval == "non-0" and utils.crm_msec(opint) > 0:
             return op
-        if crm_time_cmp(opint, interval) == 0:
+        if utils.crm_time_cmp(opint, interval) == 0:
             return op
     return None
 
@@ -837,7 +834,7 @@
     interval = (op == "monitor" and "non-0" or "0")
     op_n = find_operation(rsc_node, op == "probe" and "monitor" or op, 
interval)
     timeout = op_n is not None and op_n.get("timeout") or default_timeout
-    return crm_msec(timeout)
+    return utils.crm_msec(timeout)
 
 
 def op2list(node):
@@ -926,11 +923,11 @@
 
 
 def is_resource_cli(s):
-    return s in olist(constants.resource_cli_names)
+    return s in utils.olist(constants.resource_cli_names)
 
 
 def is_constraint_cli(s):
-    return s in olist(constants.constraint_cli_names)
+    return s in utils.olist(constants.constraint_cli_names)
 
 
 def referenced_resources(node):
@@ -1015,7 +1012,7 @@
             l.append(rset)
             c_obj.updated = True
             c_modified = True
-        elif not get_boolean(rset.get("sequential"), True) and rref_cnt > 1:
+        elif not utils.get_boolean(rset.get("sequential"), True) and rref_cnt 
> 1:
             nonseq_rset = True
         cnt += rref_cnt
     rmnodes(l)
@@ -1440,7 +1437,7 @@
     """
     <nvpair name="" value="" />
     """
-    value = handle_role_for_ocf_1_1(value, name=name)
+    value = utils.handle_role_for_ocf_1_1(value, name=name)
     return new("nvpair", name=name, value=value)
 
 
@@ -1534,16 +1531,20 @@
         xpath = f'//node[@name="{node}" and @online="true"]'
         return bool(self.xml_elem.xpath(xpath))
 
-    def get_node_list(self, attr=None):
+    def get_node_list(self, online=True, standby=False, exclude_remote=True) 
-> list[str]:
         """
         Get a list of nodes based on the given attribute
         """
-        attr_dict = {
-            'standby': '[@standby="true"]',
-            'online': '[@standby="false"]'
-        }
-        xpath_str = f'//node{attr_dict.get(attr, "")}'
-        return [e.get('name') for e in self.xml_elem.xpath(xpath_str)]
+        xpath_str = '//nodes/node'
+        conditions = []
+        online_value = "true" if online else "false"
+        conditions.append(f'@online="{online_value}"')
+        standby_value = "true" if standby else "false"
+        conditions.append(f'@standby="{standby_value}"')
+        if exclude_remote:
+            conditions.append('@type="member"')
+        xpath_str += '[' + ' and '.join(conditions) + ']'
+        return [elem.get('name') for elem in self.xml_elem.xpath(xpath_str)]
 
     def is_resource_configured(self, ra_type):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/features/bootstrap_bugs.feature new/crmsh-5.0.0+20250630.23be67df/test/features/bootstrap_bugs.feature
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/features/bootstrap_bugs.feature      2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/features/bootstrap_bugs.feature      2025-06-30 10:29:16.000000000 +0200
@@ -287,3 +287,17 @@
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
     And     Run "crm configure show" on "hanode1"
     Then    Expected "no-quorum-policy=ignore" not in stdout
+
+  @clean
+  Scenario: Join when `core.hosts` is not available from the seed node (bsc#1245343)
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode1"
+    And     Run "crm cluster join -c hanode1 -y" on "hanode2"
+    And     Run "rm -r /root/.config/crm" on "hanode1,hanode2"
+    And     Run "crm cluster join -c hanode1 -y" on "hanode3"
+    Then    Cluster service is "started" on "hanode3"
+    When    Run "crm cluster stop --all" on "hanode3"
+    Then    Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    And     Cluster service is "stopped" on "hanode3"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/features/bootstrap_init_join_remove.feature new/crmsh-5.0.0+20250630.23be67df/test/features/bootstrap_init_join_remove.feature
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/features/bootstrap_init_join_remove.feature  2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/features/bootstrap_init_join_remove.feature  2025-06-30 10:29:16.000000000 +0200
@@ -205,3 +205,16 @@
     Then    Cluster service is "started" on "hanode3"
     And     Online nodes are "hanode1 hanode2 hanode3"
     And     Check passwordless for hacluster between "hanode1 hanode2 hanode3" "successfully"
+
+  @skip_non_root
+  Scenario: Remove offline and unreachable node
+    When    Run "init 0" on "hanode2"
+    Then    Online nodes are "hanode1"
+    When    Run "sleep 10" on "hanode1"
+    When    Try "crm cluster remove hanode2 -y" on "hanode1"
+    Then    Expected "There are offline nodes also unreachable: hanode2" in 
stderr
+    When    Try "crm status|grep "OFFLINE.*hanode2"" on "hanode1"
+    Then    Expected return code is "0"
+    When    Run "crm cluster remove hanode2 -y --force" on "hanode1"
+    When    Try "crm status|grep "OFFLINE.*hanode2"" on "hanode1"
+    Then    Expected return code is "1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_bootstrap.py new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_bootstrap.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_bootstrap.py  2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_bootstrap.py  2025-06-30 10:29:16.000000000 +0200
@@ -181,7 +181,7 @@
         ctx.cluster_is_running = True
         with self.assertRaises(ValueError):
             ctx._validate_sbd_option()
-        mock_check_all.assert_called_once_with()
+        mock_check_all.assert_called_once_with("setup SBD")
         mock_installed.assert_has_calls([
             mock.call("sbd", "node1"),
             mock.call("sbd", "node2")
@@ -950,7 +950,7 @@
             mock.call('alice', 'node1'),
             mock.call('bob', 'node2'),
         ])
-        mock_host_user_config.return_value.save_local.assert_called_once_with()
+        mock_host_user_config.return_value.save_local.assert_called()
         mock_ssh_copy_id.assert_called_once_with('carol', 'foo', 'node2', mock_local_shell.return_value)
         mock_merge_ssh_authorized_keys.assert_called_once_with(mock_cluster_shell.return_value, mock_user_of_host.instance.return_value, ['node3', 'node1', 'node2'])
         mock_change_user_shell.assert_called_once_with('hacluster', 'node2')
@@ -1147,6 +1147,7 @@
         mock_status.assert_not_called()
         mock_disable.assert_called_once_with("corosync-qdevice.service")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.bootstrap._select_user_pair_for_ssh_for_secondary_components')
     @mock.patch('crmsh.utils.HostUserConfig')
     @mock.patch('crmsh.user_of_host.UserOfHost.instance')
@@ -1163,6 +1164,7 @@
             mock_qdevice_configured, mock_confirm, mock_list_nodes, mock_user_of_host,
             mock_host_user_config_class,
             mock_select_user_pair_for_ssh,
+            mock_check_all_nodes
     ):
         mock_list_nodes.return_value = []
         bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
@@ -1186,7 +1188,9 @@
         mock_qdevice_configured.assert_called_once_with()
         mock_confirm.assert_called_once_with("Qdevice is already configured - 
overwrite?")
         self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
+        mock_check_all_nodes.assert_called_once_with("setup Qdevice")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.bootstrap._select_user_pair_for_ssh_for_secondary_components')
     @mock.patch('crmsh.utils.HostUserConfig')
     @mock.patch('crmsh.user_of_host.UserOfHost.instance')
@@ -1201,7 +1205,7 @@
     @mock.patch('logging.Logger.info')
     def test_init_qdevice(self, mock_info, mock_local_shell, mock_ssh, mock_configure_ssh_key, mock_qdevice_configured,
                           mock_this_node, mock_list_nodes, mock_adjust_priority, mock_adjust_fence_delay,
-                          mock_user_of_host, mock_host_user_config_class, mock_select_user_pair_for_ssh):
+                          mock_user_of_host, mock_host_user_config_class, mock_select_user_pair_for_ssh, mock_check_all_nodes):
         bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob")
         mock_this_node.return_value = "192.0.2.100"
         mock_list_nodes.return_value = []
@@ -1230,7 +1234,9 @@
         self.qdevice_with_ip.set_cluster_name.assert_called_once_with()
         self.qdevice_with_ip.valid_qnetd.assert_called_once_with()
         self.qdevice_with_ip.config_and_start_qdevice.assert_called_once_with()
+        mock_check_all_nodes.assert_called_once_with("setup Qdevice")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.utils.fatal')
     @mock.patch('crmsh.utils.HostUserConfig')
     @mock.patch('crmsh.service_manager.ServiceManager.service_is_available')
@@ -1241,6 +1247,7 @@
             mock_info, mock_list_nodes, mock_available,
             mock_host_user_config_class,
             mock_fatal,
+            mock_check_all_nodes
     ):
         bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip)
         mock_list_nodes.return_value = ["node1"]
@@ -1255,6 +1262,7 @@
         mock_fatal.assert_called_once_with("corosync-qdevice.service is not 
available on node1")
         mock_available.assert_called_once_with("corosync-qdevice.service", 
"node1")
         mock_info.assert_called_once_with("Configure Qdevice/Qnetd:")
+        mock_check_all_nodes.assert_called_once_with("setup Qdevice")
 
     @mock.patch('crmsh.bootstrap.prompt_for_string')
     def test_configure_qdevice_interactive_return(self, mock_prompt):
@@ -1364,7 +1372,7 @@
 
         mock_qdevice_configured.assert_called_once_with()
         mock_confirm.assert_called_once_with("Removing QDevice service and 
configuration from cluster: Are you sure?")
-        mock_reachable.assert_called_once_with()
+        mock_reachable.assert_called_once_with("removing QDevice from the 
cluster")
         mock_evaluate.assert_called_once_with(qdevice.QDEVICE_REMOVE)
         mock_status.assert_has_calls([
             mock.call("Disable corosync-qdevice.service"),
@@ -1699,6 +1707,7 @@
         mock_prompt.assert_called_once_with("IP address or hostname of cluster 
node (e.g.: 192.168.1.1)", ".+")
         mock_error.assert_called_once_with("No existing IP/hostname specified 
(use -c option)")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.bootstrap.confirm')
     @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
     @mock.patch('crmsh.bootstrap.remove_qdevice')
@@ -1707,7 +1716,7 @@
     @mock.patch('crmsh.bootstrap.init')
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove_no_confirm(self, mock_context, mock_init, mock_active,
-            mock_error, mock_qdevice, mock_hostname, mock_confirm):
+            mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_check_all_nodes):
         mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
         mock_active.return_value = [True, True]
@@ -1725,7 +1734,9 @@
         mock_error.assert_not_called()
         mock_hostname.assert_called_once_with('node1')
         mock_confirm.assert_called_once_with('Removing node "node1" from the 
cluster: Are you sure?')
+        mock_check_all_nodes.assert_called_once_with("removing a node from the 
cluster")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.utils.this_node')
     @mock.patch('crmsh.bootstrap.confirm')
     @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
@@ -1735,7 +1746,7 @@
     @mock.patch('crmsh.bootstrap.init')
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove_self_need_force(self, mock_context, mock_init, mock_active,
-            mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node):
+                                              mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_check_all_nodes):
         mock_context_inst = mock.Mock(cluster_node="node1", force=False, qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
         mock_active.return_value = [True, True]
@@ -1758,6 +1769,7 @@
         mock_this_node.assert_called_once_with()
         mock_error.assert_called_once_with("Removing self requires --force")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.bootstrap.bootstrap_finished')
     @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
     @mock.patch('crmsh.bootstrap.remove_self')
@@ -1770,7 +1782,7 @@
     @mock.patch('crmsh.bootstrap.init')
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove_self(self, mock_context, mock_init, mock_active,
-                                   mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run, mock_finished):
+                                   mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run, mock_finished, mock_check_all_nodes):
         mock_context_inst = mock.Mock(cluster_node="node1", force=True, qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
         mock_active.return_value = [True, True]
@@ -1791,7 +1803,9 @@
         mock_error.assert_not_called()
         mock_self.assert_called_once_with(True)
         mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node1')
+        mock_check_all_nodes.assert_called_once_with("removing a node from the 
cluster")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.xmlutil.listnodes')
     @mock.patch('crmsh.utils.this_node')
     @mock.patch('crmsh.bootstrap.confirm')
@@ -1802,7 +1816,7 @@
     @mock.patch('crmsh.bootstrap.init')
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove_not_in_cluster(self, mock_context, mock_init, mock_active,
-            mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_list):
+            mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_list, mock_check_all_nodes):
         mock_context_inst = mock.Mock(cluster_node="node2", force=True, qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
         mock_active.return_value = [True, True]
@@ -1824,7 +1838,9 @@
         mock_confirm.assert_not_called()
         mock_this_node.assert_called_once_with()
         mock_error.assert_called_once_with("Specified node node2 is not 
configured in cluster! Unable to remove.")
+        mock_check_all_nodes.assert_called_once_with("removing a node from the 
cluster")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
     @mock.patch('crmsh.utils.fetch_cluster_node_list_from_node')
     @mock.patch('crmsh.bootstrap.remove_node_from_cluster')
@@ -1839,7 +1855,7 @@
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove(self, mock_context, mock_init, mock_active,
             mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node,
-            mock_list, mock_remove, mock_fetch, mock_run):
+            mock_list, mock_remove, mock_fetch, mock_run, mock_check_all_nodes):
         mock_context_inst = mock.Mock(cluster_node="node2", qdevice_rm_flag=None, force=True)
         mock_context.return_value = mock_context_inst
         mock_active.side_effect = [True, False, True]
@@ -1862,6 +1878,7 @@
         mock_error.assert_not_called()
         mock_remove.assert_called_once_with('node2')
         mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node2')
+        mock_check_all_nodes.assert_called_once_with("removing a node from the 
cluster")
 
     @mock.patch('crmsh.utils.fatal')
     @mock.patch('crmsh.sh.ClusterShell.get_rc_stdout_stderr_without_input')
@@ -1987,6 +2004,7 @@
             ])
         mock_error.assert_called_once_with("Removing the node node1 from {} 
failed".format(bootstrap.CSYNC2_CFG))
 
+    @mock.patch('crmsh.utils.HostUserConfig')
     @mock.patch('crmsh.sh.cluster_shell')
     @mock.patch('crmsh.bootstrap.FirewallManager')
     @mock.patch.object(NodeMgmt, 'call_delnode')
@@ -2005,7 +2023,7 @@
     @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
     def test_remove_node_from_cluster_hostname(self, mock_get_ip, mock_stop, mock_status,
             mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_csync2,
-            mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_cal_delnode, mock_firewall, mock_cluster_shell):
+            mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_cal_delnode, mock_firewall, mock_cluster_shell, mock_host_user_config):
         mock_get_ip.return_value = "10.10.10.1"
         mock_cal_delnode.return_value = True
         mock_invoke.side_effect = [(True, None, None)]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_qdevice.py new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_qdevice.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_qdevice.py    2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_qdevice.py    2025-06-30 10:29:16.000000000 +0200
@@ -842,15 +842,13 @@
     @mock.patch('crmsh.sbd.SBDManager.update_sbd_configuration')
     @mock.patch('crmsh.sbd.SBDUtils.get_sbd_value_from_config')
     @mock.patch('crmsh.sbd.SBDUtils.is_using_diskless_sbd')
-    @mock.patch('crmsh.utils.check_all_nodes_reachable')
-    def test_adjust_sbd_watchdog_timeout_with_qdevice(self, mock_check_reachable, mock_using_diskless_sbd, mock_get_sbd_value, mock_update_config, mock_get_timeout, mock_set_property):
+    def test_adjust_sbd_watchdog_timeout_with_qdevice(self, mock_using_diskless_sbd, mock_get_sbd_value, mock_update_config, mock_get_timeout, mock_set_property):
         mock_using_diskless_sbd.return_value = True
         mock_get_sbd_value.return_value = ""
         mock_get_timeout.return_value = 100
 
         self.qdevice_with_stage_cluster_name.adjust_sbd_watchdog_timeout_with_qdevice()
 
-        mock_check_reachable.assert_called_once_with()
         mock_using_diskless_sbd.assert_called_once_with()
         mock_get_sbd_value.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
         mock_update_config.assert_called_once_with({"SBD_WATCHDOG_TIMEOUT": 
str(sbd.SBDTimeout.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE)})
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_ui_sbd.py new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_ui_sbd.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_ui_sbd.py     2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_ui_sbd.py     2025-06-30 10:29:16.000000000 +0200
@@ -535,9 +535,10 @@
         mock_logger_error.assert_called_once_with('%s', "No device specified")
         mock_logger_info.assert_called_once_with("Usage: crm sbd device 
<add|remove> <device>...")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.sbd.SBDUtils.is_using_disk_based_sbd')
-    def test_do_device_add(self, mock_is_using_disk_based_sbd, mock_logger_info):
+    def test_do_device_add(self, mock_is_using_disk_based_sbd, mock_logger_info, mock_check_all_nodes_reachable):
         mock_is_using_disk_based_sbd.return_value = True
         self.sbd_instance_diskbased.service_is_active = mock.Mock(return_value=True)
         self.sbd_instance_diskbased._load_attributes = mock.Mock()
@@ -546,10 +547,12 @@
         self.assertTrue(res)
         self.sbd_instance_diskbased._device_add.assert_called_once_with(["/dev/sda2", "/dev/sda3"])
         mock_logger_info.assert_called_once_with("Configured sbd devices: %s", "/dev/sda1")
+        mock_check_all_nodes_reachable.assert_called_once_with("configuring SBD device")
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.sbd.SBDUtils.is_using_disk_based_sbd')
-    def test_do_device_remove(self, mock_is_using_disk_based_sbd, mock_logger_info):
+    def test_do_device_remove(self, mock_is_using_disk_based_sbd, mock_logger_info, mock_check_all_nodes_reachable):
         mock_is_using_disk_based_sbd.return_value = True
         self.sbd_instance_diskbased.service_is_active = mock.Mock(return_value=True)
         self.sbd_instance_diskbased._load_attributes = mock.Mock()
@@ -558,6 +561,7 @@
         self.assertTrue(res)
         self.sbd_instance_diskbased._device_remove.assert_called_once_with(["/dev/sda1"])
         mock_logger_info.assert_called_once_with("Configured sbd devices: %s", "/dev/sda1")
+        mock_check_all_nodes_reachable.assert_called_once_with("configuring SBD device")
 
     @mock.patch('crmsh.sbd.purge_sbd_from_cluster')
     def test_do_purge_no_service(self, mock_purge_sbd_from_cluster):
@@ -567,8 +571,9 @@
         self.assertFalse(res)
         mock_purge_sbd_from_cluster.assert_not_called()
 
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.sbd.purge_sbd_from_cluster')
-    def test_do_purge(self, mock_purge_sbd_from_cluster):
+    def test_do_purge(self, mock_purge_sbd_from_cluster, mock_check_all_nodes_reachable):
         self.sbd_instance_diskbased._load_attributes = mock.Mock()
         self.sbd_instance_diskbased._service_is_active = mock.Mock(return_value=True)
         res = self.sbd_instance_diskbased.do_purge(mock.Mock())
@@ -577,6 +582,7 @@
         self.sbd_instance_diskbased._load_attributes.assert_called_once()
         self.sbd_instance_diskbased._service_is_active.assert_called_once_with(constants.SBD_SERVICE)
         mock_purge_sbd_from_cluster.assert_called_once_with()
+        mock_check_all_nodes_reachable.assert_called_once_with("purging SBD")
 
     @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
     def test_print_sbd_agent_status(self, mock_CrmMonXmlParser):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_utils.py new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_utils.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_utils.py      2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_utils.py      2025-06-30 10:29:16.000000000 +0200
@@ -971,12 +971,26 @@
 
 
 @mock.patch('crmsh.utils.node_reachable_check')
-@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-def test_check_all_nodes_reachable(mock_run, mock_reachable):
-    mock_run.return_value = "1084783297 15sp2-1 member"
-    utils.check_all_nodes_reachable()
-    mock_run.assert_called_once_with("crm_node -l")
-    mock_reachable.assert_called_once_with("15sp2-1")
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+def test_check_all_nodes_reachable_dead_nodes(mock_xml, mock_reachable):
+    mock_xml_inst = mock.Mock()
+    mock_xml.return_value = mock_xml_inst
+    mock_xml_inst.get_node_list.side_effect = [["node1"], ["node2"]]
+    mock_reachable.side_effect = ValueError
+
+    with pytest.raises(utils.DeadNodeError) as err:
+        utils.check_all_nodes_reachable("testing")
+    assert err.value.dead_nodes == ["node2"]
+
+
+@mock.patch('crmsh.utils.node_reachable_check')
+@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
+def test_check_all_nodes_reachable(mock_xml, mock_reachable):
+    mock_xml_inst = mock.Mock()
+    mock_xml.return_value = mock_xml_inst
+    mock_xml_inst.get_node_list.side_effect = [["node1"], []]
+    utils.check_all_nodes_reachable("testing")
+    mock_reachable.assert_called_once_with("node1")
 
 
 @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_xmlutil.py new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_xmlutil.py
--- old/crmsh-5.0.0+20250623.50ad8e8f/test/unittests/test_xmlutil.py    2025-06-23 13:15:35.000000000 +0200
+++ new/crmsh-5.0.0+20250630.23be67df/test/unittests/test_xmlutil.py    2025-06-30 10:29:16.000000000 +0200
@@ -41,8 +41,8 @@
         assert self.parser_inst.is_node_online("tbw-2") is False
 
     def test_get_node_list(self):
-        assert self.parser_inst.get_node_list("standby") == ['tbw-1']
-        assert self.parser_inst.get_node_list("online") == ['tbw-2']
+        assert self.parser_inst.get_node_list(standby=True) == ['tbw-1']
+        assert self.parser_inst.get_node_list(online=False) == ['tbw-2']
 
     def test_is_resource_configured(self):
         assert self.parser_inst.is_resource_configured("test") is False
