Hello community,

Here is the log from the commit of package crmsh for openSUSE:Factory,
checked in at 2022-12-13 18:56:15.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1835 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Tue Dec 13 18:56:15 2022 rev:271 rq:1042567 version:4.4.1+20221213.6e4f7dfd

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2022-12-09 
13:19:37.663590267 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1835/crmsh.changes    2022-12-13 
18:56:27.259519319 +0100
@@ -1,0 +2,8 @@
+Tue Dec 13 08:20:54 UTC 2022 - xli...@suse.com
+
+- Update to version 4.4.1+20221213.6e4f7dfd:
+  * Dev: unittest: Adjust unit test based on previous changes
+  * Dev: behave: Add functional test for -x option to skip csync2
+  * Dev: bootstrap: Add option -x to skip csync2 initialization stage during 
the whole cluster bootstrap
+
+-------------------------------------------------------------------
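
The changelog entries above all serve one new mechanism: when csync2
initialization is skipped (the new "crm cluster init -x" option, or the
SKIP_CSYNC2_SYNC environment variable), configuration files are propagated by
a plain per-node copy instead of a csync2 run. A condensed, self-contained
sketch of that dispatch is shown below; the helper bodies are placeholders,
not the actual crmsh implementation (see the bootstrap.py diff further down
for the real sync_file()).

    import subprocess

    def csync2_update(path):
        # Default path: let csync2 reconcile the file across the cluster
        # (simplified; the real csync2_update() does more checking).
        subprocess.run(["csync2", "-xv", path], check=False)

    def plain_copy(path, nodes):
        # Fallback when csync2 was skipped: copy the file to every cluster
        # node directly (stand-in for utils.cluster_copy_file()).
        for node in nodes:
            subprocess.run(["scp", path, "root@{}:{}".format(node, path)],
                           check=False)

    def sync_file(path, skip_csync2, node_list_in_cluster):
        # Mirrors the dispatch added to bootstrap.py in this update.
        if skip_csync2:
            plain_copy(path, node_list_in_cluster)
        else:
            csync2_update(path)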

Old:
----
  crmsh-4.4.1+20221207.84e6ea16.tar.bz2

New:
----
  crmsh-4.4.1+20221213.6e4f7dfd.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.SCJuii/_old  2022-12-13 18:56:27.867522564 +0100
+++ /var/tmp/diff_new_pack.SCJuii/_new  2022-12-13 18:56:27.875522607 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.4.1+20221207.84e6ea16
+Version:        4.4.1+20221213.6e4f7dfd
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.SCJuii/_old  2022-12-13 18:56:27.919522841 +0100
+++ /var/tmp/diff_new_pack.SCJuii/_new  2022-12-13 18:56:27.923522863 +0100
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param 
name="changesrevision">84e6ea16faf7d379e21af00ba934a6b644723309</param>
+  <param 
name="changesrevision">196bfc0a6e739054a258f38ddf87012d7ba01dcc</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-4.4.1+20221207.84e6ea16.tar.bz2 -> 
crmsh-4.4.1+20221213.6e4f7dfd.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.4.1+20221207.84e6ea16/crmsh/bootstrap.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/bootstrap.py
--- old/crmsh-4.4.1+20221207.84e6ea16/crmsh/bootstrap.py        2022-12-07 
03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/bootstrap.py        2022-12-13 
09:03:40.000000000 +0100
@@ -34,7 +34,8 @@
 from . import tmpfiles
 from . import lock
 from . import userdir
-from .constants import SSH_OPTION, QDEVICE_HELP_INFO, STONITH_TIMEOUT_DEFAULT, 
REJOIN_COUNT, REJOIN_INTERVAL, PCMK_DELAY_MAX, WAIT_TIMEOUT_MS_DEFAULT
+from .constants import SSH_OPTION, QDEVICE_HELP_INFO, STONITH_TIMEOUT_DEFAULT,\
+        REJOIN_COUNT, REJOIN_INTERVAL, PCMK_DELAY_MAX, CSYNC2_SERVICE, 
WAIT_TIMEOUT_MS_DEFAULT
 from . import ocfs2
 from . import qdevice
 from . import parallax
@@ -57,7 +58,7 @@
 SYSCONFIG_FW_CLUSTER = "/etc/sysconfig/SuSEfirewall2.d/services/cluster"
 PCMK_REMOTE_AUTH = "/etc/pacemaker/authkey"
 COROSYNC_CONF_ORIG = tmpfiles.create()[1]
-SERVICES_STOP_LIST = ["corosync-qdevice.service", "corosync.service", 
"hawk.service", "csync2.socket"]
+SERVICES_STOP_LIST = ["corosync-qdevice.service", "corosync.service", 
"hawk.service", CSYNC2_SERVICE]
 USER_LIST = ["root", "hacluster"]
 WATCHDOG_CFG = "/etc/modules-load.d/watchdog.conf"
 BOOTH_DIR = "/etc/booth"
@@ -91,6 +92,7 @@
         self.no_overwrite_sshkey = None
         self.nic_list = []
         self.node_list = []
+        self.node_list_in_cluster = []
         self.unicast = None
         self.multicast = None
         self.admin_ip = None
@@ -126,6 +128,7 @@
         self.cloud_type = None
         self.is_s390 = False
         self.profiles_data = None
+        self.skip_csync2 = None
         self.profiles_dict = {}
         self.default_nic_list = []
         self.default_ip_list = []
@@ -222,6 +225,10 @@
             logger.warning("-w option is deprecated and will be removed in 
future versions")
         if self.ocfs2_devices or self.stage == "ocfs2":
             ocfs2.OCFS2Manager.verify_ocfs2(self)
+        if not self.skip_csync2 and self.type == "init":
+            self.skip_csync2 = utils.get_boolean(os.getenv("SKIP_CSYNC2_SYNC"))
+        if self.skip_csync2 and self.stage:
+            utils.fatal("-x option or SKIP_CSYNC2_SYNC can't be used with any 
stage")
         self._validate_cluster_node()
         self._validate_nodes_option()
         self._validate_sbd_option()
@@ -439,7 +446,7 @@
     # The communication IP maybe mis-configured
     if not xmlutil.CrmMonXmlParser.is_node_online(peer_node):
         shutil.copy(COROSYNC_CONF_ORIG, corosync.conf())
-        csync2_update(corosync.conf())
+        sync_file(corosync.conf())
         utils.stop_service("corosync")
         print()
         utils.fatal("Cannot see peer node \"{}\", please check the 
communication IP".format(peer_node))
@@ -932,31 +939,48 @@
 
 
 def init_csync2():
+    host_list = _context.node_list_in_cluster
+
     logger.info("Configuring csync2")
     if os.path.exists(CSYNC2_KEY):
         if not confirm("csync2 is already configured - overwrite?"):
             return
 
     invoke("rm", "-f", CSYNC2_KEY)
-    with logger_utils.status_long("Generating csync2 shared key (this may take 
a while)"):
-        if not invokerc("csync2", "-k", CSYNC2_KEY):
-            utils.fatal("Can't create csync2 key {}".format(CSYNC2_KEY))
+    logger.debug("Generating csync2 shared key")
+    if not invokerc("csync2", "-k", CSYNC2_KEY):
+        utils.fatal("Can't create csync2 key {}".format(CSYNC2_KEY))
 
     csync2_file_list = ""
     for f in FILES_TO_SYNC:
         csync2_file_list += "include {};\n".format(f)
 
+    host_str = ""
+    for host in host_list:
+        host_str += 'host {};\n'.format(host)
+
     utils.str2file("""group ha_group
 {{
 key /etc/csync2/key_hagroup;
-host {};
+{}
 {}
 }}
-    """.format(utils.this_node(), csync2_file_list), CSYNC2_CFG)
+    """.format(host_str, csync2_file_list), CSYNC2_CFG)
 
-    utils.start_service("csync2.socket", enable=True)
-    with logger_utils.status_long("csync2 checking files"):
-        invoke("csync2", "-cr", "/")
+    if _context.skip_csync2:
+        for f in [CSYNC2_CFG, CSYNC2_KEY]:
+            sync_file(f)
+
+    for host in host_list:
+        logger.info("Starting {} service on {}".format(CSYNC2_SERVICE, host))
+        utils.start_service(CSYNC2_SERVICE, enable=True, remote_addr=host)
+
+    _msg = "syncing" if _context.skip_csync2 else "checking"
+    with logger_utils.status_long("csync2 {} files".format(_msg)):
+        if _context.skip_csync2:
+            csync2_update("/")
+        else:
+            invoke("csync2", "-cr", "/")
 
 
 def csync2_update(path):
@@ -1164,7 +1188,7 @@
             transport="udpu",
             ipv6=_context.ipv6,
             two_rings=two_rings)
-    csync2_update(corosync.conf())
+    sync_file(corosync.conf())
 
 
 def init_corosync_multicast():
@@ -1241,7 +1265,7 @@
         ipv6=_context.ipv6,
         nodeid=nodeid,
         two_rings=two_rings)
-    csync2_update(corosync.conf())
+    sync_file(corosync.conf())
 
 
 def adjust_corosync_parameters_according_to_profiles():
@@ -1530,44 +1554,46 @@
     """
     if not seed_host:
         utils.fatal("No existing IP/hostname specified (use -c option)")
-    with logger_utils.status_long("Configuring csync2"):
 
-        # Necessary if re-running join on a node that's been configured before.
-        rmfile("/var/lib/csync2/{}.db3".format(utils.this_node()), 
ignore_errors=True)
+    logger.info("Configuring csync2")
+    # Necessary if re-running join on a node that's been configured before.
+    rmfile("/var/lib/csync2/{}.db3".format(utils.this_node()), 
ignore_errors=True)
 
-        # Not automatically updating /etc/hosts - risky in the general case.
-        # etc_hosts_add_me
-        # local hosts_line=$(etc_hosts_get_me)
-        # [ -n "$hosts_line" ] || error "No valid entry for $(hostname) in 
/etc/hosts - csync2 can't work"
-
-        # If we *were* updating /etc/hosts, the next line would have 
"\"$hosts_line\"" as
-        # the last arg (but this requires re-enabling this functionality in 
ha-cluster-init)
-        cmd = "crm cluster init -i {} csync2_remote 
{}".format(_context.default_nic_list[0], utils.this_node())
-        rc, _, err = invoke("ssh {} root@{} {}".format(SSH_OPTION, seed_host, 
cmd))
-        if not rc:
-            utils.fatal("Can't invoke \"{}\" on {}: {}".format(cmd, seed_host, 
err))
+    # Not automatically updating /etc/hosts - risky in the general case.
+    # etc_hosts_add_me
+    # local hosts_line=$(etc_hosts_get_me)
+    # [ -n "$hosts_line" ] || error "No valid entry for $(hostname) in 
/etc/hosts - csync2 can't work"
+
+    # If we *were* updating /etc/hosts, the next line would have 
"\"$hosts_line\"" as
+    # the last arg (but this requires re-enabling this functionality in 
ha-cluster-init)
+    cmd = "crm cluster init -i {} csync2_remote 
{}".format(_context.default_nic_list[0], utils.this_node())
+    rc, _, err = invoke("ssh {} root@{} {}".format(SSH_OPTION, seed_host, cmd))
+    if not rc:
+        utils.fatal("Can't invoke \"{}\" on {}: {}".format(cmd, seed_host, 
err))
 
-        # This is necessary if syncing /etc/hosts (to ensure everyone's got the
-        # same list of hosts)
-        # local tmp_conf=/etc/hosts.$$
-        # invoke scp root@seed_host:/etc/hosts $tmp_conf \
-        #   || error "Can't retrieve /etc/hosts from seed_host"
-        # install_tmp $tmp_conf /etc/hosts
-        rc, _, err = invoke("scp 
root@%s:'/etc/csync2/{csync2.cfg,key_hagroup}' /etc/csync2" % (seed_host))
-        if not rc:
-            utils.fatal("Can't retrieve csync2 config from {}: 
{}".format(seed_host, err))
+    # This is necessary if syncing /etc/hosts (to ensure everyone's got the
+    # same list of hosts)
+    # local tmp_conf=/etc/hosts.$$
+    # invoke scp root@seed_host:/etc/hosts $tmp_conf \
+    #   || error "Can't retrieve /etc/hosts from seed_host"
+    # install_tmp $tmp_conf /etc/hosts
+    rc, _, err = invoke("scp root@%s:'/etc/csync2/{csync2.cfg,key_hagroup}' 
/etc/csync2" % (seed_host))
+    if not rc:
+        utils.fatal("Can't retrieve csync2 config from {}: 
{}".format(seed_host, err))
 
-        utils.start_service("csync2.socket", enable=True)
+    logger.info("Starting {} service".format(CSYNC2_SERVICE))
+    utils.start_service(CSYNC2_SERVICE, enable=True)
 
-        # Sync new config out.  This goes to all hosts; csync2.cfg definitely
-        # needs to go to all hosts (else hosts other than the seed and the
-        # joining host won't have the joining host in their config yet).
-        # Strictly, the rest of the files need only go to the new host which
-        # could theoretically be effected using `csync2 -xv -P $(hostname)`,
-        # but this still leaves all the other files in dirty state (becuase
-        # they haven't gone to all nodes in the cluster, which means a
-        # subseqent join of another node can fail its sync of corosync.conf
-        # when it updates expected_votes.  Grrr...
+    # Sync new config out.  This goes to all hosts; csync2.cfg definitely
+    # needs to go to all hosts (else hosts other than the seed and the
+    # joining host won't have the joining host in their config yet).
+    # Strictly, the rest of the files need only go to the new host which
+    # could theoretically be effected using `csync2 -xv -P $(hostname)`,
+    # but this still leaves all the other files in dirty state (becuase
+    # they haven't gone to all nodes in the cluster, which means a
+    # subseqent join of another node can fail its sync of corosync.conf
+    # when it updates expected_votes.  Grrr...
+    with logger_utils.status_long("csync2 syncing files in cluster"):
         if not invokerc('ssh {} root@{} "csync2 -rm /; csync2 -rxv || csync2 
-rf / && csync2 -rxv"'.format(SSH_OPTION, seed_host)):
             print("")
             logger.warning("csync2 run failed - some files may not be sync'd")
@@ -1579,11 +1605,7 @@
     """
     logger.info("Merging known_hosts")
 
-    hosts = [m.group(1)
-             for m in re.finditer(r"^\s*host\s*([^ ;]+)\s*;", 
open(CSYNC2_CFG).read(), re.M)]
-    if not hosts:
-        hosts = [_cluster_node]
-        logger.warning("Unable to extract host list from %s" % (CSYNC2_CFG))
+    hosts = _context.node_list_in_cluster or [_cluster_node]
 
     # To create local entry in known_hosts
     utils.get_stdout_or_raise_error("ssh {} {} true".format(SSH_OPTION, 
utils.this_node()))
@@ -1684,7 +1706,7 @@
         corosync.set_value("quorum.device.votes", device_votes)
     corosync.set_value("quorum.two_node", 1 if expected_votes == 2 else 0)
 
-    csync2_update(corosync.conf())
+    sync_file(corosync.conf())
 
 
 def setup_passwordless_with_other_nodes(init_node):
@@ -1693,39 +1715,12 @@
 
     Should fetch the node list from init node, then swap the key
     """
-    # Fetch cluster nodes list
-    cmd = "ssh {} root@{} crm_node -l".format(SSH_OPTION, init_node)
-    rc, out, err = utils.get_stdout_stderr(cmd)
-    if rc != 0:
-        utils.fatal("Can't fetch cluster nodes list from {}: 
{}".format(init_node, err))
-    cluster_nodes_list = []
-    for line in out.splitlines():
-        # Parse line in format: <id> <nodename> <state>, and collect the
-        # nodename.
-        tokens = line.split()
-        if len(tokens) == 0:
-            pass  # Skip any spurious empty line.
-        elif len(tokens) < 3:
-            logger.warning("Unable to configure passwordless ssh with nodeid 
{}. The "
-                 "node has no known name and/or state information".format(
-                     tokens[0]))
-        elif tokens[2] != "member":
-            logger.warning("Skipping configuration of passwordless ssh with 
node {} in "
-                 "state '{}'. The node is not a current member".format(
-                     tokens[1], tokens[2]))
-        else:
-            cluster_nodes_list.append(tokens[1])
-
-    # Filter out init node from cluster_nodes_list
-    cmd = "ssh {} root@{} hostname".format(SSH_OPTION, init_node)
-    rc, out, err = utils.get_stdout_stderr(cmd)
-    if rc != 0:
-        utils.fatal("Can't fetch hostname of {}: {}".format(init_node, err))
-    if out in cluster_nodes_list:
-        cluster_nodes_list.remove(out)
-
+    init_hostname = utils.get_stdout_or_raise_error("hostname", 
remote=init_node)
     # Swap ssh public key between join node and other cluster nodes
-    for node in cluster_nodes_list:
+    for node in _context.node_list_in_cluster:
+        # Filter out init node
+        if node == init_hostname:
+            continue
         for user in USER_LIST:
             swap_public_ssh_key(node, user)
 
@@ -1824,7 +1819,7 @@
             corosync.add_node_ucast(ringXaddr_res)
         except corosync.IPAlreadyConfiguredError as e:
             logger.warning(e)
-        csync2_update(corosync.conf())
+        sync_file(corosync.conf())
         invoke("ssh {} root@{} corosync-cfgtool -R".format(SSH_OPTION, 
seed_host))
 
     _context.sbd_manager.join_sbd(seed_host)
@@ -1941,7 +1936,7 @@
     with logger_utils.status_long("Starting corosync-qdevice.service"):
         if not corosync.is_unicast():
             corosync.add_nodelist_from_cmaptool()
-            csync2_update(corosync.conf())
+            sync_file(corosync.conf())
             invoke("crm corosync reload")
         if utils.is_qdevice_tls_on():
             qnetd_addr = corosync.get_value("quorum.device.net.host")
@@ -2023,8 +2018,8 @@
     adjust_properties()
 
     logger.info("Propagating configuration changes across the remaining nodes")
-    csync2_update(CSYNC2_CFG)
-    csync2_update(corosync.conf())
+    sync_file(CSYNC2_CFG)
+    sync_file(corosync.conf())
 
     # Trigger corosync config reload to ensure expected_votes is propagated
     invoke("corosync-cfgtool -R")
@@ -2106,11 +2101,20 @@
             utils.fatal("Expected NODE argument to csync2_remote")
         _context.cluster_node = args[1]
 
+    if stage and _context.cluster_is_running and not 
utils.service_is_active(CSYNC2_SERVICE):
+        _context.skip_csync2 = True
+        _context.node_list_in_cluster = utils.list_cluster_nodes()
+    elif not _context.cluster_is_running:
+        _context.node_list_in_cluster = [utils.this_node()]
+
     if stage != "":
         globals()["init_" + stage]()
     else:
         init_ssh()
-        init_csync2()
+        if _context.skip_csync2:
+            utils.stop_service(CSYNC2_SERVICE, disable=True)
+        else:
+            init_csync2()
         init_corosync()
         init_remote_auth()
         init_sbd()
@@ -2203,9 +2207,16 @@
         lock_inst = lock.RemoteLock(cluster_node)
         try:
             with lock_inst.lock():
+                _context.node_list_in_cluster = 
utils.fetch_cluster_node_list_from_node(cluster_node)
                 setup_passwordless_with_other_nodes(cluster_node)
                 join_remote_auth(cluster_node)
-                join_csync2(cluster_node)
+                _context.skip_csync2 = not 
utils.service_is_active(CSYNC2_SERVICE, cluster_node)
+                if _context.skip_csync2:
+                    utils.stop_service(CSYNC2_SERVICE, disable=True)
+                    retrieve_all_config_files(cluster_node)
+                    logger.warning("csync2 is not initiated yet. Before using 
csync2 for the first time, please run \"crm cluster init csync2 -y\" on any one 
node. Note, this may take a while.")
+                else:
+                    join_csync2(cluster_node)
                 join_ssh_merge(cluster_node)
                 probe_partitions()
                 join_ocfs2(cluster_node)
@@ -2287,6 +2298,10 @@
     if _context.qdevice_rm_flag and _context.cluster_node:
         utils.fatal("Either remove node or qdevice")
 
+    _context.skip_csync2 = not utils.service_is_active(CSYNC2_SERVICE)
+    if _context.skip_csync2:
+        _context.node_list_in_cluster = 
utils.fetch_cluster_node_list_from_node(utils.this_node())
+
     if _context.qdevice_rm_flag:
         remove_qdevice()
         return
@@ -2558,4 +2573,32 @@
     adjust_stonith_timeout()
     adjust_priority_in_rsc_defaults(is_2node_wo_qdevice)
     adjust_priority_fencing_delay(is_2node_wo_qdevice)
+
+
+def retrieve_all_config_files(cluster_node):
+    """
+    Retrieve config files from cluster_node if exists
+    """
+    with logger_utils.status_long("Retrieve all config files"):
+        for f in FILES_TO_SYNC:
+            if f in [CSYNC2_KEY, CSYNC2_CFG]:
+                continue
+            rc, _, _ = utils.run_cmd_on_remote("test -f {}".format(f), 
cluster_node)
+            if rc != 0:
+                continue
+            rc, _, err = utils.get_stdout_stderr("scp {} root@{}:{} 
{}".format(SSH_OPTION, cluster_node, f, os.path.dirname(f)))
+            if rc != 0:
+                utils.fatal("Can't retrieve {} from {}:{}".format(f, 
cluster_node, err))
+            if f in [PCMK_REMOTE_AUTH]:
+                utils.chown(f, "hacluster", "haclient")
+
+
+def sync_file(path):
+    """
+    Sync files between cluster nodes
+    """
+    if _context.skip_csync2:
+        utils.cluster_copy_file(path, nodes=_context.node_list_in_cluster, 
output=False)
+    else:
+        csync2_update(path)
 # EOF
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.4.1+20221207.84e6ea16/crmsh/constants.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/constants.py
--- old/crmsh-4.4.1+20221207.84e6ea16/crmsh/constants.py        2022-12-07 
03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/constants.py        2022-12-13 
09:03:40.000000000 +0100
@@ -525,8 +525,8 @@
 ADVISED_ACTION_LIST = ['monitor', 'start', 'stop', 'promote', 'demote']
 ADVISED_KEY_LIST = ['timeout', 'interval', 'role']
 DEFAULT_INTERVAL_IN_ACTION = "20s"
-
 WAIT_TIMEOUT_MS_DEFAULT = 120000
+CSYNC2_SERVICE = "csync2.socket"
 
 RSC_ROLE_PROMOTED = "Promoted"
 RSC_ROLE_UNPROMOTED = "Unpromoted"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.4.1+20221207.84e6ea16/crmsh/sbd.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/sbd.py
--- old/crmsh-4.4.1+20221207.84e6ea16/crmsh/sbd.py      2022-12-07 
03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/sbd.py      2022-12-13 
09:03:40.000000000 +0100
@@ -212,7 +212,7 @@
         utils.mkdirp(SBD_SYSTEMD_DELAY_START_DIR)
         sbd_delay_start_file = 
"{}/sbd_delay_start.conf".format(SBD_SYSTEMD_DELAY_START_DIR)
         
utils.str2file("[Service]\nTimeoutSec={}".format(int(1.2*int(sbd_delay_start_value))),
 sbd_delay_start_file)
-        bootstrap.csync2_update(SBD_SYSTEMD_DELAY_START_DIR)
+        bootstrap.sync_file(SBD_SYSTEMD_DELAY_START_DIR)
         utils.cluster_run_cmd("systemctl daemon-reload")
 
     def adjust_stonith_timeout(self):
@@ -428,7 +428,7 @@
         Update /etc/sysconfig/sbd
         """
         if self.no_update_config:
-            bootstrap.csync2_update(SYSCONFIG_SBD)
+            bootstrap.sync_file(SYSCONFIG_SBD)
             return
 
         shutil.copyfile(self.SYSCONFIG_SBD_TEMPLATE, SYSCONFIG_SBD)
@@ -439,7 +439,7 @@
         if self._sbd_devices:
             sbd_config_dict["SBD_DEVICE"] = ';'.join(self._sbd_devices)
         utils.sysconfig_set(SYSCONFIG_SBD, **sbd_config_dict)
-        bootstrap.csync2_update(SYSCONFIG_SBD)
+        bootstrap.sync_file(SYSCONFIG_SBD)
 
     def _get_sbd_device_from_config(self):
         """
@@ -601,7 +601,7 @@
         Update and sync sbd configuration
         """
         utils.sysconfig_set(SYSCONFIG_SBD, **sbd_config_dict)
-        bootstrap.csync2_update(SYSCONFIG_SBD)
+        bootstrap.sync_file(SYSCONFIG_SBD)
 
     @staticmethod
     def get_sbd_value_from_config(key):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.4.1+20221207.84e6ea16/crmsh/ui_cluster.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/ui_cluster.py
--- old/crmsh-4.4.1+20221207.84e6ea16/crmsh/ui_cluster.py       2022-12-07 
03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/ui_cluster.py       2022-12-13 
09:03:40.000000000 +0100
@@ -347,6 +347,8 @@
                             help="Enable SBD even if no SBD device is 
configured (diskless mode)")
         parser.add_argument("-w", "--watchdog", dest="watchdog", 
metavar="WATCHDOG",
                             help="Use the given watchdog device or driver 
name")
+        parser.add_argument("-x", "--skip-csync2-sync", dest="skip_csync2", 
action="store_true",
+                            help="Skip csync2 initialization (an experimental 
option)")
         parser.add_argument("--no-overwrite-sshkey", action="store_true", 
dest="no_overwrite_sshkey",
                             help='Avoid "/root/.ssh/id_rsa" overwrite if "-y" 
option is used (False by default; Deprecated)')
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.4.1+20221207.84e6ea16/crmsh/utils.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/utils.py
--- old/crmsh-4.4.1+20221207.84e6ea16/crmsh/utils.py    2022-12-07 
03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/utils.py    2022-12-13 
09:03:40.000000000 +0100
@@ -1925,30 +1925,25 @@
         print("%-16s: %s" % (host, 
hashlib.sha1(open(path).read()).hexdigest()))
 
 
-def cluster_copy_file(local_path, nodes=None):
+def cluster_copy_file(local_path, nodes=None, output=True):
     """
     Copies given file to all other cluster nodes.
     """
-    try:
-        import parallax
-    except ImportError:
-        raise ValueError("parallax is required to copy cluster files")
     if not nodes:
-        nodes = list_cluster_nodes()
-        nodes.remove(this_node())
-    opts = parallax.Options()
-    opts.timeout = 60
-    opts.ssh_options += ['ControlPersist=no']
-    ok = True
-    for host, result in parallax.copy(nodes,
-                                      local_path,
-                                      local_path, opts).items():
+        nodes = list_cluster_nodes_except_me()
+    rc = True
+    if not nodes:
+        return rc
+    results = parallax.parallax_copy(nodes, local_path, local_path, 
strict=False)
+    for host, result in results:
         if isinstance(result, parallax.Error):
-            logger.error("Failed to push %s to %s: %s", local_path, host, 
result)
-            ok = False
-        else:
+            logger.error("Failed to copy %s to %s: %s", local_path, host, 
result)
+            rc = False
+        elif output:
             logger.info(host)
-    return ok
+        else:
+            logger.debug("Sync file %s to %s", local_path, host)
+    return rc
 
 
 # a set of fnmatch patterns to match attributes whose values
@@ -3164,4 +3159,24 @@
         time.sleep(interval)
         current_time = int(time.time())
     raise TimeoutError
+
+
+def fetch_cluster_node_list_from_node(init_node):
+    """
+    Fetch cluster member list from one known cluster node
+    """
+    cluster_nodes_list = []
+    out = get_stdout_or_raise_error("crm_node -l", remote=init_node)
+    for line in out.splitlines():
+        # Parse line in format: <id> <nodename> <state>, and collect the 
nodename.
+        tokens = line.split()
+        if len(tokens) == 0:
+            pass  # Skip any spurious empty line.
+        elif len(tokens) < 3:
+            logger.warning("The node '%s' has no known name and/or state 
information", tokens[0])
+        elif tokens[2] != "member":
+            logger.warning("The node '%s'(state '%s') is not a current 
member", tokens[1], tokens[2])
+        else:
+            cluster_nodes_list.append(tokens[1])
+    return cluster_nodes_list
 # vim:ts=4:sw=4:et:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.4.1+20221207.84e6ea16/test/features/bootstrap_options.feature 
new/crmsh-4.4.1+20221213.6e4f7dfd/test/features/bootstrap_options.feature
--- old/crmsh-4.4.1+20221207.84e6ea16/test/features/bootstrap_options.feature   
2022-12-07 03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/test/features/bootstrap_options.feature   
2022-12-13 09:03:40.000000000 +0100
@@ -137,3 +137,17 @@
     When    Run "crm cluster init -N hanode1 -N hanode2 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Cluster service is "started" on "hanode2"
+
+  @clean
+  Scenario: Skip using csync2 by -x option
+    Given   Cluster service is "stopped" on "hanode1"
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y -x" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "csync2.socket" is "stopped" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Service "csync2.socket" is "stopped" on "hanode2"
+    When    Run "crm cluster init csync2 -y" on "hanode1"
+    Then    Service "csync2.socket" is "started" on "hanode1"
+    And     Service "csync2.socket" is "started" on "hanode2"
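
The scenario above exercises the command-line path; in bootstrap.py the same
decision can also come from the SKIP_CSYNC2_SYNC environment variable, and
skipping csync2 cannot be combined with running a single bootstrap stage. A
simplified sketch of that precedence follows; _get_boolean() is a hypothetical
stand-in for crmsh.utils.get_boolean().

    import os

    def _get_boolean(value):
        # Hypothetical stand-in for crmsh.utils.get_boolean().
        return str(value).lower() in ("1", "yes", "true", "on")

    def should_skip_csync2(cli_skip, stage, bootstrap_type="init"):
        # -x on the command line sets cli_skip; otherwise the environment
        # variable is consulted, but only for "crm cluster init".
        skip = cli_skip
        if not skip and bootstrap_type == "init":
            skip = _get_boolean(os.getenv("SKIP_CSYNC2_SYNC"))
        if skip and stage:
            # Same validation as added in Context: no stage with -x.
            raise ValueError(
                "-x option or SKIP_CSYNC2_SYNC can't be used with any stage")
        return skip
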
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.4.1+20221207.84e6ea16/test/features/steps/const.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/test/features/steps/const.py
--- old/crmsh-4.4.1+20221207.84e6ea16/test/features/steps/const.py      
2022-12-07 03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/test/features/steps/const.py      
2022-12-13 09:03:40.000000000 +0100
@@ -77,6 +77,8 @@
                         (diskless mode)
   -w WATCHDOG, --watchdog WATCHDOG
                         Use the given watchdog device or driver name
+  -x, --skip-csync2-sync
+                        Skip csync2 initialization (an experimental option)
   --no-overwrite-sshkey
                         Avoid "/root/.ssh/id_rsa" overwrite if "-y" option is
                         used (False by default; Deprecated)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.4.1+20221207.84e6ea16/test/unittests/test_bootstrap.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_bootstrap.py
--- old/crmsh-4.4.1+20221207.84e6ea16/test/unittests/test_bootstrap.py  
2022-12-07 03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_bootstrap.py  
2022-12-13 09:03:40.000000000 +0100
@@ -544,57 +544,14 @@
             mock.call("Running command on node3: crm cluster join -y -c node1 
-i eth1")
             ])
 
-    @mock.patch('crmsh.utils.fatal')
-    @mock.patch('crmsh.utils.get_stdout_stderr')
-    def test_setup_passwordless_with_other_nodes_failed_fetch_nodelist(self, 
mock_run, mock_error):
-        mock_run.return_value = (1, None, None)
-        mock_error.side_effect = SystemExit
-
-        with self.assertRaises(SystemExit):
-            bootstrap.setup_passwordless_with_other_nodes("node1")
-
-        mock_run.assert_called_once_with("ssh {} root@node1 crm_node 
-l".format(constants.SSH_OPTION))
-        mock_error.assert_called_once_with("Can't fetch cluster nodes list 
from node1: None")
-
-    @mock.patch('crmsh.utils.fatal')
-    @mock.patch('crmsh.utils.get_stdout_stderr')
-    def test_setup_passwordless_with_other_nodes_failed_fetch_hostname(self, 
mock_run, mock_error):
-        out_node_list = """1 node1 member
-        2 node2 member"""
-        mock_run.side_effect = [
-                (0, out_node_list, None),
-                (1, None, None)
-                ]
-        mock_error.side_effect = SystemExit
-
-        with self.assertRaises(SystemExit):
-            bootstrap.setup_passwordless_with_other_nodes("node1")
-
-        mock_run.assert_has_calls([
-            mock.call("ssh {} root@node1 crm_node 
-l".format(constants.SSH_OPTION)),
-            mock.call("ssh {} root@node1 
hostname".format(constants.SSH_OPTION))
-            ])
-        mock_error.assert_called_once_with("Can't fetch hostname of node1: 
None")
-
     @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
-    @mock.patch('crmsh.utils.get_stdout_stderr')
+    @mock.patch('crmsh.utils.get_stdout_or_raise_error')
     def test_setup_passwordless_with_other_nodes(self, mock_run, mock_swap):
-        out_node_list = """1 node1 member
-        2 node2 member"""
-        mock_run.side_effect = [
-                (0, out_node_list, None),
-                (0, "node1", None)
-                ]
-
+        bootstrap._context = mock.Mock(node_list_in_cluster=["node1", "node2"])
+        mock_run.return_value = "node1"
         bootstrap.setup_passwordless_with_other_nodes("node1")
-
-        mock_run.assert_has_calls([
-            mock.call("ssh {} root@node1 crm_node 
-l".format(constants.SSH_OPTION)),
-            mock.call("ssh {} root@node1 
hostname".format(constants.SSH_OPTION))
-            ])
         mock_swap.assert_has_calls([
-            mock.call("node2", "root"),
-            mock.call("node2", "hacluster")
+            mock.call("node2", u) for u in bootstrap.USER_LIST
             ])
 
     @mock.patch('builtins.open')
@@ -681,7 +638,7 @@
 
     @mock.patch('crmsh.utils.fatal')
     @mock.patch('crmsh.utils.stop_service')
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.corosync.conf')
     @mock.patch('shutil.copy')
     @mock.patch('crmsh.utils.this_node')
@@ -711,7 +668,7 @@
 
     @mock.patch('crmsh.utils.fatal')
     @mock.patch('crmsh.utils.stop_service')
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.corosync.conf')
     @mock.patch('shutil.copy')
     @mock.patch('crmsh.utils.this_node')
@@ -990,7 +947,7 @@
     @mock.patch('crmsh.corosync.get_value')
     @mock.patch('crmsh.utils.is_qdevice_tls_on')
     @mock.patch('crmsh.bootstrap.invoke')
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.corosync.conf')
     @mock.patch('crmsh.corosync.add_nodelist_from_cmaptool')
     @mock.patch('crmsh.corosync.is_unicast')
@@ -1131,6 +1088,18 @@
         mock_adj_priority.assert_called_once_with(True)
         mock_adj_fence.assert_called_once_with(True)
 
+    @mock.patch('crmsh.utils.cluster_copy_file')
+    def test_sync_file_skip_csync2(self, mock_copy):
+        bootstrap._context = mock.Mock(skip_csync2=True, 
node_list_in_cluster=["node1", "node2"])
+        bootstrap.sync_file("/file1")
+        mock_copy.assert_called_once_with("/file1", nodes=["node1", "node2"], 
output=False)
+
+    @mock.patch('crmsh.bootstrap.csync2_update')
+    def test_sync_file(self, mock_csync2_update):
+        bootstrap._context = mock.Mock(skip_csync2=False)
+        bootstrap.sync_file("/file1")
+        mock_csync2_update.assert_called_once_with("/file1")
+
 
 class TestValidation(unittest.TestCase):
     """
@@ -1247,12 +1216,15 @@
             mock_error, mock_qdevice):
         mock_context_inst = mock.Mock(qdevice=True, cluster_node=None)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.return_value = [True, True]
 
         bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_error.assert_not_called()
         mock_qdevice.assert_called_once_with()
 
@@ -1284,7 +1256,7 @@
             mock_error, mock_qdevice, mock_status, mock_prompt):
         mock_context_inst = mock.Mock(yes_to_all=False, cluster_node=None, 
qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.return_value = [True, True]
         mock_prompt.return_value = None
         mock_error.side_effect = SystemExit
 
@@ -1292,7 +1264,10 @@
             bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_qdevice.assert_not_called()
         mock_status.assert_called_once_with('Remove This Node from Cluster:\n  
You will be asked for the IP address or name of an existing node,\n  which will 
be removed from the cluster. This command must be\n  executed from a different 
node in the cluster.\n')
         mock_prompt.assert_called_once_with("IP address or hostname of cluster 
node (e.g.: 192.168.1.1)", ".+")
@@ -1309,14 +1284,17 @@
             mock_error, mock_qdevice, mock_hostname, mock_confirm):
         mock_context_inst = mock.Mock(cluster_node="node1", force=False, 
qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.return_value = [True, True]
         mock_hostname.return_value = "node1"
         mock_confirm.return_value = False
 
         bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_qdevice.assert_not_called()
         mock_error.assert_not_called()
         mock_hostname.assert_called_once_with()
@@ -1334,7 +1312,7 @@
             mock_error, mock_qdevice, mock_hostname, mock_confirm, 
mock_this_node):
         mock_context_inst = mock.Mock(cluster_node="node1", force=False, 
qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.return_value = [True, True]
         mock_hostname.return_value = "node1"
         mock_confirm.return_value = True
         mock_this_node.return_value = "node1"
@@ -1344,7 +1322,10 @@
             bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_qdevice.assert_not_called()
         mock_hostname.assert_called_once_with()
         mock_confirm.assert_called_once_with('Removing node "node1" from the 
cluster: Are you sure?')
@@ -1364,14 +1345,17 @@
             mock_error, mock_qdevice, mock_hostname, mock_confirm, 
mock_this_node, mock_self):
         mock_context_inst = mock.Mock(cluster_node="node1", force=True, 
qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.return_value = [True, True]
         mock_hostname.return_value = "node1"
         mock_this_node.return_value = "node1"
 
         bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_qdevice.assert_not_called()
         mock_hostname.assert_called_once_with()
         mock_confirm.assert_not_called()
@@ -1392,7 +1376,7 @@
             mock_error, mock_qdevice, mock_hostname, mock_confirm, 
mock_this_node, mock_list):
         mock_context_inst = mock.Mock(cluster_node="node2", force=True, 
qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.return_value = [True, True]
         mock_hostname.return_value = "node2"
         mock_this_node.return_value = "node1"
         mock_list.return_value = ["node1", "node3"]
@@ -1402,13 +1386,17 @@
             bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_qdevice.assert_not_called()
         mock_hostname.assert_called_once_with()
         mock_confirm.assert_not_called()
         mock_this_node.assert_called_once_with()
         mock_error.assert_called_once_with("Specified node node2 is not 
configured in cluster! Unable to remove.")
 
+    @mock.patch('crmsh.utils.fetch_cluster_node_list_from_node')
     @mock.patch('crmsh.bootstrap.remove_node_from_cluster')
     @mock.patch('crmsh.xmlutil.listnodes')
     @mock.patch('crmsh.utils.this_node')
@@ -1421,22 +1409,25 @@
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove(self, mock_context, mock_init, mock_active,
             mock_error, mock_qdevice, mock_hostname, mock_confirm, 
mock_this_node,
-            mock_list, mock_remove):
+            mock_list, mock_remove, mock_fetch):
         mock_context_inst = mock.Mock(cluster_node="node2", 
qdevice_rm_flag=None, force=True)
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
+        mock_active.side_effect = [True, False]
         mock_hostname.return_value = "node2"
         mock_this_node.return_value = "node1"
         mock_list.return_value = ["node1", "node2"]
+        mock_fetch.return_value = ["node1", "node2"]
 
         bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_has_calls([
+            mock.call("corosync.service"),
+            mock.call("csync2.socket")
+            ])
         mock_qdevice.assert_not_called()
         mock_hostname.assert_called_once_with()
         mock_confirm.assert_not_called()
-        mock_this_node.assert_called_once_with()
         mock_error.assert_not_called()
         mock_remove.assert_called_once_with()
 
@@ -1563,7 +1554,7 @@
     @mock.patch('crmsh.bootstrap.rm_configuration_files')
     @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
     @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.bootstrap.decrease_expected_votes')
     @mock.patch('crmsh.corosync.del_node')
     @mock.patch('crmsh.corosync.get_values')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.4.1+20221207.84e6ea16/test/unittests/test_sbd.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_sbd.py
--- old/crmsh-4.4.1+20221207.84e6ea16/test/unittests/test_sbd.py        
2022-12-07 03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_sbd.py        
2022-12-13 09:03:40.000000000 +0100
@@ -255,7 +255,7 @@
         mock_mkdirp.assert_not_called()
 
     @mock.patch('crmsh.utils.cluster_run_cmd')
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.utils.str2file')
     @mock.patch('crmsh.utils.mkdirp')
     @mock.patch('crmsh.utils.get_systemd_timeout_start_in_sec')
@@ -505,7 +505,7 @@
             ])
         mock_error.assert_called_once_with("Failed to initialize SBD device 
/dev/sdc1: error")
 
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.utils.sysconfig_set')
     @mock.patch('shutil.copyfile')
     def test_update_configuration(self, mock_copy, mock_sysconfig, 
mock_update):
@@ -882,7 +882,7 @@
         mock_context.assert_called_once_with()
         mock_get_sbd.assert_called_once_with()
 
-    @mock.patch('crmsh.bootstrap.csync2_update')
+    @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.utils.sysconfig_set')
     def test_update_configuration_static(self, mock_config_set, mock_csync2):
         sbd_config_dict = {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.4.1+20221207.84e6ea16/test/unittests/test_utils.py 
new/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_utils.py
--- old/crmsh-4.4.1+20221207.84e6ea16/test/unittests/test_utils.py      
2022-12-07 03:10:51.000000000 +0100
+++ new/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_utils.py      
2022-12-13 09:03:40.000000000 +0100
@@ -13,7 +13,7 @@
 import logging
 from unittest import mock
 from itertools import chain
-from crmsh import utils, config, tmpfiles, constants
+from crmsh import utils, config, tmpfiles, constants, parallax
 
 logging.basicConfig(level=logging.DEBUG)
 
@@ -1733,3 +1733,26 @@
 
 def test_compatible_role():
     assert utils.compatible_role("Slave", "Unpromoted") is True
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.get_stdout_or_raise_error')
+def test_fetch_cluster_node_list_from_node(mock_run, mock_warn):
+    mock_run.return_value = """
+
+    1 node1
+    2 node2 lost
+    3 node3 member
+    """
+    assert utils.fetch_cluster_node_list_from_node("node1") == ["node3"]
+    mock_run.assert_called_once_with("crm_node -l", remote="node1")
+    mock_warn.assert_has_calls([
+        mock.call("The node '%s' has no known name and/or state information", 
"1"),
+        mock.call("The node '%s'(state '%s') is not a current member", 
"node2", "lost")
+        ])
+
+
+@mock.patch('crmsh.utils.list_cluster_nodes_except_me')
+def test_cluster_copy_file_return(mock_list_nodes):
+    mock_list_nodes.return_value = []
+    assert utils.cluster_copy_file("/file1") == True
