Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2024-04-25 20:48:33
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1880 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Thu Apr 25 20:48:33 2024 rev:331 rq:1170010 version:4.6.0+20240424.11e262d0

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2024-04-11 19:41:41.054939497 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1880/crmsh.changes    2024-04-25 20:48:35.616783113 +0200
@@ -1,0 +2,27 @@
+Wed Apr 24 09:43:44 UTC 2024 - xli...@suse.com
+
+- Update to version 4.6.0+20240424.11e262d0:
+  * Dev: behave: Add functional test for previous change
+  * Dev: ui_context: Skip querying CIB when in a sublevel or help command
+  * Dev: unittest: Adjust unit test for previous commit
+  * Dev: behave: Adjust functional test for previous changes
+  * Dev: bootstrap: Setup the stage dependency on init and join side (bsc#1175865, bsc#1219940)
+  * Dev: ui_cluster: Remove unused codes
+  * Dev: bootstrap: Enhance stage validation
+  * Dev: bootstrap: Remove unused function join_remote_auth
+
+-------------------------------------------------------------------
+Mon Apr 22 07:38:44 UTC 2024 - xli...@suse.com
+
+- Update to version 4.6.0+20240422.73eaf02a:
+  * Fix: healthcheck: Add crmsh.constants.SSH_OPTION when doing ssh in check_local
+  * Fix: healthcheck: Missing 'id_' prefix while checking the ssh key existence
+
+-------------------------------------------------------------------
+Tue Apr 16 07:54:40 UTC 2024 - xli...@suse.com
+
+- Update to version 4.6.0+20240416.3c953893:
+  * Dev: unittest: Adjust unit test for previous commit
+  * Fix: bootstrap: Detect cluster service on init node before saving the canonical hostname (bsc#1222714)
+
+-------------------------------------------------------------------
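
For readers skimming the changelog: the headline change is that "crm cluster init <stage>" and
"crm cluster join <stage>" now verify that the earlier stages have already completed. The real
implementation is in the bootstrap.py hunk further down (INIT_STAGE_CHECKER, JOIN_STAGE_CHECKER
and check_stage_dependency); the snippet below is only a minimal standalone sketch of that idea,
with hypothetical probe callables standing in for crmsh's own "is this stage finished?" checks.

  # Minimal sketch of the stage-dependency gate; stage names match crmsh,
  # but the probe callables are illustrative stand-ins, not crmsh APIs.
  INIT_STAGE_ORDER = ("ssh", "csync2", "corosync", "cluster")

  def check_stage_dependency(stage, finished_probes):
      """Abort unless every stage preceding `stage` reports itself finished."""
      for name in INIT_STAGE_ORDER:
          if name == stage:
              return
          if not finished_probes[name]():
              raise SystemExit(f"Please run '{name}' stage first")

  # Example: requesting the 'cluster' stage before csync2 has run fails fast.
  probes = {"ssh": lambda: True, "csync2": lambda: False, "corosync": lambda: False}
  # check_stage_dependency("cluster", probes)  # SystemExit: Please run 'csync2' stage first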

Old:
----
  crmsh-4.6.0+20240411.9c4bf6c1.tar.bz2

New:
----
  crmsh-4.6.0+20240424.11e262d0.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.KxoAoj/_old  2024-04-25 20:48:36.488815133 +0200
+++ /var/tmp/diff_new_pack.KxoAoj/_new  2024-04-25 20:48:36.492815280 +0200
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.6.0+20240411.9c4bf6c1
+Version:        4.6.0+20240424.11e262d0
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.KxoAoj/_old  2024-04-25 20:48:36.536816896 +0200
+++ /var/tmp/diff_new_pack.KxoAoj/_new  2024-04-25 20:48:36.540817042 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">a369f9ca76c9bc8b063843ba9d913fdab6db7bf6</param>
+  <param name="changesrevision">11e262d05440b23219eb9365e81d65488e83e080</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-4.6.0+20240411.9c4bf6c1.tar.bz2 -> crmsh-4.6.0+20240424.11e262d0.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/bootstrap.py new/crmsh-4.6.0+20240424.11e262d0/crmsh/bootstrap.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/bootstrap.py        2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/crmsh/bootstrap.py        2024-04-24 11:30:11.000000000 +0200
@@ -47,6 +47,7 @@
 from .sh import ShellUtils
 from .ui_node import NodeMgmt
 from .user_of_host import UserOfHost, UserNotFoundError
+import crmsh.healthcheck
 
 logger = log.setup_logger(__name__)
 logger_utils = log.LoggerUtils(logger)
@@ -74,7 +75,11 @@
         "/etc/drbd.conf", "/etc/drbd.d", "/etc/ha.d/ldirectord.cf", 
"/etc/lvm/lvm.conf", "/etc/multipath.conf",
         "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SYSCONFIG_SBD, 
PCMK_REMOTE_AUTH, WATCHDOG_CFG,
         PROFILES_FILE, CRM_CFG, SBD_SYSTEMD_DELAY_START_DIR)
-INIT_STAGES = ("ssh", "csync2", "csync2_remote", "qnetd_remote", "corosync", 
"remote_auth", "sbd", "cluster", "ocfs2", "admin", "qdevice")
+
+INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", 
"ocfs2", "admin", "qdevice")
+INIT_STAGES_INTERNAL = ("csync2_remote", "qnetd_remote", "remote_auth")
+INIT_STAGES_ALL = INIT_STAGES_EXTERNAL + INIT_STAGES_INTERNAL
+JOIN_STAGES_EXTERNAL = ("ssh", "csync2", "ssh_merge", "cluster")
 
 
 class Context(object):
@@ -232,7 +237,7 @@
         """
         Validate cluster_node on join side
         """
-        if self.cluster_node and self.type == 'join':
+        if self.type == "join" and self.cluster_node:
             user, node = _parse_user_at_host(self.cluster_node, None)
             try:
                 # self.cluster_node might be hostname or IP address
@@ -240,7 +245,32 @@
                 if utils.InterfacesInfo.ip_in_local(ip_addr):
                     utils.fatal("Please specify peer node's hostname or IP 
address")
             except socket.gaierror as err:
-                utils.fatal("\"{}\": {}".format(node, err))
+                utils.fatal(f"\"{node}\": {err}")
+
+    def _validate_stage(self):
+        """
+        Validate stage argument
+        """
+        if not self.stage:
+            if self.cluster_is_running:
+                utils.fatal("Cluster is already running!")
+            return
+
+        if self.type == "init":
+            if self.stage not in INIT_STAGES_ALL:
+                utils.fatal(f"Invalid stage: {self.stage}(available stages: {', '.join(INIT_STAGES_EXTERNAL)})")
+            if self.stage in ("admin", "qdevice", "ocfs2") and not self.cluster_is_running:
+                utils.fatal(f"Cluster is inactive, can't run '{self.stage}' stage")
+            if self.stage in ("corosync", "cluster") and self.cluster_is_running:
+                utils.fatal(f"Cluster is active, can't run '{self.stage}' stage")
+
+        elif self.type == "join":
+            if self.stage not in JOIN_STAGES_EXTERNAL:
+                utils.fatal(f"Invalid stage: {self.stage}(available stages: {', '.join(JOIN_STAGES_EXTERNAL)})")
+            if self.stage and self.cluster_node is None:
+                utils.fatal(f"Can't use stage({self.stage}) without specifying cluster node")
+            if self.stage in ("cluster", ) and self.cluster_is_running:
+                utils.fatal(f"Cluster is active, can't run '{self.stage}' stage")
 
     def validate_option(self):
         """
@@ -263,6 +293,7 @@
             self.skip_csync2 = utils.get_boolean(os.getenv("SKIP_CSYNC2_SYNC"))
         if self.skip_csync2 and self.stage:
             utils.fatal("-x option or SKIP_CSYNC2_SYNC can't be used with any 
stage")
+        self._validate_stage()
         self._validate_cluster_node()
         self._validate_nodes_option()
         self._validate_sbd_option()
@@ -553,7 +584,7 @@
         return False
 
 
-def check_prereqs(stage):
+def check_prereqs():
     warned = False
 
     if not my_hostname_resolves():
@@ -1702,6 +1733,7 @@
     user_by_host.add(local_user, utils.this_node())
     user_by_host.set_no_generating_ssh_key(bool(ssh_public_keys))
     user_by_host.save_local()
+    detect_cluster_service_on_node(seed_host)
     user_by_host.add(seed_user, get_node_canonical_hostname(seed_host))
     user_by_host.save_local()
 
@@ -1709,6 +1741,9 @@
     change_user_shell('hacluster')
     swap_public_ssh_key_for_secondary_user(sh.cluster_shell(), seed_host, 'hacluster')
 
+    if _context.stage:
+        setup_passwordless_with_other_nodes(seed_host, seed_user)
+
 
 def join_ssh_with_ssh_agent(
         local_shell: sh.LocalShell,
@@ -2366,46 +2401,87 @@
     corosync.set_value("quorum.expected_votes", str(new_quorum))
 
 
+def ssh_stage_finished():
+    """
+    Dectect if the ssh stage is finished
+    """
+    feature_check = crmsh.healthcheck.PasswordlessHaclusterAuthenticationFeature()
+    return feature_check.check_quick() and feature_check.check_local([utils.this_node()])
+
+
+def csync2_stage_finished():
+    """
+    Dectect if the csync2 stage is finished
+    """
+    return ServiceManager().service_is_active(CSYNC2_SERVICE)
+
+
+def corosync_stage_finished():
+    """
+    Dectect if the corosync stage is finished
+    """
+    return os.path.exists(corosync.conf())
+
+
+INIT_STAGE_CHECKER = {
+        # stage: (function, is_internal)
+        "ssh": (ssh_stage_finished, False),
+        "csync2": (csync2_stage_finished, False),
+        "corosync": (corosync_stage_finished, False),
+        "remote_auth": (init_remote_auth, True),
+        "sbd": (lambda: True, False),
+        "upgradeutil": (init_upgradeutil, True),
+        "cluster": (is_online, False)
+}
+
+
+JOIN_STAGE_CHECKER = {
+        # stage: (function, is_internal)
+        "ssh": (ssh_stage_finished, False),
+        "csync2": (csync2_stage_finished, False),
+        "ssh_merge": (lambda: True, False),
+        "cluster": (is_online, False)
+}
+
+
+def check_stage_dependency(stage):
+    stage_checker = INIT_STAGE_CHECKER if _context.type == "init" else JOIN_STAGE_CHECKER
+    if stage not in stage_checker:
+        return
+    stage_order = list(stage_checker.keys())
+    for stage_name in stage_order:
+        if stage == stage_name:
+            break
+        func, is_internal = stage_checker[stage_name]
+        if is_internal:
+            func()
+        elif not func():
+            utils.fatal(f"Please run '{stage_name}' stage first")
+
+
 def bootstrap_init(context):
     """
     Init cluster process
     """
     global _context
     _context = context
-
-    init()
-
     stage = _context.stage
-    if stage is None:
-        stage = ""
 
-    # vgfs stage requires running cluster, everything else requires inactive cluster,
-    # except ssh and csync2 (which don't care) and csync2_remote (which mustn't care,
-    # just in case this breaks ha-cluster-join on another node).
-    if stage in ("vgfs", "admin", "qdevice", "ocfs2"):
-        if not _context.cluster_is_running:
-            utils.fatal("Cluster is inactive - can't run %s stage" % (stage))
-    elif stage == "":
-        if _context.cluster_is_running:
-            utils.fatal("Cluster is currently active - can't run")
-    elif stage not in ("ssh", "csync2", "csync2_remote", "qnetd_remote", "sbd", "ocfs2"):
-        if _context.cluster_is_running:
-            utils.fatal("Cluster is currently active - can't run %s stage" % (stage))
+    init()
 
     _context.load_profiles()
     _context.init_sbd_manager()
 
-    # Need hostname resolution to work, want NTP (but don't block csync2_remote)
-    if stage not in ('csync2_remote', 'qnetd_remote'):
-        check_tty()
-        if not check_prereqs(stage):
-            return
-    else:
+    if stage in ('csync2_remote', 'qnetd_remote'):
         args = _context.args
-        logger_utils.log_only_to_file("args: {}".format(args))
+        logger_utils.log_only_to_file(f"args: {args}")
         if len(args) != 2:
-            utils.fatal(f"Expected NODE argument to {stage} stage")
+            utils.fatal(f"Expected NODE argument for '{stage}' stage")
         _context.cluster_node = args[1]
+    else:
+        check_tty()
+        if not check_prereqs():
+            return
 
     if stage and _context.cluster_is_running and \
             not ServiceManager(shell=sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active(CSYNC2_SERVICE):
@@ -2415,6 +2491,7 @@
         _context.node_list_in_cluster = [utils.this_node()]
 
     if stage != "":
+        check_stage_dependency(stage)
         globals()["init_" + stage]()
     else:
         init_ssh()
@@ -2468,6 +2545,17 @@
         print(out)
 
 
+def detect_cluster_service_on_node(peer_node):
+    service_manager = ServiceManager()
+    for _ in range(REJOIN_COUNT):
+        if service_manager.service_is_active("pacemaker.service", peer_node):
+            break
+        logger.warning("Cluster is inactive on %s. Retry in %d seconds", peer_node, REJOIN_INTERVAL)
+        sleep(REJOIN_INTERVAL)
+    else:
+        utils.fatal("Cluster is inactive on {}".format(peer_node))
+
+
 def bootstrap_join(context):
     """
     Join cluster process
@@ -2480,15 +2568,13 @@
 
     check_tty()
 
-    corosync_active = ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service")
-    if corosync_active and _context.stage != "ssh":
-        utils.fatal("Abort: Cluster is currently active. Run this command on a node joining the cluster.")
-
-    if not check_prereqs("join"):
+    if not check_prereqs():
         return
 
     if _context.stage != "":
         remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, _context.current_user)
+        init_upgradeutil()
+        check_stage_dependency(_context.stage)
         globals()["join_" + _context.stage](cluster_node, remote_user)
     else:
         if not _context.yes_to_all and _context.cluster_node is None:
@@ -2506,27 +2592,15 @@
         init_upgradeutil()
         remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, _context.current_user)
         utils.ping_node(cluster_node)
-
         join_ssh(cluster_node, remote_user)
         remote_user = utils.user_of(cluster_node)
 
-        service_manager = ServiceManager()
-        n = 0
-        while n < REJOIN_COUNT:
-            if service_manager.service_is_active("pacemaker.service", cluster_node):
-                break
-            n += 1
-            logger.warning("Cluster is inactive on %s. Retry in %d seconds", cluster_node, REJOIN_INTERVAL)
-            sleep(REJOIN_INTERVAL)
-        else:
-            utils.fatal("Cluster is inactive on {}".format(cluster_node))
-
         lock_inst = lock.RemoteLock(cluster_node)
         try:
             with lock_inst.lock():
+                service_manager = ServiceManager()
                 _context.node_list_in_cluster = utils.fetch_cluster_node_list_from_node(cluster_node)
                 setup_passwordless_with_other_nodes(cluster_node, remote_user)
-                join_remote_auth(cluster_node, remote_user)
                 _context.skip_csync2 = not service_manager.service_is_active(CSYNC2_SERVICE, cluster_node)
                 if _context.skip_csync2:
                     service_manager.stop_service(CSYNC2_SERVICE, disable=True)
@@ -2556,14 +2630,6 @@
     ocfs2_inst.join_ocfs2(peer_host)
 
 
-def join_remote_auth(node, user):
-    if os.path.exists(PCMK_REMOTE_AUTH):
-        utils.rmfile(PCMK_REMOTE_AUTH)
-    pcmk_remote_dir = os.path.dirname(PCMK_REMOTE_AUTH)
-    utils.mkdirs_owned(pcmk_remote_dir, mode=0o750, gid="haclient")
-    utils.touch(PCMK_REMOTE_AUTH)
-
-
 def remove_qdevice():
     """
     Remove qdevice service and configuration from cluster
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/constants.py new/crmsh-4.6.0+20240424.11e262d0/crmsh/constants.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/constants.py        2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/crmsh/constants.py        2024-04-24 11:30:11.000000000 +0200
@@ -511,4 +511,6 @@
 RSC_ROLE_PROMOTED_LEGACY = "Master"
 RSC_ROLE_UNPROMOTED_LEGACY = "Slave"
 PCMK_VERSION_DEFAULT = "2.0.0"
+
+NON_FUNCTIONAL_COMMANDS = ('help', 'cd', 'ls', 'quit', 'up')
 # vim:ts=4:sw=4:et:
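
The NON_FUNCTIONAL_COMMANDS tuple added here is consumed by the ui_context.py hunk further
below: purely navigational commands (help, cd, ls, quit, up) skip the level's requires() and
end_game() hooks, which, per the changelog entry "Skip querying CIB when in a sublevel or help
command", lets a plain help invocation run without querying the CIB. A rough, hypothetical
rendering of that gate (not the actual crmsh code):

  NON_FUNCTIONAL_COMMANDS = ('help', 'cd', 'ls', 'quit', 'up')

  def needs_level_requirements(command_name):
      # Only "real" commands trigger the level's requires()/end_game() hooks.
      return bool(command_name) and command_name not in NON_FUNCTIONAL_COMMANDS
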
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/healthcheck.py new/crmsh-4.6.0+20240424.11e262d0/crmsh/healthcheck.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/healthcheck.py      2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/crmsh/healthcheck.py      2024-04-24 11:30:11.000000000 +0200
@@ -6,6 +6,7 @@
 import sys
 import typing
 
+import crmsh.constants
 import crmsh.parallax
 import crmsh.utils
 
@@ -120,8 +121,8 @@
     def check_quick(self) -> bool:
         for key_type in self.KEY_TYPES:
             try:
-                os.stat('{}/{}'.format(self.SSH_DIR, key_type))
-                os.stat('{}/{}.pub'.format(self.SSH_DIR, key_type))
+                os.stat('{}/id_{}'.format(self.SSH_DIR, key_type))
+                os.stat('{}/id_{}.pub'.format(self.SSH_DIR, key_type))
                 return True
             except FileNotFoundError:
                 pass
@@ -131,7 +132,7 @@
         try:
             for node in nodes:
                 subprocess.check_call(
-                    ['sudo', 'su', '-', 'hacluster', '-c', 'ssh hacluster@{} true'.format(node)],
+                    ['sudo', 'su', '-', 'hacluster', '-c', 'ssh {} hacluster@{} true'.format(crmsh.constants.SSH_OPTION, node)],
                     stdin=subprocess.DEVNULL,
                     stdout=subprocess.DEVNULL,
                     stderr=subprocess.DEVNULL,
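
The two hunks above are the whole healthcheck fix: check_quick() has to stat id_<type> and
id_<type>.pub rather than <type>, and check_local() now passes crmsh.constants.SSH_OPTION to
ssh. A simplified standalone rendering of the corrected quick check follows; the SSH_DIR and
KEY_TYPES values are assumptions for illustration, not copied from crmsh.

  import os

  SSH_DIR = os.path.expanduser("~hacluster/.ssh")   # assumed key location
  KEY_TYPES = ("rsa", "ecdsa", "ed25519")           # assumed key types

  def check_quick() -> bool:
      # A key pair only counts if both id_<type> and id_<type>.pub exist.
      for key_type in KEY_TYPES:
          private_key = os.path.join(SSH_DIR, f"id_{key_type}")
          if os.path.exists(private_key) and os.path.exists(private_key + ".pub"):
              return True
      return False
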
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/ui_cluster.py new/crmsh-4.6.0+20240424.11e262d0/crmsh/ui_cluster.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/ui_cluster.py       2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/crmsh/ui_cluster.py       2024-04-24 11:30:11.000000000 +0200
@@ -331,12 +331,6 @@
         '''
         Initialize a cluster.
         '''
-        def looks_like_hostnames(lst):
-            sectionlist = bootstrap.INIT_STAGES
-            return all(not (l.startswith('-') or l in sectionlist) for l in lst)
-        if len(args) > 0:
-            if '--dry-run' in args or looks_like_hostnames(args):
-                args = ['--yes', '--nodes'] + [arg for arg in args if arg != '--dry-run']
         parser = ArgumentParser(description="""
 Initialize a cluster from scratch. This command configures
 a complete cluster, and can also add additional cluster
@@ -471,8 +465,6 @@
         if stage == "vgfs":
             stage = "ocfs2"
             logger.warning("vgfs stage was deprecated and is an alias of ocfs2 
stage now")
-        if stage not in bootstrap.INIT_STAGES and stage != "":
-            parser.error("Invalid stage (%s)" % (stage))
 
         if options.qnetd_addr_input:
             if not ServiceManager().service_is_available("corosync-qdevice.service"):
@@ -547,12 +539,11 @@
         stage = ""
         if len(args) == 1:
             stage = args[0]
-        if stage not in ("ssh", "csync2", "ssh_merge", "cluster", ""):
-            parser.error("Invalid stage (%s)" % (stage))
 
         join_context = bootstrap.Context.set_context(options)
         join_context.ui_context = context
         join_context.stage = stage
+        join_context.cluster_is_running = ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("pacemaker.service")
         join_context.type = "join"
         join_context.validate_option()
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/ui_context.py new/crmsh-4.6.0+20240424.11e262d0/crmsh/ui_context.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/ui_context.py       2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/crmsh/ui_context.py       2024-04-24 11:30:11.000000000 +0200
@@ -82,6 +82,10 @@
                     cmd = True
                     break
             if cmd:
+                if self.command_name not in constants.NON_FUNCTIONAL_COMMANDS:
+                    entry = self.current_level()
+                    if 'requires' in dir(entry) and not entry.requires():
+                        self.fatal_error("Missing requirements")
                 utils.check_user_access(self.current_level().name)
                 rv = self.execute_command() is not False
         except (ValueError, IOError) as e:
@@ -239,8 +243,7 @@
     def enter_level(self, level):
         '''
         Pushes an instance of the given UILevel
-        subclass onto self.stack. Checks prerequirements
-        for the level (if any).
+        subclass onto self.stack.
         '''
         # on entering new level we need to set the
         # interactive option _before_ creating the level
@@ -251,8 +254,6 @@
         self._in_transit = True
 
         entry = level()
-        if 'requires' in dir(entry) and not entry.requires():
-            self.fatal_error("Missing requirements")
         self.stack.append(entry)
         self.clear_readline_cache()
 
@@ -320,7 +321,8 @@
         '''
         ok = True
         if len(self.stack) > 1:
-            ok = self.current_level().end_game(no_questions_asked=self._in_transit) is not False
+            if self.command_name and self.command_name not in constants.NON_FUNCTIONAL_COMMANDS:
+                ok = self.current_level().end_game(no_questions_asked=self._in_transit) is not False
             self.stack.pop()
             self.clear_readline_cache()
         return ok
@@ -341,7 +343,9 @@
         '''
         Exit from the top level
         '''
-        ok = self.current_level().end_game()
+        ok = True
+        if self.command_name and self.command_name not in constants.NON_FUNCTIONAL_COMMANDS:
+            ok = self.current_level().end_game()
         if options.interactive and not options.batch:
             if constants.need_reset:
                 utils.ext_cmd("reset")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/utils.py new/crmsh-4.6.0+20240424.11e262d0/crmsh/utils.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/crmsh/utils.py    2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/crmsh/utils.py    2024-04-24 11:30:11.000000000 +0200
@@ -468,14 +468,6 @@
             fatal("Failed to chmod {}: {}".format(path, err))
 
 
-def touch(file_name):
-    rc, out, err = ShellUtils().get_stdout_stderr("touch " + file_name, no_reg=True)
-    if rc != 0:
-        rc, out, err = ShellUtils().get_stdout_stderr("sudo touch " + file_name, no_reg=True)
-        if rc != 0:
-            fatal("Failed create file {}: {}".format(file_name, err))
-
-
 def copy_local_file(src, dest):
     try:
         shutil.copyfile(src, dest)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/test/features/bootstrap_bugs.feature new/crmsh-4.6.0+20240424.11e262d0/test/features/bootstrap_bugs.feature
--- old/crmsh-4.6.0+20240411.9c4bf6c1/test/features/bootstrap_bugs.feature      2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/test/features/bootstrap_bugs.feature      2024-04-24 11:30:11.000000000 +0200
@@ -5,6 +5,31 @@
   Need nodes: hanode1 hanode2 hanode3
 
   @clean
+  Scenario: Stages dependency (bsc#1175865)
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    When    Try "crm cluster init cluster -y" on "hanode1"
+    Then    Except "ERROR: cluster.init: Please run 'ssh' stage first"
+    When    Run "crm cluster init ssh -y" on "hanode1"
+    When    Try "crm cluster init cluster -y" on "hanode1"
+    Then    Except "ERROR: cluster.init: Please run 'csync2' stage first"
+    When    Run "crm cluster init csync2 -y" on "hanode1"
+    When    Try "crm cluster init cluster -y" on "hanode1"
+    Then    Except "ERROR: cluster.init: Please run 'corosync' stage first"
+    When    Run "crm cluster init corosync -y" on "hanode1"
+    When    Run "crm cluster init cluster -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+
+    When    Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
+    Then    Except "ERROR: cluster.join: Please run 'ssh' stage first"
+    When    Try "crm cluster join ssh -c hanode1 -y" on "hanode2"
+    When    Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
+    Then    Except "ERROR: cluster.join: Please run 'csync2' stage first"
+    When    Try "crm cluster join csync2 -c hanode1 -y" on "hanode2"
+    When    Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+
+  @clean
   Scenario: Set placement-strategy value as "default"(bsc#1129462)
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/test/features/bootstrap_options.feature new/crmsh-4.6.0+20240424.11e262d0/test/features/bootstrap_options.feature
--- old/crmsh-4.6.0+20240411.9c4bf6c1/test/features/bootstrap_options.feature   2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/test/features/bootstrap_options.feature   2024-04-24 11:30:11.000000000 +0200
@@ -14,6 +14,7 @@
 
   @clean
   Scenario: Check help output
+    When    Run "crm configure help primitive" OK              
     When    Run "crm -h" on "hanode1"
     Then    Output is the same with expected "crm" help output
     When    Run "crm cluster init -h" on "hanode1"
@@ -42,6 +43,15 @@
     Then    Expected "Can't use -N/--nodes option and stage(sbd) together" in stderr
 
   @clean
+  Scenario: Stage validation
+    When    Try "crm cluster init fdsf -y" on "hanode1"
+    Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, ocfs2, admin, qdevice)" in stderr
+    When    Try "crm cluster join fdsf -y" on "hanode1"
+    Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, ssh_merge, cluster)" in stderr
+    When    Try "crm cluster join ssh -y" on "hanode1"
+    Then    Expected "Can't use stage(ssh) without specifying cluster node" in stderr
+
+  @clean
   Scenario: Init whole cluster service on node "hanode1" using "--node" option
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
@@ -51,6 +61,9 @@
     And     Online nodes are "hanode1 hanode2"
     And     Show cluster status on "hanode1"
 
+    When    Try "crm cluster init cluster -y" on "hanode1"
+    Then    Expected "Cluster is active, can't run 'cluster' stage" in stderr
+
   @clean
   Scenario: Bind specific network interface using "-i" option
     Given   Cluster service is "stopped" on "hanode1"
@@ -96,6 +109,9 @@
     And     Cluster virtual IP is "@vip.0"
     And     Show cluster status on "hanode1"
 
+    When    Try "crm cluster init cluster -y" on "hanode1"
+    Then    Expected "Cluster is active, can't run 'cluster' stage" in stderr
+
   @clean
   Scenario: Init cluster service with udpu using "-u" option
     Given   Cluster service is "stopped" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/test/features/qdevice_validate.feature new/crmsh-4.6.0+20240424.11e262d0/test/features/qdevice_validate.feature
--- old/crmsh-4.6.0+20240411.9c4bf6c1/test/features/qdevice_validate.feature    2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/test/features/qdevice_validate.feature    2024-04-24 11:30:11.000000000 +0200
@@ -110,7 +110,7 @@
   Scenario: Run qdevice stage on inactive cluster node
     Given   Cluster service is "stopped" on "hanode1"
     When    Try "crm cluster init qdevice --qnetd-hostname=qnetd-node"
-    Then    Except "ERROR: cluster.init: Cluster is inactive - can't run qdevice stage"
+    Then    Except "ERROR: cluster.init: Cluster is inactive, can't run 'qdevice' stage"
 
   @clean
   Scenario: Run qdevice stage but miss "--qnetd-hostname" option
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240411.9c4bf6c1/test/unittests/test_bootstrap.py new/crmsh-4.6.0+20240424.11e262d0/test/unittests/test_bootstrap.py
--- old/crmsh-4.6.0+20240411.9c4bf6c1/test/unittests/test_bootstrap.py  2024-04-11 08:31:14.000000000 +0200
+++ new/crmsh-4.6.0+20240424.11e262d0/test/unittests/test_bootstrap.py  2024-04-24 11:30:11.000000000 +0200
@@ -550,6 +550,7 @@
             bootstrap.join_ssh(None, None)
         mock_error.assert_called_once_with("No existing IP/hostname specified (use -c option)")
 
+    @mock.patch('crmsh.bootstrap.detect_cluster_service_on_node')
     @mock.patch('crmsh.bootstrap.get_node_canonical_hostname')
     @mock.patch('crmsh.bootstrap.swap_public_ssh_key_for_secondary_user')
     @mock.patch('crmsh.bootstrap.change_user_shell')
@@ -561,8 +562,9 @@
             self,
             mock_start_service, mock_config_ssh, mock_ssh_copy_id, mock_swap, mock_change, mock_swap_2,
             mock_get_node_cononical_hostname,
+            mock_detect_cluster_service_on_node
     ):
-        bootstrap._context = mock.Mock(current_user="bob", default_nic="eth1", use_ssh_agent=False)
+        bootstrap._context = mock.Mock(current_user="bob", default_nic="eth1", use_ssh_agent=False, stage=None)
         mock_swap.return_value = None
         mock_ssh_copy_id.return_value = 0
         mock_get_node_cononical_hostname.return_value='node1'
