Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2025-07-31 17:47:22
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1944 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Thu Jul 31 17:47:22 2025 rev:380 rq:1296733 version:5.0.0+20250731.d3091c0c

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2025-07-23 16:40:10.035992618 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1944/crmsh.changes    2025-07-31 17:50:31.874950445 +0200
@@ -1,0 +2,25 @@
+Thu Jul 31 08:47:31 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250731.d3091c0c:
+  * Dev: command: Show help topic completion only at root level
+
+-------------------------------------------------------------------
+Wed Jul 30 07:30:49 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250730.55c60660:
+  * Fix: sbd: should show warning instead of error when unable to restart the cluster automatically after changing configs (bsc#1246956)
+
+-------------------------------------------------------------------
+Fri Jul 25 09:41:34 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250725.84f2955a:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: qdevice: Reload corosync configuration on one node
+
+-------------------------------------------------------------------
+Wed Jul 23 23:59:12 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250724.4eb69e1d:
+  * Fix: bootstrap: continue qnetd setup when ssh keypair is not found for some cluster nodes (#1850)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-5.0.0+20250723.07081e35.tar.bz2

New:
----
  crmsh-5.0.0+20250731.d3091c0c.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.nk0Qt5/_old  2025-07-31 17:50:32.854991151 +0200
+++ /var/tmp/diff_new_pack.nk0Qt5/_new  2025-07-31 17:50:32.854991151 +0200
@@ -41,7 +41,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20250723.07081e35
+Version:        5.0.0+20250731.d3091c0c
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.nk0Qt5/_old  2025-07-31 17:50:32.906993311 +0200
+++ /var/tmp/diff_new_pack.nk0Qt5/_new  2025-07-31 17:50:32.906993311 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">cd1d7ec47e28185333c42e5129873165b8541840</param>
+  <param name="changesrevision">d3091c0cdbf52ac3e322ffaf40177122c1156960</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-5.0.0+20250723.07081e35.tar.bz2 -> crmsh-5.0.0+20250731.d3091c0c.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250723.07081e35/crmsh/bootstrap.py new/crmsh-5.0.0+20250731.d3091c0c/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20250723.07081e35/crmsh/bootstrap.py        2025-07-23 14:56:37.000000000 +0200
+++ new/crmsh-5.0.0+20250731.d3091c0c/crmsh/bootstrap.py        2025-07-31 10:20:46.000000000 +0200
@@ -1609,9 +1609,16 @@
         if node == utils.this_node():
             continue
         local_user, remote_user, node = _select_user_pair_for_ssh_for_secondary_components(node)
-        remote_key_content = ssh_key.fetch_public_key_content_list(node, remote_user)[0]
-        in_memory_key = ssh_key.InMemoryPublicKey(remote_key_content)
-        ssh_key.AuthorizedKeyManager(cluster_shell).add(qnetd_addr, qnetd_user, in_memory_key)
+        try:
+            remote_key_content = ssh_key.fetch_public_key_content_list(node, remote_user)[0]
+            in_memory_key = ssh_key.InMemoryPublicKey(remote_key_content)
+            ssh_key.AuthorizedKeyManager(cluster_shell).add(qnetd_addr, qnetd_user, in_memory_key)
+        except ssh_key.Error:
+            # crmsh#1850: if ssh-agent was used, there will not be a key pair on the node.
+            logger.debug(
+                "Skip adding the ssh key of %s:%s to authorized_keys of the qnetd node: keypair does not exist",
+                remote_user, node,
+            )
 
     user_by_host = utils.HostUserConfig()
     user_by_host.add(local_user, utils.this_node())
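Note on the bootstrap.py hunk above: per the new except clause, `ssh_key.fetch_public_key_content_list()` evidently raises `ssh_key.Error` when a node has no on-disk keypair (e.g. when the cluster was bootstrapped via ssh-agent), and the loop now skips that node instead of aborting qnetd setup. A minimal standalone sketch of the same skip-on-missing-key pattern (function names here are illustrative, not crmsh's API):

    import logging

    logger = logging.getLogger(__name__)

    def authorize_keys_on_qnetd(nodes, fetch_key, authorize, key_error=Exception):
        """Authorize each node's public key on the qnetd host, tolerating
        nodes that keep no keypair on disk (illustrative sketch)."""
        for node in nodes:
            try:
                authorize(fetch_key(node))
            except key_error:
                # ssh-agent-based setups keep no keypair on the node; skip it
                # rather than failing the whole qnetd setup.
                logger.debug("no ssh keypair found on %s, skipping", node)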
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250723.07081e35/crmsh/command.py new/crmsh-5.0.0+20250731.d3091c0c/crmsh/command.py
--- old/crmsh-5.0.0+20250723.07081e35/crmsh/command.py  2025-07-23 14:56:37.000000000 +0200
+++ new/crmsh-5.0.0+20250731.d3091c0c/crmsh/command.py  2025-07-31 10:20:46.000000000 +0200
@@ -231,8 +231,10 @@
 
 
 def _help_completer(args, context):
-    'TODO: make better completion'
-    return help_module.list_help_topics() + context.current_level().get_completions()
+    current_level_completions = context.current_level().get_completions()
+    if context.current_level().name == 'root':
+        return help_module.list_help_topics() + current_level_completions
+    return current_level_completions
 
 
 class UI(object):
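The command.py change above gates help-topic completion on the level name, so topics are offered only at the root prompt while sublevels complete just their own commands. A rough sketch of the same gating idea in isolation (the names and data here are stand-ins, not crmsh's real classes):

    # Illustrative stand-ins for crmsh's completion context (assumed shapes).
    HELP_TOPICS = ["Introduction", "Syntax", "Resources"]

    def help_completer(level_name, level_completions):
        # Offer global help topics only when completing at the root level.
        if level_name == 'root':
            return HELP_TOPICS + level_completions
        return level_completions

    print(help_completer('root', ['status', 'configure']))     # topics + commands
    print(help_completer('configure', ['primitive', 'show']))  # commands only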
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250723.07081e35/crmsh/qdevice.py new/crmsh-5.0.0+20250731.d3091c0c/crmsh/qdevice.py
--- old/crmsh-5.0.0+20250723.07081e35/crmsh/qdevice.py  2025-07-23 14:56:37.000000000 +0200
+++ new/crmsh-5.0.0+20250731.d3091c0c/crmsh/qdevice.py  2025-07-31 10:20:46.000000000 +0200
@@ -588,7 +588,7 @@
             corosync.configure_two_node(qdevice_adding=True)
             bootstrap.sync_file(corosync.conf())
             if self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
-                utils.cluster_run_cmd("crm corosync reload")
+                sh.cluster_shell().get_stdout_or_raise_error("corosync-cfgtool -R")
 
     def config_qnetd_port(self):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250723.07081e35/crmsh/sbd.py new/crmsh-5.0.0+20250731.d3091c0c/crmsh/sbd.py
--- old/crmsh-5.0.0+20250723.07081e35/crmsh/sbd.py      2025-07-23 14:56:37.000000000 +0200
+++ new/crmsh-5.0.0+20250731.d3091c0c/crmsh/sbd.py      2025-07-31 10:20:46.000000000 +0200
@@ -557,12 +557,17 @@
     def restart_cluster_if_possible(with_maintenance_mode=False):
         if not ServiceManager().service_is_active(constants.PCMK_SERVICE):
             return
-        if xmlutil.CrmMonXmlParser().is_any_resource_running() and not with_maintenance_mode:
+        if not xmlutil.CrmMonXmlParser().is_any_resource_running():
+            bootstrap.restart_cluster()
+        elif with_maintenance_mode:
+            if not utils.is_dlm_running():
+                bootstrap.restart_cluster()
+            else:
+                logger.warning("Resource is running, need to restart cluster 
service manually on each node")
+        else:
             logger.warning("Resource is running, need to restart cluster 
service manually on each node")
             logger.warning("Or, run with `crm -F` or `--force` option, the 
`sbd` subcommand will leverage maintenance mode for any changes that require 
restarting sbd.service")
             logger.warning("Understand risks that running RA has no cluster 
protection while the cluster is in maintenance mode and restarting")
-        else:
-            bootstrap.restart_cluster()
 
     def configure_sbd(self):
         '''
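Summarizing the sbd.py hunk above: the restart decision now branches on maintenance mode and DLM state and warns instead of erroring out (the bsc#1246956 fix). A compact sketch of the resulting flow (names illustrative, behavior mirroring the diff):

    from enum import Enum, auto

    class Action(Enum):
        NOTHING = auto()
        RESTART = auto()
        WARN_MANUAL_RESTART = auto()

    def decide(pcmk_active, resource_running, maintenance_mode, dlm_running):
        # Mirrors restart_cluster_if_possible() after the change above.
        if not pcmk_active:
            return Action.NOTHING
        if not resource_running:
            return Action.RESTART
        if maintenance_mode and not dlm_running:
            return Action.RESTART
        # Resources running (or DLM active): ask the admin to restart
        # the cluster service manually on each node.
        return Action.WARN_MANUAL_RESTART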
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250723.07081e35/test/unittests/test_qdevice.py new/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_qdevice.py
--- old/crmsh-5.0.0+20250723.07081e35/test/unittests/test_qdevice.py    2025-07-23 14:56:37.000000000 +0200
+++ new/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_qdevice.py    2025-07-31 10:20:46.000000000 +0200
@@ -958,12 +958,14 @@
         mock_parser_inst.remove.assert_called_once_with("quorum.device")
         mock_parser_inst.save.assert_called_once()
 
-    @mock.patch('crmsh.utils.cluster_run_cmd')
+    @mock.patch('crmsh.sh.cluster_shell')
     @mock.patch('crmsh.bootstrap.sync_file')
     @mock.patch('crmsh.corosync.configure_two_node')
     @mock.patch('crmsh.log.LoggerUtils.status_long')
     @mock.patch('crmsh.qdevice.QDevice.write_qdevice_config')
-    def test_config_qdevice(self, mock_write_config, mock_status_long, mock_config_two_node, mock_sync_file, mock_run_cmd):
+    def test_config_qdevice(self, mock_write_config, mock_status_long, mock_config_two_node, mock_sync_file, mock_cluster_shell):
+        mock_cluster_shell_instance = mock.Mock()
+        mock_cluster_shell.return_value = mock_cluster_shell_instance
         mock_status_long.return_value.__enter__ = mock.Mock()
         mock_status_long.return_value.__exit__ = mock.Mock()
         self.qdevice_with_ip.qdevice_reload_policy = qdevice.QdevicePolicy.QDEVICE_RELOAD
@@ -971,4 +973,4 @@
         self.qdevice_with_ip.config_qdevice()
         mock_status_long.assert_called_once_with("Update configuration")
         mock_config_two_node.assert_called_once_with(qdevice_adding=True)
-        mock_run_cmd.assert_called_once_with("crm corosync reload")
+        mock_cluster_shell_instance.get_stdout_or_raise_error.assert_called_once_with("corosync-cfgtool -R")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250723.07081e35/test/unittests/test_sbd.py new/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_sbd.py
--- old/crmsh-5.0.0+20250723.07081e35/test/unittests/test_sbd.py        2025-07-23 14:56:37.000000000 +0200
+++ new/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_sbd.py        2025-07-31 10:20:46.000000000 +0200
@@ -409,11 +409,15 @@
         mock_CrmMonXmlParser.assert_not_called()
 
     @patch('logging.Logger.warning')
+    @patch('crmsh.utils.is_dlm_running')
     @patch('crmsh.xmlutil.CrmMonXmlParser')
     @patch('crmsh.sbd.ServiceManager')
-    def test_restart_cluster_if_possible_manually(self, mock_ServiceManager, mock_CrmMonXmlParser, mock_logger_warning):
+    def test_restart_cluster_if_possible_manually(
+            self, mock_ServiceManager, mock_CrmMonXmlParser, mock_is_dlm_running, mock_logger_warning,
+    ):
         mock_ServiceManager.return_value.service_is_active.return_value = True
         mock_CrmMonXmlParser.return_value.is_any_resource_running.return_value = True
+        mock_is_dlm_running.return_value = False
         SBDManager.restart_cluster_if_possible()
         mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
         mock_logger_warning.assert_has_calls([
@@ -422,6 +426,20 @@
             call("Understand risks that running RA has no cluster protection 
while the cluster is in maintenance mode and restarting")
         ])
 
+    @patch('logging.Logger.warning')
+    @patch('crmsh.utils.is_dlm_running')
+    @patch('crmsh.xmlutil.CrmMonXmlParser')
+    @patch('crmsh.sbd.ServiceManager')
+    def test_restart_cluster_if_possible_dlm_running(
+            self, mock_ServiceManager, mock_CrmMonXmlParser, mock_is_dlm_running, mock_logger_warning,
+    ):
+        mock_ServiceManager.return_value.service_is_active.return_value = True
+        mock_CrmMonXmlParser.return_value.is_any_resource_running.return_value = True
+        mock_is_dlm_running.return_value = True
+        SBDManager.restart_cluster_if_possible(with_maintenance_mode=True)
+        mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
+        mock_logger_warning.assert_called_once_with("Resource is running, need to restart cluster service manually on each node")
+
     @patch('crmsh.bootstrap.restart_cluster')
     @patch('logging.Logger.warning')
     @patch('crmsh.xmlutil.CrmMonXmlParser')
