Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2023-12-12 19:32:14
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.25432 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Tue Dec 12 19:32:14 2023 rev:315 rq:1132598 version:4.6.0+20231212.54751d3f

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2023-12-11 21:51:15.809111225 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.25432/crmsh.changes   2023-12-12 19:32:36.897313091 +0100
@@ -1,0 +2,8 @@
+Tue Dec 12 03:10:42 UTC 2023 - xli...@suse.com
+
+- Update to version 4.6.0+20231212.54751d3f:
+  * Dev: unittest: Adjust unit test for previous change
+  * Dev: behave: Add functional test for previous change
+  * Fix: ui_cluster: Improve the process of 'crm cluster stop' (bsc#1213889)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.6.0+20231211.4b74412a.tar.bz2

New:
----
  crmsh-4.6.0+20231212.54751d3f.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.nBsgRl/_old  2023-12-12 19:32:37.505335526 +0100
+++ /var/tmp/diff_new_pack.nBsgRl/_new  2023-12-12 19:32:37.505335526 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.6.0+20231211.4b74412a
+Version:        4.6.0+20231212.54751d3f
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.nBsgRl/_old  2023-12-12 19:32:37.545337002 +0100
+++ /var/tmp/diff_new_pack.nBsgRl/_new  2023-12-12 19:32:37.549337150 +0100
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">a23bea64548038b55b3650ae73cb3b88f025ad68</param>
+  <param name="changesrevision">db02a38df79fc1e1ee20c9dc4d1c39028e2c3c11</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-4.6.0+20231211.4b74412a.tar.bz2 -> crmsh-4.6.0+20231212.54751d3f.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20231211.4b74412a/crmsh/ui_cluster.py new/crmsh-4.6.0+20231212.54751d3f/crmsh/ui_cluster.py
--- old/crmsh-4.6.0+20231211.4b74412a/crmsh/ui_cluster.py       2023-12-11 03:36:33.000000000 +0100
+++ new/crmsh-4.6.0+20231212.54751d3f/crmsh/ui_cluster.py       2023-12-12 03:47:05.000000000 +0100
@@ -193,42 +193,77 @@
         for node in node_list:
             logger.info("The cluster stack started on {}".format(node))
 
-    @command.skill_level('administrator')
-    def do_stop(self, context, *args):
-        '''
-        Stops the cluster stack on all nodes or specific node(s)
-        '''
+    @staticmethod
+    def _node_ready_to_stop_cluster_service(node):
+        """
+        Check if the specific node is ready to stop cluster service
+
+        If both corosync.service and pacemaker.service are active, return True.
+        If only some of the services are active, stop them first and return False.
+        """
         service_manager = ServiceManager()
-        node_list = parse_option_for_nodes(context, *args)
-        for node in node_list[:]:
-            if not service_manager.service_is_active("corosync.service", remote_addr=node):
-                if service_manager.service_is_active("sbd.service", remote_addr=node):
-                    service_manager.stop_service("corosync", remote_addr=node)
-                    logger.info("The cluster stack stopped on {}".format(node))
-                else:
-                    logger.info("The cluster stack already stopped on {}".format(node))
-                node_list.remove(node)
-            elif not service_manager.service_is_active("pacemaker.service", remote_addr=node):
+
+        corosync_active = service_manager.service_is_active("corosync.service", remote_addr=node)
+        sbd_active = service_manager.service_is_active("sbd.service", remote_addr=node)
+        pacemaker_active = service_manager.service_is_active("pacemaker.service", remote_addr=node)
+
+        if not corosync_active:
+            if sbd_active:
                 service_manager.stop_service("corosync", remote_addr=node)
-                logger.info("The cluster stack stopped on {}".format(node))
-                node_list.remove(node)
-        if not node_list:
+                logger.info(f"The cluster stack stopped on {node}")
+            else:
+                logger.info(f"The cluster stack already stopped on {node}")
+            return False
+
+        elif not pacemaker_active:
+            service_manager.stop_service("corosync", remote_addr=node)
+            logger.info("The cluster stack stopped on {}".format(node))
+            return False
+
+        return True
+
+    @staticmethod
+    def _wait_for_dc(node=None):
+        """
+        Wait for the cluster's DC to become available
+        """
+        if not ServiceManager().service_is_active("pacemaker.service", remote_addr=node):
             return
 
-        dc_deadtime = utils.get_property("dc-deadtime") or str(constants.DC_DEADTIME_DEFAULT)
+        dc_deadtime = utils.get_property("dc-deadtime", peer=node) or str(constants.DC_DEADTIME_DEFAULT)
         dc_timeout = int(dc_deadtime.strip('s')) + 5
         try:
-            utils.check_function_with_timeout(utils.get_dc, wait_timeout=dc_timeout)
+            utils.check_function_with_timeout(utils.get_dc, wait_timeout=dc_timeout, peer=node)
         except TimeoutError:
             logger.error("No DC found currently, please wait if the cluster is 
still starting")
-            return False
+            raise utils.TerminateSubCommand
 
-        # When dlm running and quorum is lost, before stop cluster service, should set
-        # enable_quorum_fencing=0, enable_quorum_lockspace=0 for dlm config option
-        if utils.is_dlm_running() and not utils.is_quorate():
+    @staticmethod
+    def _set_dlm(node=None):
+        """
+        When dlm is running and quorum is lost, set enable_quorum_fencing=0 and
+        enable_quorum_lockspace=0 in the dlm config before stopping the cluster service
+        """
+        if utils.is_dlm_running(node) and not utils.is_quorate(node):
             logger.debug("Quorum is lost; Set enable_quorum_fencing=0 and 
enable_quorum_lockspace=0 for dlm")
-            utils.set_dlm_option(enable_quorum_fencing=0, 
enable_quorum_lockspace=0)
+            utils.set_dlm_option(peer=node, enable_quorum_fencing=0, 
enable_quorum_lockspace=0)
+
+    @command.skill_level('administrator')
+    def do_stop(self, context, *args):
+        '''
+        Stops the cluster stack on all nodes or specific node(s)
+        '''
+        node_list = parse_option_for_nodes(context, *args)
+        node_list = [n for n in node_list if self._node_ready_to_stop_cluster_service(n)]
+        if not node_list:
+            return
+        logger.debug(f"stop node list: {node_list}")
 
+        self._wait_for_dc(node_list[0])
+
+        self._set_dlm(node_list[0])
+
+        service_manager = ServiceManager()
         # Stop pacemaker since it can make sure cluster has quorum until stop corosync
         node_list = service_manager.stop_service("pacemaker", node_list=node_list)
         # Then, stop qdevice if is active
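
For readers tracking the refactor: do_stop now (1) filters the node list down to
nodes whose full stack is running, via _node_ready_to_stop_cluster_service,
(2) waits for the DC and relaxes the dlm quorum options through the first
remaining node rather than the local one, so "crm cluster stop --all" also works
when the local node's own service is down (the bsc#1213889 case), and (3) stops
pacemaker, qdevice, and corosync in that order. Below is a minimal,
self-contained sketch of that control flow; the stub names and the sample
"active" table are illustrative, not crmsh code:

    # Hypothetical stand-ins for the ServiceManager/utils calls in the hunk above.
    def node_ready_to_stop(node, active):
        """True only when corosync and pacemaker are both active;
        otherwise stop any half-started service and skip the node."""
        if not active[node]["corosync"]:
            if active[node]["sbd"]:
                print(f"stop corosync on {node}")  # sbd up without corosync
            return False
        if not active[node]["pacemaker"]:
            print(f"stop corosync on {node}")      # corosync up, pacemaker down
            return False
        return True

    def do_stop(nodes, active):
        nodes = [n for n in nodes if node_ready_to_stop(n, active)]
        if not nodes:
            return
        # Checks go through a node known to be up, not the local node.
        print(f"wait for DC and set dlm options via {nodes[0]}")
        # Pacemaker first (it keeps quorum), then qdevice, then corosync.
        for service in ("pacemaker", "corosync-qdevice", "corosync"):
            print(f"stop {service} on {nodes}")

    # Example: hanode1's stack is already down, hanode2 is fully up.
    active = {
        "hanode1": {"corosync": False, "sbd": False, "pacemaker": False},
        "hanode2": {"corosync": True, "sbd": True, "pacemaker": True},
    }
    do_stop(["hanode1", "hanode2"], active)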
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20231211.4b74412a/crmsh/utils.py new/crmsh-4.6.0+20231212.54751d3f/crmsh/utils.py
--- old/crmsh-4.6.0+20231211.4b74412a/crmsh/utils.py    2023-12-11 03:36:33.000000000 +0100
+++ new/crmsh-4.6.0+20231212.54751d3f/crmsh/utils.py    2023-12-12 03:47:05.000000000 +0100
@@ -957,14 +957,14 @@
         return False
 
 
-def get_dc():
+def get_dc(peer=None):
     cmd = "crmadmin -D -t 1"
-    rc, s, _ = ShellUtils().get_stdout_stderr(add_sudo(cmd))
-    if rc != 0:
+    _, out, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(peer, cmd)
+    if not out:
         return None
-    if not s.startswith("Designated"):
+    if not out.startswith("Designated"):
         return None
-    return s.split()[-1]
+    return out.split()[-1]
 
 
 def wait4dc(what="", show_progress=True):
@@ -2741,48 +2741,63 @@
     return re.search(r'Node\s+{}:\s+standby'.format(node), out) is not None
 
 
-def get_dlm_option_dict():
+def get_dlm_option_dict(peer=None):
     """
     Get dlm config option dictionary
     """
-    out = sh.cluster_shell().get_stdout_or_raise_error("dlm_tool dump_config")
+    out = sh.cluster_shell().get_stdout_or_raise_error("dlm_tool dump_config", peer)
     return dict(re.findall("(\w+)=(\w+)", out))
 
 
-def set_dlm_option(**kargs):
+def set_dlm_option(peer=None, **kargs):
     """
     Set dlm option
     """
     shell = sh.cluster_shell()
-    dlm_option_dict = get_dlm_option_dict()
+    dlm_option_dict = get_dlm_option_dict(peer=peer)
     for option, value in kargs.items():
         if option not in dlm_option_dict:
-            raise ValueError('"{}" is not dlm config option'.format(option))
+            raise ValueError(f'"{option}" is not dlm config option')
         if dlm_option_dict[option] != value:
-            shell.get_stdout_or_raise_error('dlm_tool set_config "{}={}"'.format(option, value))
+            shell.get_stdout_or_raise_error(f'dlm_tool set_config "{option}={value}"', peer)
 
 
-def is_dlm_running():
+def is_dlm_running(peer=None):
     """
     Check if dlm ra controld is running
     """
-    from . import xmlutil
-    return xmlutil.CrmMonXmlParser().is_resource_started(constants.DLM_CONTROLD_RA)
+    return is_resource_running(constants.DLM_CONTROLD_RA, peer=peer)
+
+
+def has_resource_configured(ra_type, peer=None):
+    """
+    Check if the RA is configured
+    """
+    out = sh.cluster_shell().get_stdout_or_raise_error("crm_mon -1rR", peer)
+    return re.search(ra_type, out) is not None
 
 
-def is_dlm_configured():
+def is_resource_running(ra_type, peer=None):
+    """
+    Check if the RA is running
+    """
+    out = sh.cluster_shell().get_stdout_or_raise_error("crm_mon -1rR", peer)
+    patt = f"\({ra_type}\):\s*Started"
+    return re.search(patt, out) is not None
+
+
+def is_dlm_configured(peer=None):
     """
     Check if dlm configured
     """
-    from . import xmlutil
-    return xmlutil.CrmMonXmlParser().is_resource_configured(constants.DLM_CONTROLD_RA)
+    return has_resource_configured(constants.DLM_CONTROLD_RA, peer=peer)
 
 
-def is_quorate():
+def is_quorate(peer=None):
     """
     Check if cluster is quorated
     """
-    out = sh.cluster_shell().get_stdout_or_raise_error("corosync-quorumtool -s", success_exit_status={0, 2})
+    out = sh.cluster_shell().get_stdout_or_raise_error("corosync-quorumtool -s", peer, success_exit_status={0, 2})
     res = re.search(r'Quorate:\s+(.*)', out)
     if res:
         return res.group(1) == "Yes"
@@ -2808,7 +2823,7 @@
     return 0
 
 
-def get_property(name, property_type="crm_config"):
+def get_property(name, property_type="crm_config", peer=None):
     """
     Get cluster properties
 
@@ -2819,7 +2834,7 @@
         cmd = "CIB_file={} sudo --preserve-env=CIB_file crm configure 
get_property {}".format(cib_path, name)
     else:
         cmd = "sudo crm_attribute -t {} -n {} -Gq".format(property_type, name)
-    rc, stdout, _ = ShellUtils().get_stdout_stderr(cmd)
+    rc, stdout, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(peer, cmd)
     return stdout if rc == 0 else None
 
 
@@ -2952,7 +2967,7 @@
     return rc
 
 
-def check_function_with_timeout(check_function, wait_timeout=30, interval=1):
+def check_function_with_timeout(check_function, wait_timeout=30, interval=1, *args, **kwargs):
     """
     Run check_function in a loop
     Return when check_function is true
@@ -2961,7 +2976,7 @@
     current_time = int(time.time())
     timeout = current_time + wait_timeout
     while current_time <= timeout:
-        if check_function():
+        if check_function(*args, **kwargs):
             return
         time.sleep(interval)
         current_time = int(time.time())
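
The signature change above is the hook that makes the DC wait peer-aware:
check_function_with_timeout now forwards any extra positional and keyword
arguments to the check function, which is how _wait_for_dc passes peer=node
through to utils.get_dc. A condensed, runnable rendering of the same loop
follows; poll() is an illustrative stand-in, not crmsh code:

    import time

    def check_function_with_timeout(check_function, wait_timeout=30,
                                    interval=1, *args, **kwargs):
        deadline = int(time.time()) + wait_timeout
        while int(time.time()) <= deadline:
            if check_function(*args, **kwargs):  # extra args forwarded
                return
            time.sleep(interval)
        raise TimeoutError

    def poll(peer=None):
        print(f"querying the DC via {peer}")
        return True

    # Mirrors _wait_for_dc: the peer keyword reaches the check function.
    check_function_with_timeout(poll, wait_timeout=5, peer="hanode2")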
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20231211.4b74412a/test/features/bootstrap_bugs.feature new/crmsh-4.6.0+20231212.54751d3f/test/features/bootstrap_bugs.feature
--- old/crmsh-4.6.0+20231211.4b74412a/test/features/bootstrap_bugs.feature      2023-12-11 03:36:33.000000000 +0100
+++ new/crmsh-4.6.0+20231212.54751d3f/test/features/bootstrap_bugs.feature      2023-12-12 03:47:05.000000000 +0100
@@ -132,6 +132,20 @@
     When    Run "crm cluster stop" on "hanode1"
     Then    Service "corosync" is "stopped" on "hanode1"
 
+  @clean
+  Scenario: Can't stop all nodes' cluster service when local node's service is down (bsc#1213889)
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    When    Wait for DC
+    And     Run "crm cluster stop" on "hanode1"
+    And     Run "crm cluster stop --all" on "hanode1"
+    Then    Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+
   @skip_non_root
   @clean
  Scenario: crm cluster join default behavior change in ssh key handling (bsc#1210693)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20231211.4b74412a/test/unittests/test_ui_cluster.py new/crmsh-4.6.0+20231212.54751d3f/test/unittests/test_ui_cluster.py
--- old/crmsh-4.6.0+20231211.4b74412a/test/unittests/test_ui_cluster.py 2023-12-11 03:36:33.000000000 +0100
+++ new/crmsh-4.6.0+20231212.54751d3f/test/unittests/test_ui_cluster.py 2023-12-12 03:47:05.000000000 +0100
@@ -80,52 +80,94 @@
         mock_qdevice_configured.assert_called_once_with()
         mock_info.assert_called_once_with("The cluster stack started on node1")
 
-    @mock.patch('logging.Logger.info')
-    @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+    @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+    @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
     @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
-    def test_do_stop_already_stopped(self, mock_parse_nodes, mock_active, mock_info):
+    def test_do_stop_return(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc):
+        mock_parse_nodes.return_value = ["node1", "node2"]
+        mock_node_ready_to_stop_cluster_service.side_effect = [False, False]
+
         context_inst = mock.Mock()
-        mock_parse_nodes.return_value = ["node1"]
-        mock_active.side_effect = [False, False]
-        self.ui_cluster_inst.do_stop(context_inst, "node1")
-        mock_active.assert_has_calls([
-            mock.call("corosync.service", remote_addr="node1"),
-            mock.call("sbd.service", remote_addr="node1")
-            ])
-        mock_info.assert_called_once_with("The cluster stack already stopped on node1")
+        self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
+
+        mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+        mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+        mock_dc.assert_not_called()
 
     @mock.patch('logging.Logger.debug')
     @mock.patch('logging.Logger.info')
-    @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
-    @mock.patch('crmsh.utils.set_dlm_option')
-    @mock.patch('crmsh.utils.is_quorate')
-    @mock.patch('crmsh.utils.is_dlm_running')
-    @mock.patch('crmsh.utils.get_dc')
-    @mock.patch('crmsh.utils.check_function_with_timeout')
-    @mock.patch('crmsh.utils.get_property')
-    @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
+    @mock.patch('crmsh.ui_cluster.ServiceManager')
+    @mock.patch('crmsh.ui_cluster.Cluster._set_dlm')
+    @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+    @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
     @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
-    def test_do_stop(self, mock_parse_nodes, mock_active, mock_get_property, mock_check, mock_get_dc, mock_dlm_running, mock_is_quorate, mock_set_dlm, mock_stop, mock_info, mock_debug):
+    def test_do_stop(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc,
+                     mock_set_dlm, mock_service_manager, mock_info, mock_debug):
+        mock_parse_nodes.return_value = ["node1", "node2"]
+        mock_node_ready_to_stop_cluster_service.side_effect = [True, False]
+        mock_service_manager_inst = mock.Mock()
+        mock_service_manager.return_value = mock_service_manager_inst
+        mock_service_manager_inst.stop_service.side_effect = [["node1"], ["node1"], ["node1"]]
+        mock_service_manager_inst.service_is_active.return_value = True
+
         context_inst = mock.Mock()
-        mock_stop.side_effect = [["node1"], ["ndoe1"], ["node1"]]
-        mock_parse_nodes.return_value = ["node1"]
-        mock_active.side_effect = [True, True, True]
-        mock_dlm_running.return_value = True
-        mock_is_quorate.return_value = False
-        mock_get_property.return_value = "20s"
+        self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
 
-        self.ui_cluster_inst.do_stop(context_inst, "node1")
+        mock_parse_nodes.assert_called_once_with(context_inst, "node1", "node2")
+        mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), mock.call("node2")])
+        mock_debug.assert_called_once_with("stop node list: ['node1']")
+        mock_dc.assert_called_once_with("node1")
+        mock_set_dlm.assert_called_once_with("node1")
+        mock_service_manager_inst.stop_service.assert_has_calls([
+            mock.call("pacemaker", node_list=["node1"]),
+            mock.call("corosync-qdevice.service", node_list=["node1"]),
+            mock.call("corosync", node_list=["node1"]),
+            ])
+        mock_info.assert_called_once_with("The cluster stack stopped on node1")
 
-        mock_active.assert_has_calls([
+    @mock.patch('logging.Logger.info')
+    @mock.patch('crmsh.ui_cluster.ServiceManager')
+    def test_node_ready_to_stop_cluster_service_corosync(self, mock_service_manager, mock_info):
+        mock_service_manager_inst = mock.Mock()
+        mock_service_manager.return_value = mock_service_manager_inst
+        mock_service_manager_inst.service_is_active.side_effect = [False, True, False]
+        res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+        assert res is False
+        mock_service_manager_inst.service_is_active.assert_has_calls([
             mock.call("corosync.service", remote_addr="node1"),
+            mock.call("sbd.service", remote_addr="node1"),
             mock.call("pacemaker.service", remote_addr="node1"),
-            mock.call("corosync-qdevice.service")
             ])
-        mock_stop.assert_has_calls([
-            mock.call("pacemaker", node_list=["node1"]),
-            mock.call("corosync-qdevice.service", node_list=["node1"]),
-            mock.call("corosync", node_list=["node1"])
+        mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
+        mock_info.assert_called_once_with("The cluster stack stopped on node1")
+
+    @mock.patch('logging.Logger.info')
+    @mock.patch('crmsh.ui_cluster.ServiceManager')
+    def test_node_ready_to_stop_cluster_service_pacemaker(self, mock_service_manager, mock_info):
+        mock_service_manager_inst = mock.Mock()
+        mock_service_manager.return_value = mock_service_manager_inst
+        mock_service_manager_inst.service_is_active.side_effect = [True, True, False]
+        res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+        assert res is False
+        mock_service_manager_inst.service_is_active.assert_has_calls([
+            mock.call("corosync.service", remote_addr="node1"),
+            mock.call("sbd.service", remote_addr="node1"),
+            mock.call("pacemaker.service", remote_addr="node1"),
             ])
+        mock_service_manager_inst.stop_service.assert_called_once_with("corosync", remote_addr="node1")
         mock_info.assert_called_once_with("The cluster stack stopped on node1")
-        mock_debug.assert_called_once_with("Quorum is lost; Set enable_quorum_fencing=0 and enable_quorum_lockspace=0 for dlm")
-        mock_check.assert_called_once_with(mock_get_dc, wait_timeout=25)
+
+    @mock.patch('logging.Logger.info')
+    @mock.patch('crmsh.ui_cluster.ServiceManager')
+    def test_node_ready_to_stop_cluster_service(self, mock_service_manager, mock_info):
+        mock_service_manager_inst = mock.Mock()
+        mock_service_manager.return_value = mock_service_manager_inst
+        mock_service_manager_inst.service_is_active.side_effect = [True, True, True]
+        res = self.ui_cluster_inst._node_ready_to_stop_cluster_service("node1")
+        assert res is True
+        mock_service_manager_inst.service_is_active.assert_has_calls([
+            mock.call("corosync.service", remote_addr="node1"),
+            mock.call("sbd.service", remote_addr="node1"),
+            mock.call("pacemaker.service", remote_addr="node1"),
+            ])
+        mock_info.assert_not_called()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20231211.4b74412a/test/unittests/test_utils.py new/crmsh-4.6.0+20231212.54751d3f/test/unittests/test_utils.py
--- old/crmsh-4.6.0+20231211.4b74412a/test/unittests/test_utils.py      2023-12-11 03:36:33.000000000 +0100
+++ new/crmsh-4.6.0+20231212.54751d3f/test/unittests/test_utils.py      2023-12-12 03:47:05.000000000 +0100
@@ -1162,9 +1162,11 @@
     mock_run.assert_called_once_with("crm_mon -1")
 
 
-@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.sh.cluster_shell')
 def test_get_dlm_option_dict(mock_run):
-    mock_run.return_value = """
+    mock_run_inst = mock.Mock()
+    mock_run.return_value = mock_run_inst
+    mock_run_inst.get_stdout_or_raise_error.return_value = """
 key1=value1
 key2=value2
     """
@@ -1173,7 +1175,7 @@
             "key1": "value1",
             "key2": "value2"
             }
-    mock_run.assert_called_once_with("dlm_tool dump_config")
+    mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("dlm_tool dump_config", None)
 
 
 @mock.patch('crmsh.utils.get_dlm_option_dict')
@@ -1187,41 +1189,47 @@
     assert str(err.value) == '"name" is not dlm config option'
 
 
-@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.sh.cluster_shell')
 @mock.patch('crmsh.utils.get_dlm_option_dict')
 def test_set_dlm_option(mock_get_dict, mock_run):
+    mock_run_inst = mock.Mock()
+    mock_run.return_value = mock_run_inst
     mock_get_dict.return_value = {
             "key1": "value1",
             "key2": "value2"
             }
     utils.set_dlm_option(key2="test")
-    mock_run.assert_called_once_with('dlm_tool set_config "key2=test"')
+    mock_run_inst.get_stdout_or_raise_error.assert_called_once_with('dlm_tool set_config "key2=test"', None)
 
 
-@mock.patch('crmsh.xmlutil.CrmMonXmlParser')
-def test_is_dlm_configured(mock_parser):
-    mock_parser().is_resource_configured.return_value = True
+@mock.patch('crmsh.utils.has_resource_configured')
+def test_is_dlm_configured(mock_configured):
+    mock_configured.return_value = True
     assert utils.is_dlm_configured() is True
-    mock_parser().is_resource_configured.assert_called_once_with(constants.DLM_CONTROLD_RA)
+    mock_configured.assert_called_once_with(constants.DLM_CONTROLD_RA, peer=None)
 
 
-@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.sh.cluster_shell')
 def test_is_quorate_exception(mock_run):
-    mock_run.return_value = "data"
+    mock_run_inst = mock.Mock()
+    mock_run.return_value = mock_run_inst
+    mock_run_inst.get_stdout_or_raise_error.return_value = "data"
     with pytest.raises(ValueError) as err:
         utils.is_quorate()
    assert str(err.value) == "Failed to get quorate status from corosync-quorumtool"
-    mock_run.assert_called_once_with("corosync-quorumtool -s", success_exit_status={0, 2})
+    mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
 
 
-@mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
+@mock.patch('crmsh.sh.cluster_shell')
 def test_is_quorate(mock_run):
-    mock_run.return_value = """
+    mock_run_inst = mock.Mock()
+    mock_run.return_value = mock_run_inst
+    mock_run_inst.get_stdout_or_raise_error.return_value = """
 Ring ID:          1084783297/440
 Quorate:          Yes
     """
     assert utils.is_quorate() is True
-    mock_run.assert_called_once_with("corosync-quorumtool -s", success_exit_status={0, 2})
+    mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2})
 
 
 @mock.patch('crmsh.utils.etree.fromstring')
@@ -1290,12 +1298,14 @@
 
 
 @mock.patch('os.getenv')
-@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr')
+@mock.patch('crmsh.sh.cluster_shell')
 def test_get_property(mock_run, mock_env):
-    mock_run.return_value = (0, "data", None)
+    mock_run_inst = mock.Mock()
+    mock_run.return_value = mock_run_inst
+    mock_run_inst.get_rc_stdout_stderr_without_input.return_value = (0, "data", "")
     mock_env.return_value = "cib.xml"
     assert utils.get_property("no-quorum-policy") == "data"
-    mock_run.assert_called_once_with("CIB_file=cib.xml sudo --preserve-env=CIB_file crm configure get_property no-quorum-policy")
+    mock_run_inst.get_rc_stdout_stderr_without_input.assert_called_once_with(None, "CIB_file=cib.xml sudo --preserve-env=CIB_file crm configure get_property no-quorum-policy")
 
 
 @mock.patch('logging.Logger.warning')

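A closing note on the updated unit tests: because the utils helpers now obtain
their shell through the sh.cluster_shell() factory, the tests patch that
factory and assert against the Mock it returns, instead of patching
ClusterShell methods directly. A standalone sketch of the same pattern;
quorate() here is an illustrative stand-in, not crmsh code:

    from unittest import mock

    def quorate(cluster_shell, peer=None):
        # Same call shape as utils.is_quorate after the change.
        out = cluster_shell().get_stdout_or_raise_error(
            "corosync-quorumtool -s", peer)
        return "Yes" in out

    # Hand back a Mock "shell" from a mocked factory, then assert on it.
    mock_shell = mock.Mock()
    mock_shell.get_stdout_or_raise_error.return_value = "Quorate:  Yes"
    factory = mock.Mock(return_value=mock_shell)

    assert quorate(factory) is True
    mock_shell.get_stdout_or_raise_error.assert_called_once_with(
        "corosync-quorumtool -s", None)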