Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked 
in at 2022-12-16 17:51:59
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1835 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Fri Dec 16 17:51:59 2022 rev:272 rq:1043241 version:4.4.1+20221215.752a541a

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2022-12-13 18:56:27.259519319 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1835/crmsh.changes    2022-12-16 17:52:05.608163550 +0100
@@ -1,0 +2,16 @@
+Thu Dec 15 14:33:39 UTC 2022 - xli...@suse.com
+
+- Update to version 4.4.1+20221215.752a541a:
+  * Fix: behave: cleanup is not applied to nodes that have been removed from cluster
+  * Dev: behave: add behave test for `crm -F node delete`
+  * Dev: ui_cluster: unused code removal
+  * Dev: unittest: adjust unit test based on previous changes
+  * Dev: ui_node: redirect `node delete` to `cluster remove`
+
+-------------------------------------------------------------------
+Thu Dec 15 09:42:11 UTC 2022 - xli...@suse.com
+
+- Update to version 4.4.1+20221215.9fa76957:
+  * Dev: behave: Add functional test for cluster API
+
+-------------------------------------------------------------------
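
The headline change here is the `node delete` redirect noted above: `crm node delete <node>` is now a thin wrapper around `crm cluster remove [-F] -c <node>` (see the ui_node.py hunk further down). A minimal Python sketch of what that delegation amounts to; `delete_node` is a hypothetical wrapper name, and only the `crm cluster remove` invocation is taken from the diff:

    import subprocess

    def delete_node(node, force=False):
        # Mirrors NodeMgmt.do_delete below: force maps to `crm cluster remove -F`.
        if force:
            cmd = ['crm', 'cluster', 'remove', '-F', '-c', node]
        else:
            cmd = ['crm', 'cluster', 'remove', '-c', node]
        return subprocess.call(cmd) == 0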

Old:
----
  crmsh-4.4.1+20221213.6e4f7dfd.tar.bz2

New:
----
  crmsh-4.4.1+20221215.752a541a.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.CJjKKe/_old  2022-12-16 17:52:06.108166301 +0100
+++ /var/tmp/diff_new_pack.CJjKKe/_new  2022-12-16 17:52:06.112166323 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.4.1+20221213.6e4f7dfd
+Version:        4.4.1+20221215.752a541a
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.CJjKKe/_old  2022-12-16 17:52:06.160166587 +0100
+++ /var/tmp/diff_new_pack.CJjKKe/_new  2022-12-16 17:52:06.164166610 +0100
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">196bfc0a6e739054a258f38ddf87012d7ba01dcc</param>
+  <param name="changesrevision">752a541abb58dc5163732042ac2f8120739bc93e</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-4.4.1+20221213.6e4f7dfd.tar.bz2 -> crmsh-4.4.1+20221215.752a541a.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/.github/workflows/crmsh-ci.yml new/crmsh-4.4.1+20221215.752a541a/.github/workflows/crmsh-ci.yml
--- old/crmsh-4.4.1+20221213.6e4f7dfd/.github/workflows/crmsh-ci.yml    2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/.github/workflows/crmsh-ci.yml    2022-12-15 15:16:07.000000000 +0100
@@ -196,6 +196,17 @@
         sudo systemctl restart docker.service
         $DOCKER_SCRIPT `$GET_INDEX_OF healthcheck`
 
+  functional_test_cluster_api:
+    runs-on: ubuntu-20.04
+    timeout-minutes: 40
+    steps:
+    - uses: actions/checkout@v3
+    - name: functional test for cluster api
+      run:  |
+        echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
+        sudo systemctl restart docker.service
+        $DOCKER_SCRIPT `$GET_INDEX_OF cluster_api`
+
   original_regression_test:
     runs-on: ubuntu-20.04
     timeout-minutes: 40
@@ -221,6 +232,7 @@
       functional_test_constraints_bugs,
       functional_test_geo_cluster,
       functional_test_healthcheck,
+      functional_test_cluster_api,
       original_regression_test]
     runs-on: ubuntu-20.04
     timeout-minutes: 10
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/bootstrap.py new/crmsh-4.4.1+20221215.752a541a/crmsh/bootstrap.py
--- old/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/bootstrap.py        2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/crmsh/bootstrap.py        2022-12-15 15:16:07.000000000 +0100
@@ -40,7 +40,7 @@
 from . import qdevice
 from . import parallax
 from . import log
-
+from .ui_node import NodeMgmt
 
 logger = log.setup_logger(__name__)
 logger_utils = log.LoggerUtils(logger)
@@ -2001,9 +2001,8 @@
 
     # execute the command : crm node delete $HOSTNAME
     logger.info("Removing the node {}".format(node))
-    rc, _, err = invoke("crm node delete {}".format(node))
-    if not rc:
-        utils.fatal("Failed to remove {}: {}".format(node, err))
+    if not NodeMgmt.call_delnode(node):
+        utils.fatal("Failed to remove {}.".format(node))
 
     if not invokerc("sed -i /{}/d {}".format(node, CSYNC2_CFG)):
         utils.fatal("Removing the node {} from {} failed".format(node, 
CSYNC2_CFG))
@@ -2325,7 +2324,7 @@
     if _context.cluster_node == utils.this_node():
         if not force_flag:
             utils.fatal("Removing self requires --force")
-        remove_self()
+        remove_self(force_flag)
     elif _context.cluster_node in xmlutil.listnodes():
         remove_node_from_cluster()
     else:
@@ -2334,14 +2333,14 @@
     bootstrap_finished()
 
 
-def remove_self():
+def remove_self(force_flag=False):
     me = _context.cluster_node
     yes_to_all = _context.yes_to_all
     nodes = xmlutil.listnodes(include_remote_nodes=False)
     othernode = next((x for x in nodes if x != me), None)
     if othernode is not None:
         # remove from other node
-        cmd = "crm cluster remove{} -c {}".format(" -y" if yes_to_all else "", 
me)
+        cmd = "crm{} cluster remove{} -c {}".format(" -F" if force_flag else 
"", " -y" if yes_to_all else "", me)
         rc = utils.ext_cmd_nosudo("ssh{} {} {} '{}'".format("" if yes_to_all 
else " -t", SSH_OPTION, othernode, cmd))
         if rc != 0:
             utils.fatal("Failed to remove this node from {}".format(othernode))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/ui_cluster.py new/crmsh-4.4.1+20221215.752a541a/crmsh/ui_cluster.py
--- old/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/ui_cluster.py       2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/crmsh/ui_cluster.py       2022-12-15 15:16:07.000000000 +0100
@@ -13,6 +13,7 @@
 from . import corosync
 from . import qdevice
 from .cibconfig import cib_factory
+from .ui_node import parse_option_for_nodes
 from . import constants
 
 
@@ -20,11 +21,6 @@
 logger = log.setup_logger(__name__)
 
 
-class ArgParser(ArgumentParser):
-    def format_epilog(self, formatter):
-        return self.epilog or ""
-
-
 def parse_options(parser, args):
     try:
         options, args = parser.parse_known_args(list(args))
@@ -37,60 +33,6 @@
     return options, args
 
 
-def parse_option_for_nodes(context, *args):
-    """
-    Parse option for nodes
-    Return a node list
-    """
-    action_type = context.get_command_name()
-    action_target = "node" if action_type in ["standby", "online"] else "cluster service"
-    action = "{} {}".format(action_type, action_target)
-    usage_template = """
-Specify node(s) on which to {action}.
-If no nodes are specified, {action} on the local node.
-If --all is specified, {action} on all nodes."""
-    addtion_usage = ""
-    if action_type == "standby":
-        usage_template += """
-\n\nAdditionally, you may specify a lifetime for the standby---if set to
-"reboot", the node will be back online once it reboots. "forever" will
-keep the node in standby after reboot. The life time defaults to
-"forever"."""
-        addtion_usage = " [lifetime]"
-
-    parser = ArgParser(description=usage_template.format(action=action),
-            usage="{} [--all | <node>... ]{}".format(action_type, addtion_usage),
-            add_help=False,
-            formatter_class=RawDescriptionHelpFormatter)
-    parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
-    parser.add_argument("--all", help="To {} on all nodes".format(action), action="store_true", dest="all")
-
-    options, args = parse_options(parser, args)
-    if options is None or args is None:
-        raise utils.TerminateSubCommand
-    if options.all and args:
-        context.fatal_error("Should either use --all or specific node(s)")
-
-    # return local node
-    if not options.all and not args:
-        return [utils.this_node()]
-    member_list = utils.list_cluster_nodes()
-    if not member_list:
-        context.fatal_error("Cannot get the node list from cluster")
-    for node in args:
-        if node not in member_list:
-            context.fatal_error("Node \"{}\" is not a cluster 
node".format(node))
-
-    node_list = member_list if options.all else args
-    for node in node_list:
-        try:
-            utils.ping_node(node)
-        except ValueError as err:
-            logger.warning(str(err))
-            node_list.remove(node)
-    return node_list
-
-
 def _remove_completer(args):
     try:
         n = utils.list_cluster_nodes()
@@ -276,7 +218,7 @@
         if len(args) > 0:
             if '--dry-run' in args or looks_like_hostnames(args):
-                args = ['--yes', '--nodes'] + [arg for arg in args if arg != '--dry-run']
-        parser = ArgParser(description="""
+        parser = ArgumentParser(description="""
 Initialize a cluster from scratch. This command configures
 a complete cluster, and can also add additional cluster
 nodes to the initial one-node cluster using the --nodes
@@ -435,7 +377,7 @@
         '''
         Join this node to an existing cluster
         '''
-        parser = ArgParser(description="""
+        parser = ArgumentParser(description="""
 Join the current node to an existing cluster. The
 current node cannot be a member of a cluster already.
 Pass any node in the existing cluster as the argument
@@ -493,7 +435,7 @@
         '''
         Remove the given node(s) from the cluster.
         '''
-        parser = ArgParser(description="""
+        parser = ArgumentParser(description="""
 Remove one or more nodes from the cluster.
 
 This command can remove the last node in the cluster,
@@ -579,7 +521,7 @@
         * arbitrator IP / hostname (optional)
         * list of tickets (can be empty)
         '''
-        parser = ArgParser(description="""
+        parser = ArgumentParser(description="""
 Create a new geo cluster with the current cluster as the
 first member. Pass the complete geo cluster topology as
 arguments to this command, and then use geo-join and
@@ -642,7 +584,7 @@
         '''
         Join this cluster to a geo configuration.
         '''
-        parser = ArgParser(description="""
+        parser = ArgumentParser(description="""
 This command should be run from one of the nodes in a cluster
 which is currently not a member of a geo cluster. The geo
 cluster configuration will be fetched from the provided node,
@@ -686,7 +628,7 @@
         '''
         Make this node a geo arbitrator.
         '''
-        parser = ArgParser(description="""
+        parser = ArgumentParser(description="""
 Configure the current node as a geo arbitrator. The command
 requires an existing geo cluster or geo arbitrator from which
 to get the geo cluster configuration.""",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/ui_node.py new/crmsh-4.4.1+20221215.752a541a/crmsh/ui_node.py
--- old/crmsh-4.4.1+20221213.6e4f7dfd/crmsh/ui_node.py  2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/crmsh/ui_node.py  2022-12-15 15:16:07.000000000 +0100
@@ -3,6 +3,9 @@
 # See COPYING for license information.
 
 import re
+import subprocess
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+
 from . import config
 from . import command
 from . import completers as compl
@@ -14,7 +17,6 @@
 from . import term
 from .cibconfig import cib_factory
 from .ui_resource import rm_meta_attribute
-from .ui_cluster import parse_option_for_nodes
 from . import log
 
 
@@ -203,6 +205,63 @@
         print(term.render("\t%s" % (s)))
 
 
+def parse_option_for_nodes(context, *args):
+    """
+    Parse option for nodes
+    Return a node list
+    """
+    action_type = context.get_command_name()
+    action_target = "node" if action_type in ["standby", "online"] else "cluster service"
+    action = "{} {}".format(action_type, action_target)
+    usage_template = """
+Specify node(s) on which to {action}.
+If no nodes are specified, {action} on the local node.
+If --all is specified, {action} on all nodes."""
+    addtion_usage = ""
+    if action_type == "standby":
+        usage_template += """
+\n\nAdditionally, you may specify a lifetime for the standby---if set to
+"reboot", the node will be back online once it reboots. "forever" will
+keep the node in standby after reboot. The life time defaults to
+"forever"."""
+        addtion_usage = " [lifetime]"
+
+    parser = ArgumentParser(description=usage_template.format(action=action),
+                       usage="{} [--all | <node>... ]{}".format(action_type, addtion_usage),
+                       add_help=False,
+                       formatter_class=RawDescriptionHelpFormatter)
+    parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
+    parser.add_argument("--all", help="To {} on all nodes".format(action), action="store_true", dest="all")
+
+    options, args = parser.parse_known_args(args)
+    if options.help:
+        parser.print_help()
+        raise utils.TerminateSubCommand
+    if options is None or args is None:
+        raise utils.TerminateSubCommand
+    if options.all and args:
+        context.fatal_error("Should either use --all or specific node(s)")
+
+    # return local node
+    if not options.all and not args:
+        return [utils.this_node()]
+    member_list = utils.list_cluster_nodes()
+    if not member_list:
+        context.fatal_error("Cannot get the node list from cluster")
+    for node in args:
+        if node not in member_list:
+            context.fatal_error("Node \"{}\" is not a cluster 
node".format(node))
+
+    node_list = member_list if options.all else args
+    for node in node_list:
+        try:
+            utils.ping_node(node)
+        except ValueError as err:
+            logger.warning(str(err))
+            node_list.remove(node)
+    return node_list
+
+
 class NodeMgmt(command.UI):
     '''
     Nodes management class
@@ -432,19 +491,20 @@
             return utils.ext_cmd(self.node_clear_state % ("-M -c", node, node)) == 0 and \
                 utils.ext_cmd(self.node_clear_state % ("-R", node, node)) == 0
 
-    def _call_delnode(self, node):
+    @classmethod
+    def call_delnode(cls, node):
         "Remove node (how depends on cluster stack)"
         rc = True
-        ec, s = utils.get_stdout("%s -p" % self.crm_node)
+        ec, s = utils.get_stdout("%s -p" % cls.crm_node)
         if not s:
-            logger.error('%s -p could not list any nodes (rc=%d)', self.crm_node, ec)
+            logger.error('%s -p could not list any nodes (rc=%d)', cls.crm_node, ec)
             rc = False
         else:
             partition_l = s.split()
             if node in partition_l:
-                logger.error("according to %s, node %s is still active", 
self.crm_node, node)
+                logger.error("according to %s, node %s is still active", 
cls.crm_node, node)
                 rc = False
-        cmd = "%s --force -R %s" % (self.crm_node, node)
+        cmd = "%s --force -R %s" % (cls.crm_node, node)
         if not rc:
             if config.core.force:
                 logger.info('proceeding with node %s removal', node)
@@ -458,24 +518,21 @@
             if rc != 0:
                 logger.error('"%s" failed, rc=%d, %s', cmd, rc, err)
                 return False
+        if utils.ext_cmd(cls.node_delete % node) != 0 or \
+                utils.ext_cmd(cls.node_delete_status % node) != 0:
+            logger.error("%s removed from membership, but not from CIB!", node)
+            return False
         return True
 
     @command.completers(compl.nodes)
     def do_delete(self, context, node):
         'usage: delete <node>'
-        if not utils.is_name_sane(node):
-            return False
-        if not xmlutil.is_our_node(node):
-            logger.error("node %s not found in the CIB", node)
-            return False
-        if not self._call_delnode(node):
-            return False
-        if utils.ext_cmd(self.node_delete % node) != 0 or \
-                utils.ext_cmd(self.node_delete_status % node) != 0:
-            logger.error("%s removed from membership, but not from CIB!", node)
-            return False
-        logger.info("node %s deleted", node)
-        return True
+        logger.warning('`crm node delete` is deprecated and will very likely be dropped in the near future. It is auto-replaced as `crm cluster remove -c {}`.'.format(node))
+        if config.core.force:
+            rc = subprocess.call(['crm', 'cluster', 'remove', '-F', '-c', node])
+        else:
+            rc = subprocess.call(['crm', 'cluster', 'remove', '-c', node])
+        return rc == 0
 
     @command.wait
     @command.completers(compl.nodes, compl.choice(['set', 'delete', 'show']), _find_attr)
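
The `_call_delnode` to `call_delnode` change above turns the helper into a classmethod so that bootstrap.py can call it without constructing the node UI object. A minimal sketch of that pattern; apart from the `NodeMgmt.call_delnode` and `crm_node` names taken from the diff, the body is illustrative only:

    class NodeMgmt:
        crm_node = "crm_node"  # class-level attribute, reachable via cls

        @classmethod
        def call_delnode(cls, node):
            # cls.crm_node resolves the same whether called as
            # NodeMgmt.call_delnode(node) from bootstrap.py or via an instance.
            return "%s --force -R %s" % (cls.crm_node, node)

    NodeMgmt.call_delnode("hanode2")  # no instance needed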
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/data-manifest new/crmsh-4.4.1+20221215.752a541a/data-manifest
--- old/crmsh-4.4.1+20221213.6e4f7dfd/data-manifest     2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/data-manifest     2022-12-15 15:16:07.000000000 +0100
@@ -70,6 +70,7 @@
 test/features/bootstrap_options.feature
 test/features/bootstrap_sbd_delay.feature
 test/features/bootstrap_sbd_normal.feature
+test/features/cluster_api.feature
 test/features/configure_bugs.feature
 test/features/constraints_bugs.feature
 test/features/crm_report_bugs.feature
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/doc/crm.8.adoc new/crmsh-4.4.1+20221215.752a541a/doc/crm.8.adoc
--- old/crmsh-4.4.1+20221213.6e4f7dfd/doc/crm.8.adoc    2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/doc/crm.8.adoc    2022-12-15 15:16:07.000000000 +0100
@@ -2077,13 +2077,10 @@
 clearstate <node>
 ...............
 
-[[cmdhelp_node_delete,delete node]]
+[[cmdhelp_node_delete,delete node (deprecated)]]
 ==== `delete`
 
-Delete a node. This command will remove the node from the CIB
-and, in case the cluster stack is running, use the appropriate
-program (`crm_node` or `hb_delnode`) to remove the node from the
-membership.
+Remove a node from cluster.
 
 If the node is still listed as active and a member of our
 partition we refuse to remove it. With the global force option
@@ -2094,7 +2091,15 @@
 delete <node>
 ...............
 
+.Deprecation note
+*****
+This command is deprecated in favor of `crm cluster remove [-F] -c <node>`,
+which will adjust the related cluster configurations and clean up the leftovers
+(e.g. stopping the cluster services) on the removed node.
+*****
+
 [[cmdhelp_node_fence,fence node]]
+
 ==== `fence`
 
 Make CRM fence a node. This functionality depends on stonith
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/bootstrap_init_join_remove.feature new/crmsh-4.4.1+20221215.752a541a/test/features/bootstrap_init_join_remove.feature
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/bootstrap_init_join_remove.feature  2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/features/bootstrap_init_join_remove.feature  2022-12-15 15:16:07.000000000 +0100
@@ -2,11 +2,11 @@
 Feature: crmsh bootstrap process - init, join and remove
 
   Test crmsh bootstrap init/join/remove process
-  Tag @clean means need to stop cluster service if the service is available
   Need nodes: hanode1 hanode2
 
   Background: Setup a two nodes cluster
-    Given   Cluster service is "stopped" on "hanode1"
+    Given   Nodes ["hanode1", "hanode2"] are cleaned up
+    And     Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
     When    Run "crm cluster init -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
@@ -16,10 +16,8 @@
     And     Online nodes are "hanode1 hanode2"
     And     Show cluster status on "hanode1"
 
-  @clean
   Scenario: Init cluster service on node "hanode1", and join on node "hanode2"
 
-  @clean
   Scenario: Support --all or specific node to manage cluster and nodes
     When    Run "crm node standby --all" on "hanode1"
     Then    Node "hanode1" is standby
@@ -47,7 +45,6 @@
     Then    Cluster service is "started" on "hanode1"
     And     Cluster service is "started" on "hanode2"
 
-  @clean
   Scenario: Remove peer node "hanode2"
     When    Run "crm configure primitive d1 Dummy" on "hanode1"
     When    Run "crm configure primitive d2 Dummy" on "hanode2"
@@ -75,7 +72,6 @@
     Then    Directory "/var/lib/pacemaker/pengine/" is empty on "hanode2"
     Then    Directory "/var/lib/corosync/" is empty on "hanode2"
 
-  @clean
   Scenario: Remove local node "hanode1"
     When    Run "crm configure primitive d1 Dummy" on "hanode1"
     When    Run "crm configure primitive d2 Dummy" on "hanode1"
@@ -92,6 +88,59 @@
     Then    Cluster service is "stopped" on "hanode1"
     And     Cluster service is "started" on "hanode2"
     And     Show cluster status on "hanode2"
+    Then    File "/etc/csync2/csync2.cfg" not exist on "hanode1"
+    Then    File "/etc/csync2/key_hagroup" not exist on "hanode1"
+    Then    File "/etc/corosync/authkey" not exist on "hanode1"
+    Then    File "/etc/corosync/corosync.conf" not exist on "hanode1"
+    Then    File "/etc/pacemaker/authkey" not exist on "hanode1"
+    Then    Directory "/var/lib/csync2/" is empty on "hanode1"
+    Then    Directory "/var/lib/pacemaker/cib/" is empty on "hanode1"
+    Then    Directory "/var/lib/pacemaker/pengine/" is empty on "hanode1"
+    Then    Directory "/var/lib/corosync/" is empty on "hanode1"
+
+  Scenario: Remove peer node "hanode2" with `crm -F node delete`
+    When    Run "crm configure primitive d1 Dummy" on "hanode1"
+    When    Run "crm configure primitive d2 Dummy" on "hanode2"
+    Then    File "/etc/csync2/csync2.cfg" exists on "hanode2"
+    Then    File "/etc/csync2/key_hagroup" exists on "hanode2"
+    Then    File "/etc/corosync/authkey" exists on "hanode2"
+    Then    File "/etc/corosync/corosync.conf" exists on "hanode2"
+    Then    File "/etc/pacemaker/authkey" exists on "hanode2"
+    Then    Directory "/var/lib/csync2/" not empty on "hanode2"
+    Then    Directory "/var/lib/pacemaker/cib/" not empty on "hanode2"
+    Then    Directory "/var/lib/pacemaker/pengine/" not empty on "hanode2"
+    Then    Directory "/var/lib/corosync/" not empty on "hanode2"
+    When    Run "crm -F cluster remove hanode2" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    And     Online nodes are "hanode1"
+    And     Show cluster status on "hanode1"
+    Then    File "/etc/csync2/csync2.cfg" not exist on "hanode2"
+    Then    File "/etc/csync2/key_hagroup" not exist on "hanode2"
+    Then    File "/etc/corosync/authkey" not exist on "hanode2"
+    Then    File "/etc/corosync/corosync.conf" not exist on "hanode2"
+    Then    File "/etc/pacemaker/authkey" not exist on "hanode2"
+    Then    Directory "/var/lib/csync2/" is empty on "hanode2"
+    Then    Directory "/var/lib/pacemaker/cib/" is empty on "hanode2"
+    Then    Directory "/var/lib/pacemaker/pengine/" is empty on "hanode2"
+    Then    Directory "/var/lib/corosync/" is empty on "hanode2"
+
+  Scenario: Remove local node "hanode1" with `crm -F node delete`
+    When    Run "crm configure primitive d1 Dummy" on "hanode1"
+    When    Run "crm configure primitive d2 Dummy" on "hanode1"
+    Then    File "/etc/csync2/csync2.cfg" exists on "hanode1"
+    Then    File "/etc/csync2/key_hagroup" exists on "hanode1"
+    Then    File "/etc/corosync/authkey" exists on "hanode1"
+    Then    File "/etc/corosync/corosync.conf" exists on "hanode1"
+    Then    File "/etc/pacemaker/authkey" exists on "hanode1"
+    Then    Directory "/var/lib/csync2/" not empty on "hanode1"
+    Then    Directory "/var/lib/pacemaker/cib/" not empty on "hanode1"
+    Then    Directory "/var/lib/pacemaker/pengine/" not empty on "hanode1"
+    Then    Directory "/var/lib/corosync/" not empty on "hanode1"
+    When    Run "crm -F node delete hanode1" on "hanode1"
+    Then    Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "started" on "hanode2"
+    And     Show cluster status on "hanode2"
     Then    File "/etc/csync2/csync2.cfg" not exist on "hanode1"
     Then    File "/etc/csync2/key_hagroup" not exist on "hanode1"
     Then    File "/etc/corosync/authkey" not exist on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/bootstrap_sbd_normal.feature new/crmsh-4.4.1+20221215.752a541a/test/features/bootstrap_sbd_normal.feature
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/bootstrap_sbd_normal.feature        2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/features/bootstrap_sbd_normal.feature        2022-12-15 15:16:07.000000000 +0100
@@ -158,3 +158,22 @@
     Then    Cluster service is "started" on "hanode2"
     And     Service "sbd" is "started" on "hanode2"
     And     Resource "stonith-sbd" type "external/sbd" is "Started"
+
+  @clean
+  Scenario: Setup sbd and test fence node
+    Given   Has disk "/dev/sda1" on "hanode1"
+    Given   Cluster service is "stopped" on "hanode1"
+    Given   Has disk "/dev/sda1" on "hanode2"
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "sbd" is "started" on "hanode1"
+    And     Resource "stonith-sbd" type "external/sbd" is "Started"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Service "sbd" is "started" on "hanode2"
+    When    Run "stonith_admin -H hanode2 -c" on "hanode1"
+    When    Run "su hacluster -c 'crm -F node fence hanode2'" on "hanode1"
+    Then    Expected return code is "0"
+    Then    Node "hanode2" is UNCLEAN
+    Then    Wait "60" seconds for "hanode2" successfully fenced
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/cluster_api.feature new/crmsh-4.4.1+20221215.752a541a/test/features/cluster_api.feature
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/cluster_api.feature 1970-01-01 01:00:00.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/features/cluster_api.feature 2022-12-15 15:16:07.000000000 +0100
@@ -0,0 +1,140 @@
+@cluster_api
+Feature: Functional test to cover SAP clusterAPI
+
+  To avoid possible regression on crmsh side when adapting SAP Applications 
+  Tag @clean means need to stop cluster service if the service is available
+  Need nodes: hanode1 hanode2
+
+  Background: Setup a two nodes cluster
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Show cluster status on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Online nodes are "hanode1 hanode2"
+    When    Run "crm configure primitive d Dummy" on "hanode1"
+    And     Wait "3" seconds
+    Then    Resource "d" type "Dummy" is "Started"
+    And     Show cluster status on "hanode1"
+
+  @clean
+  Scenario: Start and stop resource by hacluster
+    When    Run "su hacluster -c 'crm resource stop d'" on "hanode1"
+    Then    Expected return code is "0"
+    When    Wait "3" seconds
+    Then    Resource "d" type "Dummy" is "Stopped"
+    And     Show cluster status on "hanode1"
+    When    Run "su hacluster -c 'crm resource start d'" on "hanode1"
+    Then    Expected return code is "0"
+    When    Wait "3" seconds
+    Then    Resource "d" type "Dummy" is "Started"
+    And     Show cluster status on "hanode1"
+
+  @clean
+  Scenario: Resource move by hacluster
+    Given   Resource "d" is started on "hanode1"
+    # move <res> <node>
+    When    Run "su hacluster -c 'crm resource move d hanode2'" on "hanode1"
+    Then    Expected return code is "0"
+    When    Run "sleep 2" on "hanode1"
+    Then    Resource "d" is started on "hanode2"
+    When    Run "su hacluster -c 'crm resource clear d'" on "hanode1"
+    Then    Expected return code is "0"
+
+    # move <res> <node> force
+    When    Run "su hacluster -c 'crm resource move d hanode1'" on "hanode1"
+    Then    Expected return code is "0"
+    When    Run "sleep 2" on "hanode1"
+    Then    Resource "d" is started on "hanode1"
+    When    Run "su hacluster -c 'crm resource clear d'" on "hanode1"
+    Then    Expected return code is "0"
+
+    # move <res> force
+    When    Run "su hacluster -c 'crm resource move d force'" on "hanode1"
+    Then    Expected return code is "0"
+    When    Run "sleep 2" on "hanode1"
+    Then    Resource "d" is started on "hanode2"
+    When    Run "su hacluster -c 'crm resource clear d'" on "hanode1"
+    Then    Expected return code is "0"
+
+    # move <res> <lifetime> force
+    When    Run "su hacluster -c 'crm resource move d PT5M force'" on "hanode1"
+    Then    Expected return code is "0"
+    When    Run "sleep 2" on "hanode1"
+    Then    Resource "d" is started on "hanode1"
+    When    Run "su hacluster -c 'crm resource clear d'" on "hanode1"
+    Then    Expected return code is "0"
+
+    # move <res> <node> <lifetime>
+    When    Run "su hacluster -c 'crm resource move d hanode2 PT5M'" on 
"hanode1"
+    Then    Expected return code is "0"
+    When    Run "sleep 2" on "hanode1"
+    Then    Resource "d" is started on "hanode2"
+    When    Run "su hacluster -c 'crm resource clear d'" on "hanode1"
+    Then    Expected return code is "0"
+
+    # move <res> <node> <lifetime> force
+    When    Run "su hacluster -c 'crm resource move d hanode1 PT5M force'" on 
"hanode1"
+    Then    Expected return code is "0"
+    When    Run "sleep 2" on "hanode1"
+    Then    Resource "d" is started on "hanode1"
+    When    Run "su hacluster -c 'crm resource clear d'" on "hanode1"
+    Then    Expected return code is "0"
+
+    When    Try "crm resource move d hanode2 PT5M force xxx"
+    Then    Except "ERROR: resource.move: usage: move <rsc> [<node>] 
[<lifetime>] [force]"
+    When    Try "crm resource move d hanode2 PT5M forcd"
+    Then    Except "ERROR: resource.move: usage: move <rsc> [<node>] 
[<lifetime>] [force]"
+    When    Try "crm resource move d xxxx PT5M force"
+    Then    Except "ERROR: resource.move: Not our node: xxxx"
+    When    Try "crm resource move d"
+    Then    Except "ERROR: resource.move: No target node: Move requires either 
a target node or 'force'"
+
+  @clean
+  Scenario: Run "crm configure show" by hacluster
+    When    Run "crm configure primitive d2 Dummy op monitor interval=10s 
timeout=20s on-fail=restart params fake=test meta resource-stickiness=5000" on 
"hanode1"
+    And     Run "crm configure group g d2 meta resource-stickiness=3000" on 
"hanode1"
+    And     Wait "3" seconds
+    Then    Resource "d2" type "Dummy" is "Started"
+    And     Show cluster status on "hanode1"
+    When    Run "su hacluster -c 'crm configure show'" on "hanode1"
+    Then    Expected return code is "0"
+    And     Expected multiple lines in output
+      """
+      primitive d2 Dummy \
+       params fake=test \
+       meta resource-stickiness=5000 \
+       op monitor interval=10s timeout=20s on-fail=restart \
+       op start timeout=20s interval=0s \
+       op stop timeout=20s interval=0s
+      group g d2 \
+       meta resource-stickiness=3000
+      """
+
+  @clean
+  Scenario: pacemaker ACL related operations by hacluster
+    When    Run "su hacluster -c 'crm configure primitive d2 Dummy'" on 
"hanode1"
+    And     Wait "3" seconds
+    Then    Resource "d2" type "Dummy" is "Started"
+    When    Run "su hacluster -c 'crm maintenance on'" on "hanode1"
+    When    Run "crm_mon -1" on "hanode1"
+    Then    Expected "Resource management is DISABLED" in stdout
+    When    Run "su hacluster -c 'crm maintenance off'" on "hanode1"
+    When    Run "crm_mon -1" on "hanode1"
+    Then    Expected "Resource management is DISABLED" not in stdout
+    When    Run "su hacluster -c 'crm node standby hanode2'" on "hanode1"
+    Then    Node "hanode2" is standby
+    When    Run "su hacluster -c 'crm node online hanode2'" on "hanode1"
+    Then    Node "hanode2" is online
+    When    Run "su hacluster -c 'crm ra providers Dummy'" on "hanode1"
+    Then    Expected "heartbeat pacemaker" in stdout
+    When    Run "su hacluster -c 'crm status'" on "hanode1"
+    Then    Expected "Online: [ hanode1 hanode2 ]" in stdout
+    When    Run "su - hacluster -c '/usr/sbin/crm report /tmp/report'" on 
"hanode1"
+    Then    File "/tmp/report.tar.bz2" exists on "hanode1"
+    And     Directory "hanode1" in "/tmp/report.tar.bz2"
+    And     Directory "hanode2" in "/tmp/report.tar.bz2"
+    And     File "pacemaker.log" in "/tmp/report.tar.bz2"
+    And     File "corosync.conf" in "/tmp/report.tar.bz2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/steps/step_implementation.py new/crmsh-4.4.1+20221215.752a541a/test/features/steps/step_implementation.py
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/steps/step_implementation.py        2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/features/steps/step_implementation.py        2022-12-15 15:16:07.000000000 +0100
@@ -3,14 +3,26 @@
 import os
 import datetime
 import yaml
+
+import behave
 from behave import given, when, then
 from crmsh import corosync, parallax, sbd
 from crmsh import utils as crmutils
 from utils import check_cluster_state, check_service_state, online, run_command, me, \
                   run_command_local_or_remote, file_in_archive, \
-                  assert_eq
+                  assert_eq, is_unclean
 import const
 
+
+def _parse_str(text):
+    return text[1:-1].encode('utf-8').decode('unicode_escape')
+_parse_str.pattern='".*"'
+
+
+behave.use_step_matcher("cfparse")
+behave.register_type(str=_parse_str)
+
+
 @when('Write multi lines to file "{f}"')
 def step_impl(context, f):
     with open(f, 'w') as fd:
@@ -21,6 +33,20 @@
     assert check_cluster_state(context, state, addr) is True
 
 
+@given('Nodes [{nodes:str+}] are cleaned up')
+def step_impl(context, nodes):
+    run_command(context, 'crm resource cleanup || true')
+    for node in nodes:
+        # wait for ssh service
+        for _ in range(10):
+            rc, _, _ = crmutils.get_stdout_stderr('ssh {} true'.format(node))
+            if rc == 0:
+                break
+            time.sleep(1)
+        run_command_local_or_remote(context, "crm cluster stop {} || true".format(node), node)
+        assert check_cluster_state(context, 'stopped', node) is True
+
+
 @given('Service "{name}" is "{state}" on "{addr}"')
 def step_impl(context, name, state, addr):
     assert check_service_state(context, name, state, addr) is True
@@ -427,3 +453,21 @@
 @then('Directory "{path}" not empty on "{node}"')
 def step_impl(context, path, node):
     parallax.parallax_call([node], '[ "$(ls -A {})" ]'.format(path))
+
+
+@then('Node "{node}" is UNCLEAN')
+def step_impl(context, node):
+    assert is_unclean(node) is True
+
+
+@then('Wait "{count}" seconds for "{node}" successfully fenced')
+def step_impl(context, count, node):
+    index = 0
+    while index <= int(count):
+        rc, out, _ = crmutils.get_stdout_stderr("stonith_admin -h 
{}".format(node))
+        if "Node {} last fenced at:".format(node) in out:
+            return True
+        time.sleep(1)
+        index += 1
+    return False
+
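
The `behave.use_step_matcher("cfparse")` and `behave.register_type(str=_parse_str)` calls above are what make the new `[{nodes:str+}]` step pattern work: the `str+` cardinality collects one or more comma-separated quoted values into a Python list. A small self-contained sketch of the same mechanism, assuming a feature line such as `Given Nodes ["hanode1", "hanode2"] are cleaned up` (node names and the print are illustrative):

    import behave
    from behave import given

    def _parse_str(text):
        # strip the surrounding double quotes from each matched value
        return text[1:-1]
    _parse_str.pattern = '".*"'

    behave.use_step_matcher("cfparse")
    behave.register_type(str=_parse_str)

    @given('Nodes [{nodes:str+}] are cleaned up')
    def step_impl(context, nodes):
        # nodes arrives as a list, e.g. ['hanode1', 'hanode2']
        for node in nodes:
            print(node)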
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/steps/utils.py new/crmsh-4.4.1+20221215.752a541a/test/features/steps/utils.py
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/features/steps/utils.py      2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/features/steps/utils.py      2022-12-15 15:16:07.000000000 +0100
@@ -51,7 +51,8 @@
 
 def run_command_local_or_remote(context, cmd, addr, err_record=False):
     if addr == me():
-        _, out = run_command(context, cmd, err_record)
+        rc, out = run_command(context, cmd, err_record)
+        context.return_code = rc
         return out
     else:
         try:
@@ -63,6 +64,7 @@
             context.logger.error("\n{}\n".format(err))
             context.failed = True
         else:
+            context.return_code = 0
             return utils.to_ascii(results[0][1][1])
 
 
@@ -88,6 +90,11 @@
     return check_service_state(context, 'pacemaker.service', state, addr)
 
 
+def is_unclean(node):
+    rc, out, err = utils.get_stdout_stderr("crm_mon -1")
+    return "{}: UNCLEAN".format(node) in out
+
+
 def online(context, nodelist):
     rc = True
     _, out = utils.get_stdout("crm_node -l")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/run-functional-tests new/crmsh-4.4.1+20221215.752a541a/test/run-functional-tests
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/run-functional-tests 2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/run-functional-tests 2022-12-15 15:16:07.000000000 +0100
@@ -183,7 +183,7 @@
 deploy_ha_node() {
        node_name=$1
        docker_options="-d --name $node_name -h $node_name --privileged 
--shm-size 1g"
-       make_cmd="cd $PROJECT_INSIDE;./autogen.sh && ./configure --prefix /usr 
&& make install && make install-crmconfDATA prefix="
+       make_cmd="cd $PROJECT_INSIDE;./autogen.sh && ./configure --prefix /usr 
&& make install && make install-crmconfDATA prefix= && cp /usr/bin/crm 
/usr/sbin"
 
        info "Deploying \"$node_name\"..."
        docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
@@ -445,6 +445,6 @@
        CONFIG_COROSYNC_FLAG=0
        setup_cluster ${node_arry[@]}
        adjust_test_case ${node_arry[0]} $case_file_in_container
-       docker_exec ${node_arry[0]} "behave $case_file_in_container || exit 1" || exit 1
+       docker_exec ${node_arry[0]} "behave --no-logcapture $case_file_in_container || exit 1" || exit 1
        echo
 done
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_bootstrap.py new/crmsh-4.4.1+20221215.752a541a/test/unittests/test_bootstrap.py
--- old/crmsh-4.4.1+20221213.6e4f7dfd/test/unittests/test_bootstrap.py  2022-12-13 09:03:40.000000000 +0100
+++ new/crmsh-4.4.1+20221215.752a541a/test/unittests/test_bootstrap.py  2022-12-15 15:16:07.000000000 +0100
@@ -15,6 +15,8 @@
 import yaml
 import socket
 
+from crmsh.ui_node import NodeMgmt
+
 try:
     from unittest import mock
 except ImportError:
@@ -1361,7 +1363,7 @@
         mock_confirm.assert_not_called()
         mock_this_node.assert_called_once_with()
         mock_error.assert_not_called()
-        mock_self.assert_called_once_with()
+        mock_self.assert_called_once_with(True)
 
     @mock.patch('crmsh.xmlutil.listnodes')
     @mock.patch('crmsh.utils.this_node')
@@ -1502,14 +1504,15 @@
             mock.call("csync2.socket", disable=True, remote_addr=None)
             ])
 
+    @mock.patch.object(NodeMgmt, 'call_delnode')
     @mock.patch('crmsh.bootstrap.rm_configuration_files')
     @mock.patch('crmsh.utils.fatal')
     @mock.patch('crmsh.bootstrap.invoke')
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.bootstrap.stop_services')
     @mock.patch('crmsh.bootstrap.set_cluster_node_ip')
-    def test_remove_node_from_cluster_rm_node_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_error, mock_rm_conf_files):
-        mock_invoke.side_effect = [(False, None, "error data")]
+    def test_remove_node_from_cluster_rm_node_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_error, mock_rm_conf_files, mock_call_delnode):
+        mock_call_delnode.return_value = False
         mock_error.side_effect = SystemExit
 
         with self.assertRaises(SystemExit):
@@ -1519,11 +1522,11 @@
         mock_get_ip.assert_called_once_with()
         mock_status.assert_called_once_with("Removing the node node1")
         mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
-        mock_invoke.assert_has_calls([
-            mock.call('crm node delete node1')
-            ])
-        mock_error.assert_called_once_with("Failed to remove node1: error data")
+        mock_invoke.assert_not_called()
+        mock_call_delnode.assert_called_once_with("node1")
+        mock_error.assert_called_once_with("Failed to remove node1.")
 
+    @mock.patch.object(NodeMgmt, 'call_delnode')
     @mock.patch('crmsh.bootstrap.rm_configuration_files')
     @mock.patch('crmsh.utils.fatal')
     @mock.patch('crmsh.bootstrap.invokerc')
@@ -1531,8 +1534,8 @@
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.bootstrap.stop_services')
     @mock.patch('crmsh.bootstrap.set_cluster_node_ip')
-    def test_remove_node_from_cluster_rm_csync_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_invokerc, mock_error, mock_rm_conf_files):
-        mock_invoke.side_effect = [(True, None, None)]
+    def test_remove_node_from_cluster_rm_csync_failed(self, mock_get_ip, mock_stop, mock_status, mock_invoke, mock_invokerc, mock_error, mock_rm_conf_files, mock_call_delnode):
+        mock_call_delnode.return_value = True
         mock_invokerc.return_value = False
         mock_error.side_effect = SystemExit
 
@@ -1543,14 +1546,14 @@
         mock_get_ip.assert_called_once_with()
         mock_status.assert_called_once_with("Removing the node node1")
         mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
-        mock_invoke.assert_has_calls([
-            mock.call('crm node delete node1')
-            ])
+        mock_invoke.assert_not_called()
+        mock_call_delnode.assert_called_once_with("node1")
         mock_invokerc.assert_has_calls([
             mock.call("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG))
             ])
         mock_error.assert_called_once_with("Removing the node node1 from {} 
failed".format(bootstrap.CSYNC2_CFG))
 
+    @mock.patch.object(NodeMgmt, 'call_delnode')
     @mock.patch('crmsh.bootstrap.rm_configuration_files')
     @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay')
     @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults')
@@ -1565,8 +1568,9 @@
     @mock.patch('crmsh.bootstrap.stop_services')
     @mock.patch('crmsh.bootstrap.set_cluster_node_ip')
     def test_remove_node_from_cluster_hostname(self, mock_get_ip, mock_stop, mock_status,
-            mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_decrease, mock_csync2, mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files):
-        mock_invoke.side_effect = [(True, None, None), (True, None, None)]
+            mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_decrease, mock_csync2, mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_cal_delnode):
+        mock_cal_delnode.return_value = True
+        mock_invoke.side_effect = [(True, None, None)]
         mock_invokerc.return_value = True
         mock_get_values.return_value = ["10.10.10.1"]
 
@@ -1579,8 +1583,8 @@
             mock.call("Propagating configuration changes across the remaining 
nodes")
             ])
         mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, 
remote_addr="node1")
+        mock_cal_delnode.assert_called_once_with("node1")
         mock_invoke.assert_has_calls([
-            mock.call('crm node delete node1'),
             mock.call("corosync-cfgtool -R")
             ])
         mock_invokerc.assert_has_calls([

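One note on the test_bootstrap.py changes above: `@mock.patch.object(NodeMgmt, 'call_delnode')` is stacked on top of the existing `@mock.patch` decorators, and stacked patch decorators hand their mocks to the test bottom-up, which is why the new `mock_call_delnode` parameter is appended last in each signature. A small standalone illustration of that ordering (the `Greeter` class and `check` function are made up, not from crmsh):

    from unittest import mock

    class Greeter:
        @classmethod
        def hello(cls):
            return "hello"

    @mock.patch.object(Greeter, 'hello')   # outermost decorator -> last argument
    @mock.patch('os.getcwd')               # innermost decorator -> first argument
    def check(mock_getcwd, mock_hello):
        mock_hello.return_value = "patched"
        return Greeter.hello()

    # check() would return "patched": Greeter.hello is the patched classmethod.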