Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2022-01-14 23:13:09
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1892 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Fri Jan 14 23:13:09 2022 rev:236 rq:946294 version:4.3.1+20220114.07d84c75

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2022-01-06 15:50:54.340958815 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1892/crmsh.changes    2022-01-14 23:14:34.474683387 +0100
@@ -1,0 +2,42 @@
+Fri Jan 14 05:26:37 UTC 2022 - xli...@suse.com
+
+- Update to version 4.3.1+20220114.07d84c75:
+  * Dev: unittest: Adjust unit test for previous change
+  * Fix: bootstrap: Don't change pacemaker.service bootup preference (bsc#1194616)
+
+-------------------------------------------------------------------
+Fri Jan 14 03:32:50 UTC 2022 - xli...@suse.com
+
+- Update to version 4.3.1+20220114.2726e007:
+  * Fix: log: Change the log file owner as hacluster:haclient (bsc#1194619)
+  * Fix: crash_test: Adjust help output of 'crm cluster crash_test -h'(bsc#1194615)
+
+-------------------------------------------------------------------
+Fri Jan 14 02:49:39 UTC 2022 - xli...@suse.com
+
+- Update to version 4.3.1+20220114.2003afd7:
+  * Dev: crm.conf: Add OCF_1_1_SUPPORT flag to control ocf 1.1 feature
+  * Dev: doc: Introduce promotable clone and role Promoted/Unpromoted
+  * Dev: behave: Adjust functional test for previous changes
+  * Dev: unittest: Add unit test for previous changes
+  * Dev: utils: Convert Master/Slave to Promoted/Unpromoted if schema support OCF 1.1
+  * Dev: xmlutil: Replace Promoted/Unpromoted as Master/Slave when OCF 1.0 schema detected
+  * Dev: doc: Replace pingd as ocf:pacemaker:ping
+  * Dev: ui_resource: set target-role as Promoted/Unpromoted when doing promote or demote
+  * Dev: ra: Support Promoted/Unpromoted
+
+-------------------------------------------------------------------
+Wed Jan 12 15:08:41 UTC 2022 - xli...@suse.com
+
+- Update to version 4.3.1+20220112.a945df76:
+  * Dev: unittest: Adjust unit test for previous changes
+  * Dev: ocfs2: Fix running ocfs2 stage on cluster with diskless-sbd
+
+-------------------------------------------------------------------
+Wed Jan 12 08:42:01 UTC 2022 - xli...@suse.com
+
+- Update to version 4.3.1+20220112.6245c22e:
+  * Dev: behave: Add functional test for the new added -U option
+  * Fix: bootstrap: Change default transport type as udpu(unicast) (bsc#1132375)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.3.1+20220104.b683bf0d.tar.bz2

New:
----
  crmsh-4.3.1+20220114.07d84c75.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.9PwDbc/_old  2022-01-14 23:14:34.994683722 +0100
+++ /var/tmp/diff_new_pack.9PwDbc/_new  2022-01-14 23:14:35.002683727 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.3.1+20220104.b683bf0d
+Version:        4.3.1+20220114.07d84c75
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.9PwDbc/_old  2022-01-14 23:14:35.038683750 +0100
+++ /var/tmp/diff_new_pack.9PwDbc/_new  2022-01-14 23:14:35.042683753 +0100
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">0a4a198844f550840c8d0cecf95537dca4b93046</param>
+  <param name="changesrevision">07d84c751208a8b3e5781900674f3ea8e04e6dd0</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-4.3.1+20220104.b683bf0d.tar.bz2 -> crmsh-4.3.1+20220114.07d84c75.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/bootstrap.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/bootstrap.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/bootstrap.py        2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/bootstrap.py        2022-01-14 
04:39:24.000000000 +0100
@@ -97,6 +97,7 @@
         self.no_overwrite_sshkey = None
         self.nic_list = None
         self.unicast = None
+        self.multicast = None
         self.admin_ip = None
         self.second_heartbeat = None
         self.ipv6 = None
@@ -668,11 +669,11 @@
     if pass_msg:
         logger.warning("You should change the hacluster password to something 
more secure!")
 
-    start_pacemaker()
+    start_pacemaker(enable_flag=True)
     wait_for_cluster()
 
 
-def start_pacemaker(node_list=[]):
+def start_pacemaker(node_list=[], enable_flag=False):
     """
     Start pacemaker service with wait time for sbd
     When node_list set, start pacemaker service in parallel
@@ -686,7 +687,7 @@
             SBDTimeout.is_sbd_delay_start():
         pacemaker_start_msg += "(delaying start of sbd for 
{}s)".format(SBDTimeout.get_sbd_delay_start_sec_from_sysconfig())
     with logger_utils.status_long(pacemaker_start_msg):
-        utils.start_service("pacemaker.service", enable=True, 
node_list=node_list)
+        utils.start_service("pacemaker.service", enable=enable_flag, 
node_list=node_list)
 
 
 def install_tmp(tmpfile, to):
@@ -1171,7 +1172,7 @@
         if not confirm("%s already exists - overwrite?" % (corosync.conf())):
             return
 
-    if _context.unicast or _context.cloud_type:
+    if _context.unicast or _context.cloud_type or not _context.multicast:
         init_corosync_unicast()
     else:
         init_corosync_multicast()
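
For readers skimming the bootstrap.py hunk above: with bsc#1132375, unicast (udpu) becomes the default transport, and multicast is only configured when the new -U/--multicast option is passed and nothing else forces unicast. Below is a minimal standalone sketch of that decision, using a hypothetical choose_transport() helper that mirrors the condition added to init_corosync(); it is illustrative only and not part of crmsh.

    # Sketch only -- mirrors the condition added to init_corosync() above:
    # unicast is now the default; multicast requires the new -U option.
    def choose_transport(unicast, multicast, cloud_type=None):
        """Return 'udpu' (unicast) or 'mcast' the way the new bootstrap logic decides."""
        if unicast or cloud_type or not multicast:
            return "udpu"   # init_corosync_unicast()
        return "mcast"      # init_corosync_multicast()

    assert choose_transport(unicast=False, multicast=False) == "udpu"   # new default
    assert choose_transport(unicast=False, multicast=True) == "mcast"   # crm cluster init -U
    assert choose_transport(unicast=True, multicast=True) == "udpu"     # -u still wins
    assert choose_transport(False, True, cloud_type="azure") == "udpu"  # cloud detection forces unicast
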
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/cibconfig.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/cibconfig.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/cibconfig.py        2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/cibconfig.py        2022-01-14 
04:39:24.000000000 +0100
@@ -2994,7 +2994,7 @@
                 rc = False
                 continue
             ra = get_ra(r_node)
-            if not ra.mk_ra_node():  # no RA found?
+            if ra.mk_ra_node() is None:  # no RA found?
                 if not self.is_asymm_cluster():
                     ra.error("no resource agent found for %s" % obj_id)
                 continue
@@ -3015,7 +3015,10 @@
                             implied_ms_actions.remove(op)
                         elif op not in other_actions:
                             continue
-                        adv_timeout = ra.get_adv_timeout(op, c2)
+                        adv_timeout = None
+                        role = c2.get('role')
+                        depth = c2.get('depth')
+                        adv_timeout = ra.get_op_attr_value(op, "timeout", 
role=role, depth=depth)
                         if adv_timeout:
                             c2.set("timeout", adv_timeout)
                             obj_modified = True
@@ -3025,7 +3028,7 @@
             if is_ms_or_promotable_clone(obj.node.getparent()):
                 l += implied_ms_actions
             for op in l:
-                adv_timeout = ra.get_adv_timeout(op)
+                adv_timeout = ra.get_op_attr_value(op, "timeout")
                 if not adv_timeout:
                     continue
                 n = etree.Element('op')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/config.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/config.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/config.py   2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/config.py   2022-01-14 
04:39:24.000000000 +0100
@@ -252,6 +252,7 @@
         'ignore_missing_metadata': opt_boolean('no'),
         'report_tool_options': opt_string(''),
         'lock_timeout': opt_string('120'),
+        'OCF_1_1_SUPPORT': opt_boolean('no'),
         'obscure_pattern': opt_string('passw*')
     },
     'path': {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/constants.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/constants.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/constants.py        2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/constants.py        2022-01-14 
04:39:24.000000000 +0100
@@ -505,6 +505,7 @@
 
 CIB_QUERY = "cibadmin -Q"
 CIB_REPLACE = "cibadmin -R -X '{xmlstr}'"
+CIB_UPGRADE = "crm configure upgrade force"
 CIB_RAW_FILE = "/var/lib/pacemaker/cib/cib.xml"
 XML_NODE_PATH = "/cib/configuration/nodes/node"
 XML_STATUS_PATH = "/cib/status/node_state"
@@ -516,4 +517,9 @@
 PCMK_DELAY_MAX = 30
 DLM_CONTROLD_RA = "ocf::pacemaker:controld"
 LVMLOCKD_RA = "ocf::heartbeat:lvmlockd"
+HA_USER = "hacluster"
+HA_GROUP = "haclient"
+
+
+SCHEMA_MIN_VER_SUPPORT_OCF_1_1 = "pacemaker-3.7"
 # vim:ts=4:sw=4:et:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/crmsh/crash_test/main.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/crash_test/main.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/crash_test/main.py  2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/crash_test/main.py  2022-01-14 
04:39:24.000000000 +0100
@@ -129,11 +129,10 @@
     """
     parser = argparse.ArgumentParser(prog=context.process_name,
                                      description="""
-Cluster crash test tool set. It standardizes the steps to simulate
-cluster failures and to verify some key configuration before you move
-your cluster into production. It is carefully designed with the proper
-steps and does not change any configuration to harm the cluster without
-the confirmation from users.""",
+Cluster crash test tool set.
+It standardizes the steps to simulate cluster failures before you move your cluster
+into production. It is carefully designed with the proper steps and does not change
+any configuration to harm the cluster without the confirmation from users.""",
                                      add_help=False,
                                      formatter_class=MyArgParseFormatter,
                                      epilog='''
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/log.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/log.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/log.py      2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/log.py      2022-01-14 
04:39:24.000000000 +0100
@@ -3,6 +3,7 @@
 import os
 import sys
 import socket
+import shutil
 import logging
 import logging.config
 from contextlib import contextmanager
@@ -440,6 +441,8 @@
     else:
         setup_directory_for_logfile()
     logging.config.dictConfig(LOGGING_CFG)
+    if os.path.exists(CRMSH_LOG_FILE):
+        shutil.chown(CRMSH_LOG_FILE, constants.HA_USER, constants.HA_GROUP)
 
 
 def setup_logger(name):
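
The log.py hunk above is the change behind bsc#1194619: once logging is configured, the crmsh log file is chowned to hacluster:haclient so it stays writable for the HA tooling. A minimal sketch of the same guarded-chown pattern using only the standard library follows; the log path here is a placeholder for the sketch, not necessarily the path crmsh uses.

    # Sketch only -- same guarded-chown pattern as the log.py change above.
    import os
    import shutil

    HA_USER = "hacluster"    # constants.HA_USER in the diff
    HA_GROUP = "haclient"    # constants.HA_GROUP in the diff
    LOG_FILE = "/var/log/crmsh/crmsh.log"  # placeholder path for this sketch

    def fix_log_ownership(path=LOG_FILE):
        """Hand the log file to the HA user/group; needs root and an existing hacluster user."""
        if os.path.exists(path):
            shutil.chown(path, HA_USER, HA_GROUP)
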
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/parse.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/parse.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/parse.py    2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/parse.py    2022-01-14 
04:39:24.000000000 +0100
@@ -11,6 +11,7 @@
 from . import schema
 from .utils import keyword_cmp, verify_boolean, lines2cli
 from .utils import get_boolean, olist, canonical_boolean
+from .utils import handle_role_for_ocf_1_1
 from . import xmlutil
 from . import log
 
@@ -467,7 +468,7 @@
                     idref = True
                 rule.set(idtyp, idval)
             if self.try_match(_ROLE_RE):
-                rule.set('role', self.matched(1))
+                rule.set('role', handle_role_for_ocf_1_1(self.matched(1)))
             if idref:
                 continue
             if self.try_match(_SCORE_RE):
@@ -924,10 +925,13 @@
             out.set('rsc', self.match_resource())
 
         while self.try_match(_ATTR_RE):
-            out.set(self.matched(1), self.matched(2))
+            name = self.matched(1)
+            value = handle_role_for_ocf_1_1(self.matched(2), name=name)
+            out.set(name, value)
 
+        # not sure this is necessary after parse _ATTR_RE in a while loop
         if self.try_match(_ROLE_RE) or self.try_match(_ROLE2_RE):
-            out.set('role', self.matched(1))
+            out.set('role', handle_role_for_ocf_1_1(self.matched(1)))
 
         score = False
         if self.try_match(_SCORE_RE):
@@ -937,7 +941,7 @@
             # backwards compatibility: role used to be read here
             if 'role' not in out:
                 if self.try_match(_ROLE_RE) or self.try_match(_ROLE2_RE):
-                    out.set('role', self.matched(1))
+                    out.set('role', handle_role_for_ocf_1_1(self.matched(1)))
         if not score:
             rules = self.match_rules()
             out.extend(rules)
@@ -1040,7 +1044,7 @@
 
     def _split_setref(self, typename, classifier):
         rsc, typ = self.match_split()
-        typ, t = classifier(typ)
+        typ, t = classifier(handle_role_for_ocf_1_1(typ, name=typename))
         if typ and not t:
             self.err("Invalid %s '%s' for '%s'" % (typename, typ, rsc))
         return rsc, typ, t
@@ -1627,7 +1631,7 @@
                     validator.resource_actions())
             else:
                 l[1] = validator.canonize(
-                    l[1],
+                    handle_role_for_ocf_1_1(l[1]),
                     validator.resource_roles())
             if not l[1]:
                 self.err('Invalid %s for %s' % (self.q_attr, p))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ra.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/ra.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ra.py       2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/ra.py       2022-01-14 
04:39:24.000000000 +0100
@@ -14,7 +14,7 @@
 from . import userdir
 from . import utils
 from .utils import stdout2list, is_program, is_process, to_ascii
-from .utils import os_types_list, get_stdout, find_value
+from .utils import os_types_list, get_stdout
 from .utils import crm_msec, crm_time_cmp
 from . import log
 
@@ -286,25 +286,6 @@
         return ''
 
 
-def mk_monitor_name(role, depth):
-    depth = ("_%s" % depth) if depth != "0" else ""
-    return role and role != "Started" and \
-        "monitor_%s%s" % (role, depth) or \
-        "monitor%s" % depth
-
-
-def monitor_name_node(node):
-    depth = node.get("depth") or '0'
-    role = node.get("role")
-    return mk_monitor_name(role, depth)
-
-
-def monitor_name_pl(pl):
-    depth = find_value(pl, "depth") or '0'
-    role = find_value(pl, "role")
-    return mk_monitor_name(role, depth)
-
-
 def _param_type_default(n):
     """
     Helper function to get (type, default) from XML parameter node
@@ -322,8 +303,9 @@
     '''
     ra_tab = "    "  # four horses
     required_ops = ("start", "stop")
+    no_interval_ops = ("start", "stop", "promote", "demote")
     skip_ops = ("meta-data", "validate-all")
-    skip_op_attr = ("name", "depth", "role")
+    skip_op_attr = ("name",)
 
     def __init__(self, ra_class, ra_type, ra_provider="heartbeat", 
exclude_from_completion=None):
         self.excluded_from_completion = exclude_from_completion or []
@@ -439,30 +421,26 @@
             return cache.retrieve(ident)
         if self.mk_ra_node() is None:
             return None
-        d = {}
-        for c in self.ra_elem.xpath("//actions/action"):
-            name = c.get("name")
+
+        actions_dict = {}
+        actions_dict["monitor"] = []
+        for elem in self.ra_elem.xpath("//actions/action"):
+            name = elem.get("name")
             if not name or name in self.skip_ops:
                 continue
-            if name == "monitor":
-                name = monitor_name_node(c)
-            d[name] = {}
-            for a in list(c.attrib.keys()):
-                if a in self.skip_op_attr:
+            d = {}
+            for key in list(elem.attrib.keys()):
+                if key in self.skip_op_attr:
                     continue
-                v = c.get(a)
-                if v:
-                    d[name][a] = v
-        # add monitor ops without role, if they don't already
-        # exist
-        d2 = {}
-        for op in d:
-            if re.match("monitor_[^0-9]", op):
-                norole_op = re.sub(r'monitor_[^0-9_]+_(.*)', r'monitor_\1', op)
-                if norole_op not in d:
-                    d2[norole_op] = d[op]
-        d.update(d2)
-        return cache.store(ident, d)
+                value = elem.get(key)
+                if value:
+                    d[key] = value
+            if name == "monitor":
+                actions_dict[name].append(d)
+            else:
+                actions_dict[name] = d
+
+        return cache.store(ident, actions_dict)
 
     def param_default(self, pname):
         '''
@@ -542,13 +520,36 @@
                 rc |= utils.get_check_rc()
         return rc
 
-    def get_adv_timeout(self, op, node=None):
-        if node is not None and op == "monitor":
-            name = monitor_name_node(node)
-        else:
-            name = op
+    def get_op_attr_value(self, op, key, role=None, depth=None):
+        """
+        Get operation's attribute value
+        Multi monitors should be distinguished by role or depth
+        """
         try:
-            return self.actions()[name]["timeout"]
+            # actions_dict example:
+            # {'monitor': [
+            #    {'depth': '0', 'timeout': '20s', 'interval': '10s', 'role': 'Promoted'},
+            #    {'depth': '0', 'timeout': '20s', 'interval': '11s', 'role': 'Unpromoted'}
+            #    ],
+            #  'start': {'timeout': '20s'},
+            #  'stop': {'timeout': '20s'}}
+            actions_dict = self.actions()
+            if not actions_dict:
+                return None
+            if op == 'monitor':
+                if role is None and depth is None:
+                    return actions_dict[op][0][key]
+                if role:
+                    for idx, monitor_item in enumerate(actions_dict[op]):
+                        if monitor_item['role'] == role:
+                            return actions_dict[op][idx][key]
+                # Technically, there could be multiple entries defining different depths for a same role.
+                if depth:
+                    for idx, monitor_item in enumerate(actions_dict[op]):
+                        if monitor_item['depth'] == depth:
+                            return actions_dict[op][idx][key]
+            else:
+                return actions_dict[op][key]
         except:
             return None
 
@@ -558,7 +559,71 @@
         - do all operations exist
         - are timeouts sensible
         '''
-        def sanity_check_op(op, n_ops, intervals):
+        def timeout_check(op, item_dict, adv_timeout):
+            """
+            Helper method used by sanity_check_op_timeout, to check operation's timeout
+            """
+            rc = 0
+            if "timeout" in item_dict:
+                actual_timeout = item_dict["timeout"]
+                timeout_string = "specified timeout"
+            else:
+                actual_timeout = default_timeout
+                timeout_string = "default timeout"
+            if actual_timeout and crm_time_cmp(adv_timeout, actual_timeout) > 
0:
+                logger.warning("%s: %s %s for %s is smaller than the advised 
%s",
+                        ident, timeout_string, actual_timeout, op, adv_timeout)
+                rc |= 1
+            return rc
+
+        def sanity_check_op_timeout(op, op_dict):
+            """
+            Helper method used by sanity_check_op, to check operation's timeout
+            """
+            rc = 0
+            role = None
+            depth = None
+            if op == "monitor":
+                for monitor_item in op_dict[op]:
+                    role = monitor_item['role'] if 'role' in monitor_item else 
None
+                    depth = monitor_item['depth'] if 'depth' in monitor_item 
else None
+                    adv_timeout = self.get_op_attr_value(op, "timeout", 
role=role, depth=depth)
+                    rc |= timeout_check(op, monitor_item, adv_timeout)
+            else:
+                adv_timeout = self.get_op_attr_value(op, "timeout")
+                rc |= timeout_check(op, op_dict[op], adv_timeout)
+            return rc
+
+        def sanity_check_op_interval(op, op_dict):
+            """
+            Helper method used by sanity_check_op, to check operation's interval
+            """
+            rc = 0
+            prev_intervals = []
+            if op == "monitor":
+                for monitor_item in op_dict[op]:
+                    role = monitor_item['role'] if 'role' in monitor_item else 
None
+                    depth = monitor_item['depth'] if 'depth' in monitor_item 
else None
+                    # make sure interval in multi monitor operations is unique and non-zero
+                    adv_interval = self.get_op_attr_value(op, "interval", 
role=role, depth=depth)
+                    actual_interval_msec = crm_msec(monitor_item["interval"])
+                    if actual_interval_msec == 0:
+                        logger.warning("%s: interval in monitor should be 
larger than 0, advised is %s", ident, adv_interval)
+                        rc |= 1
+                    elif actual_interval_msec in prev_intervals:
+                        logger.warning("%s: interval in monitor must be 
unique, advised is %s", ident, adv_interval)
+                        rc |= 1
+                    else:
+                        prev_intervals.append(actual_interval_msec)
+            elif "interval" in op_dict[op]:
+                value = op_dict[op]["interval"]
+                value_msec = crm_msec(value)
+                if op in self.no_interval_ops and value_msec != 0:
+                    logger.warning("%s: Specified interval for %s is %s, it 
must be 0", ident, op, value)
+                    rc |= 1
+            return rc
+
+        def sanity_check_op(op, op_dict):
             """
             Helper method used by sanity_check_ops.
             """
@@ -568,54 +633,48 @@
             if op not in self.actions():
                 logger.warning("%s: action '%s' not found in Resource Agent 
meta-data", ident, op)
                 rc |= 1
-            if "interval" in n_ops[op]:
-                v = n_ops[op]["interval"]
-                v_msec = crm_msec(v)
-                if op in ("start", "stop") and v_msec != 0:
-                    logger.warning("%s: Specified interval for %s is %s, it 
must be 0", ident, op, v)
-                    rc |= 1
-                if op.startswith("monitor") and v_msec != 0:
-                    if v_msec not in intervals:
-                        intervals[v_msec] = 1
-                    else:
-                        logger.warning("%s: interval in %s must be unique", 
ident, op)
-                        rc |= 1
-            try:
-                adv_timeout = self.actions()[op]["timeout"]
-            except:
-                return rc
-            if "timeout" in n_ops[op]:
-                v = n_ops[op]["timeout"]
-                timeout_string = "specified timeout"
-            else:
-                v = default_timeout
-                timeout_string = "default timeout"
-            if crm_msec(v) < 0:
                 return rc
-            if crm_time_cmp(adv_timeout, v) > 0:
-                logger.warning("%s: %s %s for %s is smaller than the advised 
%s",
-                            ident, timeout_string, v, op, adv_timeout)
-                rc |= 1
+            rc |= sanity_check_op_interval(op, op_dict)
+            rc |= sanity_check_op_timeout(op, op_dict)
             return rc
 
+
         rc = 0
-        n_ops = {}
+        op_dict = {}
+        op_dict["monitor"] = []
+        # ops example:
+        # [
+        #   ['monitor', [['role', 'Promoted'], ['interval', '10s']]],
+        #   ['monitor', [['role', 'Unpromoted'], ['interval', '0']]],
+        #   ['start', [['timeout', '20s'], ['interval', '0']]]
+        # ]
         for op in ops:
-            n_op = monitor_name_pl(op[1]) if op[0] == "monitor" else op[0]
-            n_ops[n_op] = {}
-            for p, v in op[1]:
-                if p in self.skip_op_attr:
+            n_op = op[0]
+            d = {}
+            for key, value in op[1]:
+                if key in self.skip_op_attr:
                     continue
-                n_ops[n_op][p] = v
+                d[key] = value
+            if n_op == "monitor":
+                op_dict["monitor"].append(d)
+            else:
+                op_dict[n_op] = d
         for req_op in self.required_ops:
-            if req_op not in n_ops:
+            if req_op not in op_dict:
                 if not (self.ra_class == "stonith" and req_op in ("start", 
"stop")):
-                    n_ops[req_op] = {}
-        intervals = {}
-        for op in n_ops:
-            rc |= sanity_check_op(op, n_ops, intervals)
+                    op_dict[req_op] = {}
+        # op_dict example:
+        # {'monitor': [
+        #    {'role': 'Promoted', 'interval': '10s'},
+        #    {'role': 'Unpromoted', 'interval': '0'}],
+        #    'start': {'timeout': '20s', 'interval': '0'},
+        #    'stop': {}
+        # }
+        for op in op_dict:
+            rc |= sanity_check_op(op, op_dict)
         return rc
 
+
     def meta(self):
         '''
         RA meta-data as raw xml.
@@ -725,8 +784,6 @@
             name = n.get("name")
             if not name or name in self.skip_ops:
                 return ''
-            if name == "monitor":
-                name = monitor_name_node(n)
             s = "%-13s" % name
             for a in list(n.attrib.keys()):
                 if a in self.skip_op_attr:
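
The ra.py rework above replaces the flat monitor_<role> action keys with a dict in which 'monitor' maps to a list of per-role/per-depth entries, and get_op_attr_value() picks the matching one. The following is a small standalone sketch of that lookup over the example structure quoted in the diff; it is illustrative only, not the crmsh API itself.

    # Sketch only: attribute lookup over the new actions dict, where "monitor"
    # holds a list of entries distinguished by role and/or depth, while every
    # other operation holds a single attribute dict.
    ACTIONS = {
        "monitor": [
            {"depth": "0", "timeout": "20s", "interval": "10s", "role": "Promoted"},
            {"depth": "0", "timeout": "20s", "interval": "11s", "role": "Unpromoted"},
        ],
        "start": {"timeout": "20s"},
        "stop": {"timeout": "20s"},
    }

    def get_op_attr_value(actions, op, key, role=None, depth=None):
        """Return the requested attribute for an operation, or None if it cannot be found."""
        try:
            if op == "monitor":
                if role is None and depth is None:
                    return actions[op][0][key]          # fall back to the first monitor entry
                for item in actions[op]:
                    if role is not None and item.get("role") == role:
                        return item[key]
                for item in actions[op]:
                    if depth is not None and item.get("depth") == depth:
                        return item[key]
                return None
            return actions[op][key]
        except (KeyError, IndexError):
            return None

    assert get_op_attr_value(ACTIONS, "monitor", "interval", role="Unpromoted") == "11s"
    assert get_op_attr_value(ACTIONS, "monitor", "timeout") == "20s"
    assert get_op_attr_value(ACTIONS, "start", "timeout") == "20s"
    assert get_op_attr_value(ACTIONS, "promote", "timeout") is None
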
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/sbd.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/sbd.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/sbd.py      2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/sbd.py      2022-01-14 
04:39:24.000000000 +0100
@@ -428,7 +428,7 @@
         if res:
             return utils.re_split_string(self.PARSE_RE, res)
         else:
-            return None
+            return []
 
     def _restart_cluster_and_configure_sbd_ra(self):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/schema.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/schema.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/schema.py   2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/schema.py   2022-01-14 
04:39:24.000000000 +0100
@@ -151,6 +151,4 @@
     if _crm_schema is None:
         return []
     return _crm_schema.rng_xpath(xpath, namespaces=namespaces)
-
-
 # vim:ts=4:sw=4:et:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ui_cluster.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/ui_cluster.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ui_cluster.py       2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/ui_cluster.py       2022-01-14 
04:39:24.000000000 +0100
@@ -312,8 +312,9 @@
         network_group.add_argument("-i", "--interface", dest="nic_list", 
metavar="IF", action="append", choices=utils.interface_choice(),
                                    help="Bind to IP address on interface IF. 
Use -i second time for second interface")
         network_group.add_argument("-u", "--unicast", action="store_true", 
dest="unicast",
-                                   help="Configure corosync to communicate 
over unicast (UDP), and not multicast. " +
-                                   "Default is multicast unless an environment 
where multicast cannot be used is detected.")
+                                   help="Configure corosync to communicate 
over unicast(udpu). This is the default transport type")
+        network_group.add_argument("-U", "--multicast", action="store_true", 
dest="multicast",
+                                   help="Configure corosync to communicate 
over multicast. Default is unicast")
         network_group.add_argument("-A", "--admin-ip", dest="admin_ip", 
metavar="IP",
                                    help="Configure IP address as an 
administration virtual IP")
         network_group.add_argument("-M", "--multi-heartbeats", 
action="store_true", dest="second_heartbeat",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ui_configure.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/ui_configure.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ui_configure.py     2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/ui_configure.py     2022-01-14 
04:39:24.000000000 +0100
@@ -244,43 +244,47 @@
 
 
 def _prim_op_completer(agent, args):
-    actions = agent.actions()
-    completing = args[-1]
-    if completing == 'op':
+
+    def concat_kv(k, v):
+        return "{}={}".format(k, v)
+
+    if args[-1] == 'op':
         return ['op']
+    actions = agent.actions()
+    if not actions:
+        return []
+    # list all actions, select one to complete
     if args[-2] == 'op':
-        # append action items which in agent default actions
-        # monitor_Master will be mapped to "monitor role=Master"
-        # monitor_Slave will be mapped to "monitor role=Slave"
-        op_list = list(constants.op_cli_names)
-        if "monitor_Master" in actions:
-            op_list.append("monitor_Master")
-        if "monitor_Slave" in actions:
-            op_list.append("monitor_Slave")
-        # remove action items which not in default actions
-        for item in ["monitor", "demote", "promote", "notify"]:
-            if item not in actions:
-                op_list.remove(item)
-        # remove action items which already used
-        for item in op_list:
-            if item in args[:-2]:
-                op_list.remove(item)
-        return op_list
+        return actions.keys()
+    # list all attributes of the action, select one to complete
     if args[-3] == 'op':
         res = []
-        # list all of default items
-        if actions and actions[args[-2]]:
-            for k, v in list(actions[args[-2]].items()):
-                res += ["%s=%s" % (k, v)]
-            return res
+        op_name = args[-2]
+        if op_name == 'monitor':
+            for one_monitor in actions[op_name]:
+                res += [concat_kv(k, v) for k, v in one_monitor.items()]
+        else:
+            res = [concat_kv(k, v) for k, v in actions[op_name].items()]
+        return res
+
     args.pop()
-    # make sure all of default items can be completed
-    if args[-2] in actions:
+    if '=' in args[-1]:
         res = []
-        for k, v in actions[args[-2]].items():
-            if args[-1].startswith(k+'='):
-                continue
-            res += ["%s=%s" % (k, v)]
+        # find latest action
+        op_name = None
+        for i, item in enumerate(reversed(args)):
+            if item in actions:
+                op_name = item
+                break
+        if not op_name:
+            return []
+        # list all left attributes of the action, select one to complete
+        actions_list_in_args = [arg.split('=')[0] for arg in 
args[len(args)-i:]]
+        if op_name == 'monitor':
+            for one_monitor in actions[op_name]:
+                res += [concat_kv(k, v) for k, v in one_monitor.items() if k 
not in actions_list_in_args]
+        else:
+            res = [concat_kv(k, v) for k, v in actions[op_name].items() if k 
not in actions_list_in_args]
         return res
 
     return []
@@ -331,7 +335,7 @@
         return []
 
     complete_results = completers_set[last_keyw](agent, args)
-    if len(args) > 4 and '=' in args[-2]: # args[-1] will be the space
+    if len(args) > 4 and '=' in args[-1]:
         return complete_results + keywords
 
     return complete_results
@@ -987,14 +991,7 @@
             [op op_type [<attribute>=<value>...]
                         [[op_params] <param>=<value> [<param>=<value>...]]
                         [op_meta <attribute>=<value> [<attribute>=<value>...]] 
...]]"""
-        tmp = list(args)
-        for item in ['monitor_Master', 'monitor_Slave']:
-            if item in tmp:
-                idx = tmp.index(item)
-                tmp.remove(item)
-                tmp.insert(idx, "monitor")
-                tmp.insert(idx+1, "role=%s" % item.split('_')[1])
-        return self.__conf_object(context.get_command_name(), *tuple(tmp))
+        return self.__conf_object(context.get_command_name(), *args)
 
     @command.completers_repeating(compl.attr_id, _container_type, 
container_complete_complex)
     def do_bundle(self, context, *args):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ui_resource.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/ui_resource.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/ui_resource.py      2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/ui_resource.py      2022-01-14 
04:39:24.000000000 +0100
@@ -338,7 +338,8 @@
         if not xmlutil.RscState().is_ms_or_promotable_clone(rsc):
             logger.error("%s is not a promotable resource", rsc)
             return False
-        return utils.ext_cmd(self.rsc_setrole % (rsc, "Master")) == 0
+        role = "Promoted" if config.core.OCF_1_1_SUPPORT else "Master"
+        return utils.ext_cmd(self.rsc_setrole % (rsc, role)) == 0
 
     def do_scores(self, context):
         "usage: scores"
@@ -366,7 +367,8 @@
         if not xmlutil.RscState().is_ms_or_promotable_clone(rsc):
             logger.error("%s is not a promotable resource", rsc)
             return False
-        return utils.ext_cmd(self.rsc_setrole % (rsc, "Slave")) == 0
+        role = "Unpromoted" if config.core.OCF_1_1_SUPPORT else "Slave"
+        return utils.ext_cmd(self.rsc_setrole % (rsc, role)) == 0
 
     @command.completers(compl.resources)
     def do_manage(self, context, rsc):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/utils.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/utils.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/utils.py    2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/utils.py    2022-01-14 
04:39:24.000000000 +0100
@@ -28,6 +28,7 @@
 from . import options
 from . import term
 from . import parallax
+from distutils.version import LooseVersion
 from .constants import SSH_OPTION
 from . import log
 
@@ -1257,13 +1258,6 @@
             print(s)
 
 
-def find_value(pl, name):
-    for n, v in pl:
-        if n == name:
-            return v
-    return None
-
-
 def cli_replace_attr(pl, name, new_val):
     for i, attr in enumerate(pl):
         if attr[0] == name:
@@ -1550,6 +1544,10 @@
     return val_l
 
 
+def is_larger_than_min_version(version, min_version):
+    return LooseVersion(version) >= LooseVersion(min_version)
+
+
 def is_min_pcmk_ver(min_ver, cib_f=None):
     if not constants.pcmk_version:
         if cib_f:
@@ -1557,8 +1555,7 @@
             logger.debug("found pacemaker version: %s in cib: %s", 
constants.pcmk_version, cib_f)
         else:
             constants.pcmk_version = get_pcmk_version("1.1.11")
-    from distutils.version import LooseVersion
-    return LooseVersion(constants.pcmk_version) >= LooseVersion(min_ver)
+    return is_larger_than_min_version(constants.pcmk_version, min_ver)
 
 
 def is_pcmk_118(cib_f=None):
@@ -2727,8 +2724,11 @@
     """
     Check if any stonith device registered
     """
+    from . import sbd
     out = get_stdout_or_raise_error("stonith_admin -L")
-    return re.search("[1-9]+ fence device[s]* found", out) is not None
+    has_stonith_device = re.search("[1-9]+ fence device[s]* found", out) is 
not None
+    using_diskless_sbd = sbd.SBDManager.is_using_diskless_sbd()
+    return has_stonith_device or using_diskless_sbd
 
 
 def parse_append_action_argument(input_list, parse_re="[; ]"):
@@ -3038,4 +3038,33 @@
     res_min = re.search("(\d+)min", time_res)
     start_timeout += 60 * int(res_min.group(1)) if res_min else 0
     return start_timeout
+
+
+def is_ocf_1_1_cib_schema_detected():
+    """
+    Only turn on ocf_1_1 feature the cib schema version is pacemaker-3.7 or above
+    """
+    from .cibconfig import cib_factory
+    return is_larger_than_min_version(cib_factory.get_schema(), 
constants.SCHEMA_MIN_VER_SUPPORT_OCF_1_1)
+
+
+def handle_role_for_ocf_1_1(value, name='role'):
+    """
+    * Convert role from Promoted/Unpromoted to Master/Slave if schema doesn't support OCF 1.1
+    * Convert role from Master/Slave to Promoted/Unpromoted if ocf1.1 cib schema detected and OCF_1_1_SUPPORT is yes
+    """
+    role_names = ["role", "target-role"]
+    downgrade_dict = {"Promoted": "Master", "Unpromoted": "Slave"}
+    upgrade_dict = {v: k for k, v in downgrade_dict.items()}
+
+    if name not in role_names:
+        return value
+    if value in downgrade_dict and not is_ocf_1_1_cib_schema_detected():
+        logger.warning('Convert "%s" to "%s" since the current schema version 
is old and not upgraded yet. Please consider "%s"', value, 
downgrade_dict[value], constants.CIB_UPGRADE)
+        return downgrade_dict[value]
+    if value in upgrade_dict and is_ocf_1_1_cib_schema_detected() and 
config.core.OCF_1_1_SUPPORT:
+        logger.info('Convert deprecated "%s" to "%s"', value, 
upgrade_dict[value])
+        return upgrade_dict[value]
+
+    return value
 # vim:ts=4:sw=4:et:
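
Pulling the new utils helpers together: a role name is rewritten in one direction or the other depending on whether the CIB schema is at least pacemaker-3.7 (SCHEMA_MIN_VER_SUPPORT_OCF_1_1) and whether core.OCF_1_1_SUPPORT is enabled. Here is a simplified, self-contained sketch of that mapping with the schema detection reduced to a plain version comparison; the wrapper function is illustrative, only the constant names follow the diff above.

    # Sketch only: simplified version of the handle_role_for_ocf_1_1() logic above,
    # with the CIB schema detection reduced to a plain version comparison.
    from distutils.version import LooseVersion

    SCHEMA_MIN_VER_SUPPORT_OCF_1_1 = "pacemaker-3.7"
    DOWNGRADE = {"Promoted": "Master", "Unpromoted": "Slave"}
    UPGRADE = {v: k for k, v in DOWNGRADE.items()}

    def schema_supports_ocf_1_1(schema):
        """Same comparison as utils.is_larger_than_min_version() in the diff."""
        return LooseVersion(schema) >= LooseVersion(SCHEMA_MIN_VER_SUPPORT_OCF_1_1)

    def handle_role(value, schema, ocf_1_1_support, name="role"):
        if name not in ("role", "target-role"):
            return value
        ocf_1_1 = schema_supports_ocf_1_1(schema)
        if value in DOWNGRADE and not ocf_1_1:
            return DOWNGRADE[value]    # old schema: fall back to Master/Slave
        if value in UPGRADE and ocf_1_1 and ocf_1_1_support:
            return UPGRADE[value]      # new schema plus OCF_1_1_SUPPORT: prefer Promoted/Unpromoted
        return value

    assert handle_role("Promoted", "pacemaker-3.5", True) == "Master"
    assert handle_role("Master", "pacemaker-3.7", True) == "Promoted"
    assert handle_role("Master", "pacemaker-3.7", False) == "Master"
    assert handle_role("10s", "pacemaker-3.7", True, name="interval") == "10s"
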
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/crmsh/xmlutil.py 
new/crmsh-4.3.1+20220114.07d84c75/crmsh/xmlutil.py
--- old/crmsh-4.3.1+20220104.b683bf0d/crmsh/xmlutil.py  2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/crmsh/xmlutil.py  2022-01-14 
04:39:24.000000000 +0100
@@ -14,7 +14,7 @@
 from . import schema
 from . import constants
 from . import userdir
-from .utils import add_sudo, str2file, str2tmp, get_boolean
+from .utils import add_sudo, str2file, str2tmp, get_boolean, 
handle_role_for_ocf_1_1
 from .utils import get_stdout, get_stdout_or_raise_error, stdout2list, 
crm_msec, crm_time_cmp
 from .utils import olist, get_cib_in_use, get_tempdir, to_ascii, 
is_boolean_true
 from . import log
@@ -1427,6 +1427,7 @@
     """
     <nvpair name="" value="" />
     """
+    value = handle_role_for_ocf_1_1(value, name=name)
     return new("nvpair", name=name, value=value)
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/doc/crm.8.adoc 
new/crmsh-4.3.1+20220114.07d84c75/doc/crm.8.adoc
--- old/crmsh-4.3.1+20220104.b683bf0d/doc/crm.8.adoc    2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/doc/crm.8.adoc    2022-01-14 
04:39:24.000000000 +0100
@@ -161,7 +161,7 @@
   primitive apcfence stonith:apcsmart \
     params ttydev=/dev/ttyS0 hostlist="node1 node2" \
     op start timeout=60s
-  primitive pingd pingd \
+  primitive pingd ocf:pacemaker:ping \
     params name=pingd dampen=5s multiplier=100 host_list="r1 r2"
   #
   # monitor apache and the UPS
@@ -1691,10 +1691,10 @@
 constraints <rsc>
 ................
 
-[[cmdhelp_resource_demote,demote a master-slave resource]]
+[[cmdhelp_resource_demote,demote a promotable resource]]
 ==== `demote`
 
-Demote a master-slave resource using the `target-role`
+Demote a promotable resource using the `target-role`
 attribute.
 
 Usage:
@@ -1831,10 +1831,10 @@
 param ip_0 show ip
 ...............
 
-[[cmdhelp_resource_promote,promote a master-slave resource]]
+[[cmdhelp_resource_promote,promote a promotable resource]]
 ==== `promote`
 
-Promote a master-slave resource using the `target-role`
+Promote a promotable resource using the `target-role`
 attribute.
 
 Usage:
@@ -2557,8 +2557,8 @@
 - `primitive`
 - `monitor`
 - `group`
-- `clone`
-- `ms`/`master` (master-slave)
+- `clone` (promotable clones)
+- `ms`/`master` (master-slave) (deprecated)
 
 In order to streamline large configurations, it is possible to
 define a template which can later be referenced in primitives:
@@ -2740,6 +2740,9 @@
 The `clone` command creates a resource clone. It may contain a
 single primitive resource or one group of resources.
 
++Promotable clones+ are clone resources with the +promotable=true+ meta attribute for the given promotable resources.
+It's used to deprecate the master-slave resources.
+
 Usage:
 ...............
 clone <name> <rsc>
@@ -3257,7 +3260,7 @@
 Note that after executing the command, the monitor operation may
 be shown as part of the primitive definition.
 
-[[cmdhelp_configure_ms,define a master-slave resource]]
+[[cmdhelp_configure_ms,define a master-slave resource (deprecated)]]
 ==== `ms` (`master`)
 
 The `ms` command creates a master/slave resource type. It may contain a
@@ -3409,7 +3412,7 @@
 ==== `primitive`
 
 The primitive command describes a resource. It may be referenced
-only once in group, clone, or master-slave objects. If it's not
+only once in group, or clone objects. If it's not
 referenced, then it is placed as a single resource in the CIB.
 
 Operations may be specified anonymously, as a group or by reference:
@@ -3429,6 +3432,7 @@
 +OCF_CHECK_LEVEL+.
 
 For multistate resources, roles are specified as +role=<role>+.
+The +Master/Slave+ resources are deprecated and replaced by 
+Promoted/Unpromoted+ promotable resources if desired.
 
 A template may be defined for resources which are of the same
 type and which share most of the configuration. See
@@ -3480,8 +3484,8 @@
 
 primitive r0 ocf:linbit:drbd \
   params drbd_resource=r0 \
-  op monitor role=Master interval=60s \
-  op monitor role=Slave interval=300s
+  op monitor role=Promoted interval=60s \
+  op monitor role=Unpromoted interval=300s
 
 primitive xen0 @vm_scheme1 xmfile=/etc/xen/vm/xen0
 
@@ -3722,7 +3726,7 @@
 ...............
 rsc_ticket ticket-A_public-ip ticket-A: public-ip
 rsc_ticket ticket-A_bigdb ticket-A: bigdb loss-policy=fence
-rsc_ticket ticket-B_storage ticket-B: drbd-a:Master drbd-b:Master
+rsc_ticket ticket-B_storage ticket-B: drbd-a:Promoted drbd-b:Promoted
 ...............
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/etc/crm.conf.in 
new/crmsh-4.3.1+20220114.07d84c75/etc/crm.conf.in
--- old/crmsh-4.3.1+20220104.b683bf0d/etc/crm.conf.in   2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/etc/crm.conf.in   2022-01-14 
04:39:24.000000000 +0100
@@ -21,6 +21,9 @@
 ; report_tool_options =
 ; lock_timeout = 120
 
+; set OCF_1_1_SUPPORT to yes is to fully turn on OCF 1.1 feature once the corresponding CIB detected.
+; OCF_1_1_SUPPORT = yes
+
 ; obscure_pattern option is the persisent configuration of CLI.
 ; Example, for the high security concern, obscure_pattern = passw* | ip
 ; which makes `crm configure show` is equal to
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/features/bootstrap_options.feature 
new/crmsh-4.3.1+20220114.07d84c75/test/features/bootstrap_options.feature
--- old/crmsh-4.3.1+20220104.b683bf0d/test/features/bootstrap_options.feature   
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/features/bootstrap_options.feature   
2022-01-14 04:39:24.000000000 +0100
@@ -8,6 +8,7 @@
       "-n":      Set the name of the configured cluster
       "-A":      Configure IP address as an administration virtual IP
       "-u":      Configure corosync to communicate over unicast
+      "-U":      Configure corosync to communicate over multicast
   Tag @clean means need to stop cluster service if the service is available
 
   @clean
@@ -58,6 +59,7 @@
     And     IP "172.17.0.2" is used by corosync on "hanode1"
     And     IP "10.10.10.2" is used by corosync on "hanode1"
     And     Show corosync ring status
+    And     Corosync working on "unicast" mode
 
   @clean
   Scenario: Using multiple network interface using "-i" option
@@ -103,7 +105,7 @@
     When    Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
     And     IP "2001:db8:10::3" is used by corosync on "hanode2"
-    And     Corosync working on "multicast" mode
+    And     Corosync working on "unicast" mode
 
   @clean
   Scenario: Init cluster service with ipv6 unicast using "-I" and "-u" option
@@ -117,3 +119,14 @@
     And     IP "2001:db8:10::3" is used by corosync on "hanode2"
     And     Show cluster status on "hanode1"
     And     Corosync working on "unicast" mode
+
+  @clean
+  Scenario: Init cluster service with multicast using "-U" option (bsc#1132375)
+    Given   Cluster service is "stopped" on "hanode1"
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -U -i eth1 -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Show cluster status on "hanode1"
+    And     Corosync working on "multicast" mode
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/features/qdevice_usercase.feature 
new/crmsh-4.3.1+20220114.07d84c75/test/features/qdevice_usercase.feature
--- old/crmsh-4.3.1+20220104.b683bf0d/test/features/qdevice_usercase.feature    
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/features/qdevice_usercase.feature    
2022-01-14 04:39:24.000000000 +0100
@@ -41,19 +41,19 @@
     # Setup a two-nodes cluster
     When    Run "crm cluster init -y -i eth0" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
-    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    When    Run "crm cluster join -c hanode1 -y -i eth0" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
 
     # Generate script to check whether this node is master
     When    Write multi lines to file "/etc/corosync/qdevice/check_master.sh"
       """
       #!/usr/bin/sh
-      crm_resource --locate -r promotable-1 2>&1 | grep Master | grep 
`crm_node -n` >/dev/null 2>&1
+      crm_resource --locate -r promotable-1 2>&1 | grep -E "Master|Promoted" | 
grep `crm_node -n` >/dev/null 2>&1
       """
     And     Run "chmod +x /etc/corosync/qdevice/check_master.sh" on "hanode1"
     And     Run "scp -p /etc/corosync/qdevice/check_master.sh 
root@hanode2:/etc/corosync/qdevice" on "hanode1"
     # Add a promotable clone resource and make sure hanode1 is master
-    And     Run "crm configure primitive stateful-1 ocf:pacemaker:Stateful op 
monitor_Slave interval=10s op monitor_Master interval=5s" on "hanode1"
+    And     Run "crm configure primitive stateful-1 ocf:pacemaker:Stateful op 
monitor role=Promoted interval=10s op monitor role=Unpromoted interval=5s" on 
"hanode1"
     And     Run "crm configure clone promotable-1 stateful-1 meta 
promotable=true" on "hanode1"
     And     Run "sleep 5" on "hanode1"
     Then    Show cluster status on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/features/resource_set.feature 
new/crmsh-4.3.1+20220114.07d84c75/test/features/resource_set.feature
--- old/crmsh-4.3.1+20220104.b683bf0d/test/features/resource_set.feature        
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/features/resource_set.feature        
2022-01-14 04:39:24.000000000 +0100
@@ -14,7 +14,7 @@
     When      Run "crm configure primitive vip IPaddr2 params ip=10.10.10.123 
op monitor interval=3s" on "hanode1"
     Then      Resource "vip" type "IPaddr2" is "Started"
     And       Cluster virtual IP is "10.10.10.123"
-    When      Run "crm configure primitive s ocf:pacemaker:Stateful op 
monitor_Master interval=3s op monitor_Slave interval=5s" on "hanode1"
+    When      Run "crm configure primitive s ocf:pacemaker:Stateful op monitor 
role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
     Then      Resource "s" type "Stateful" is "Started"
 
   @clean
@@ -95,3 +95,23 @@
     Then    Except "ERROR: resource.move: Not our node: xxxx"
     When    Try "crm resource move d"
     Then    Except "ERROR: resource.move: No target node: Move requires either 
a target node or 'force'"
+
+  @clean
+  Scenario: promote and demote promotable clone resource (bsc#1194125)
+    When    Run "crm configure primitive s2 ocf:pacemaker:Stateful op monitor 
role=Promoted interval=3s op monitor role=Unpromoted interval=5s" on "hanode1"
+    And     Run "crm configure clone p2 s2 meta promotable=true" on "hanode1"
+    And     Run "crm resource demote p2" on "hanode1"
+    Then    Run "sleep 2;! crm_resource --locate -r p2|grep -E 
'Master|Promoted'" OK
+    When    Run "crm resource promote p2" on "hanode2"
+    Then    Run "sleep 2;crm_resource --locate -r p2|grep -E 
'Master|Promoted'" OK
+
+  @clean
+  Scenario: operation warning
+    When    Run "crm configure primitive id=d2 Dummy op start interval=5s" on 
"hanode1"
+    Then    Expected "WARNING: d2: Specified interval for start is 5s, it must 
be 0" in stdout
+    When    Run "crm configure primitive id=d3 Dummy op monitor interval=0" on 
"hanode1"
+    Then    Expected "WARNING: d3: interval in monitor should be larger than 
0, advised is 10s" in stdout
+    When    Run "crm configure primitive s2 ocf:pacemaker:Stateful op monitor 
role=Promoted interval=3s op monitor role=Unpromoted interval=3s" on "hanode1"
+    Then    Expected "WARNING: s2: interval in monitor must be unique, advised 
is 11s" in stdout
+    When    Run "crm configure primitive id=d4 Dummy op start timeout=10s" on 
"hanode1"
+    Then    Expected "WARNING: d4: specified timeout 10s for start is smaller 
than the advised 20s" in stdout
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/features/steps/const.py 
new/crmsh-4.3.1+20220114.07d84c75/test/features/steps/const.py
--- old/crmsh-4.3.1+20220104.b683bf0d/test/features/steps/const.py      
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/features/steps/const.py      
2022-01-14 04:39:24.000000000 +0100
@@ -87,10 +87,10 @@
   -i IF, --interface IF
                         Bind to IP address on interface IF. Use -i second time
                         for second interface
-  -u, --unicast         Configure corosync to communicate over unicast (UDP),
-                        and not multicast. Default is multicast unless an
-                        environment where multicast cannot be used is
-                        detected.
+  -u, --unicast         Configure corosync to communicate over unicast(udpu).
+                        This is the default transport type
+  -U, --multicast       Configure corosync to communicate over multicast.
+                        Default is unicast
   -A IP, --admin-ip IP  Configure IP address as an administration virtual IP
   -M, --multi-heartbeats
                         Configure corosync with second heartbeat line
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/features/steps/step_implementation.py 
new/crmsh-4.3.1+20220114.07d84c75/test/features/steps/step_implementation.py
--- 
old/crmsh-4.3.1+20220104.b683bf0d/test/features/steps/step_implementation.py    
    2022-01-04 10:03:01.000000000 +0100
+++ 
new/crmsh-4.3.1+20220114.07d84c75/test/features/steps/step_implementation.py    
    2022-01-14 04:39:24.000000000 +0100
@@ -288,7 +288,7 @@
 @then('Corosync working on "{transport_type}" mode')
 def step_impl(context, transport_type):
     if transport_type == "multicast":
-        assert corosync.get_value("totem.transport") != "udpu"
+        assert corosync.get_value("totem.transport") is None
     if transport_type == "unicast":
         assert corosync.get_value("totem.transport") == "udpu"
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.3.1+20220104.b683bf0d/test/testcases/ra.exp 
new/crmsh-4.3.1+20220114.07d84c75/test/testcases/ra.exp
--- old/crmsh-4.3.1+20220104.b683bf0d/test/testcases/ra.exp     2022-01-04 
10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/testcases/ra.exp     2022-01-14 
04:39:24.000000000 +0100
@@ -41,7 +41,7 @@
 
     start         timeout=20s
     stop          timeout=20s
-    monitor       timeout=20s interval=10s
+    monitor       timeout=20s interval=10s depth=0
     reload        timeout=20s
     reload-agent  timeout=20s
     migrate_to    timeout=20s
@@ -88,6 +88,7 @@
     This prevents double fencing when different delays are configured on the 
nodes.
     Use this to enable a static delay for fencing actions.
     The overall delay is derived from a random delay value adding this static 
delay so that the sum is kept below the maximum delay.
+    Set to eg. node1:1s;node2:5 to set different value per node.
 
 pcmk_action_limit (integer, [1]): The maximum number of actions can be 
performed in parallel on this device
     Cluster property concurrent-fencing=true needs to be configured first.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/unittests/test_bootstrap.py 
new/crmsh-4.3.1+20220114.07d84c75/test/unittests/test_bootstrap.py
--- old/crmsh-4.3.1+20220104.b683bf0d/test/unittests/test_bootstrap.py  
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/unittests/test_bootstrap.py  
2022-01-14 04:39:24.000000000 +0100
@@ -275,7 +275,7 @@
         mock_timeout.return_value = 60
         bootstrap.start_pacemaker()
         mock_long.assert_called_once_with('Starting pacemaker(delaying start 
of sbd for 60s)')
-        mock_start.assert_called_once_with('pacemaker.service', enable=True, 
node_list=[])
+        mock_start.assert_called_once_with('pacemaker.service', enable=False, 
node_list=[])
 
     @mock.patch('crmsh.bootstrap.configure_local_ssh_key')
     @mock.patch('crmsh.utils.start_service')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/unittests/test_sbd.py 
new/crmsh-4.3.1+20220114.07d84c75/test/unittests/test_sbd.py
--- old/crmsh-4.3.1+20220104.b683bf0d/test/unittests/test_sbd.py        
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/unittests/test_sbd.py        
2022-01-14 04:39:24.000000000 +0100
@@ -513,7 +513,7 @@
         mock_parse_inst.get.return_value = None
 
         res = self.sbd_inst._get_sbd_device_from_config()
-        assert res is None
+        assert res == []
 
         mock_parse.assert_called_once_with("/etc/sysconfig/sbd")
         mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.3.1+20220104.b683bf0d/test/unittests/test_utils.py 
new/crmsh-4.3.1+20220114.07d84c75/test/unittests/test_utils.py
--- old/crmsh-4.3.1+20220104.b683bf0d/test/unittests/test_utils.py      
2022-01-04 10:03:01.000000000 +0100
+++ new/crmsh-4.3.1+20220114.07d84c75/test/unittests/test_utils.py      
2022-01-14 04:39:24.000000000 +0100
@@ -1404,15 +1404,18 @@
     assert res == ["/dev/sda1", "/dev/sda2"]
 
 
+@mock.patch('crmsh.sbd.SBDManager.is_using_diskless_sbd')
 @mock.patch('crmsh.utils.get_stdout_or_raise_error')
-def test_has_stonith_running(mock_run):
+def test_has_stonith_running(mock_run, mock_diskless):
     mock_run.return_value = """
 stonith-sbd
 1 fence device found
     """
+    mock_diskless.return_value = True
     res = utils.has_stonith_running()
     assert res is True
     mock_run.assert_called_once_with("stonith_admin -L")
+    mock_diskless.assert_called_once_with()
 
 
 @mock.patch('crmsh.utils.S_ISBLK')
@@ -1635,3 +1638,45 @@
     mock_get_property.return_value = "100s"
     utils.set_property_conditionally("stonith-timeout", 101)
     mock_run.assert_called_once_with("crm configure property 
stonith-timeout=101")
+
+
+@mock.patch('crmsh.utils.is_larger_than_min_version')
+@mock.patch('crmsh.cibconfig.cib_factory')
+def test_is_ocf_1_1_cib_schema_detected(mock_cib, mock_larger):
+    config.core.OCF_1_1_SUPPORT = True
+    mock_cib.get_schema = mock.Mock()
+    mock_cib.get_schema.return_value = "pacemaker-3.5"
+    mock_larger.return_value = True
+    assert utils.is_ocf_1_1_cib_schema_detected() is True
+    mock_cib.get_schema.assert_called_once_with()
+    mock_larger.assert_called_once_with("pacemaker-3.5", 
constants.SCHEMA_MIN_VER_SUPPORT_OCF_1_1)
+
+
+@mock.patch('logging.Logger.warning')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1(mock_support, mock_warn):
+    mock_support.return_value = False
+    assert utils.handle_role_for_ocf_1_1("Promoted") == "Master"
+    mock_support.assert_called_once_with()
+    mock_warn.assert_called_once_with('Convert "%s" to "%s" since the current 
schema version is old and not upgraded yet. Please consider "%s"', "Promoted", 
"Master", constants.CIB_UPGRADE)
+
+
+@mock.patch('logging.Logger.info')
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_convert_new(mock_support, mock_info):
+    config.core.OCF_1_1_SUPPORT = True
+    mock_support.return_value = True
+    assert utils.handle_role_for_ocf_1_1("Master") == "Promoted"
+    mock_support.assert_called_once_with()
+    mock_info.assert_called_once_with('Convert deprecated "%s" to "%s"', 
"Master", "Promoted")
+
+
+@mock.patch('crmsh.utils.is_ocf_1_1_cib_schema_detected')
+def test_handle_role_for_ocf_1_1_return(mock_support):
+    mock_support.return_value = True
+    assert utils.handle_role_for_ocf_1_1("Promoted") == "Promoted"
+    mock_support.assert_called_once_with()
+
+
+def test_handle_role_for_ocf_1_1_return_not_role():
+    assert utils.handle_role_for_ocf_1_1("test", name='other') == "test"

Reply via email to