On 16 May 2013 10:23, Thomas Thrainer <[email protected]> wrote:
> All LUInstance* classes are extracted to instance.py. Common functions
> are moved to common.py if used by non-node logical units as well.

s/non-node/non-instance/

> Additionally, helper functions which are only used by LUBackup* and
> LUInstance* are moved to instance_utils.py.
>
> Signed-off-by: Thomas Thrainer <[email protected]>
> ---
>  Makefile.am                       |    2 +
>  lib/cmdlib/__init__.py            | 8098 +------------------------------------
>  lib/cmdlib/common.py              |   20 +-
>  lib/cmdlib/instance.py            | 7663 +++++++++++++++++++++++++++++++++++
>  lib/cmdlib/instance_utils.py      |  472 +++
>  lib/cmdlib/node.py                |  194 +-
>  test/py/ganeti.cmdlib_unittest.py |  177 +-
>  test/py/ganeti.config_unittest.py |   10 +-
>  8 files changed, 8360 insertions(+), 8276 deletions(-)
>  create mode 100644 lib/cmdlib/instance.py
>  create mode 100644 lib/cmdlib/instance_utils.py


There are a few changes introduced in LUInstanceCreate,
LUInstanceRename, LUInstanceRecreateDisks, and LUInstanceReinstall that
don't make sense. Please revert them (note that some of the reformatting
was good, so don't just blindly copy the original classes back):

LUInstanceCreate:
--- /tmp/tmpQC3Sw8      2013-05-16 17:26:21.931924627 +0200
+++ /tmp/tmpvKpDrT      2013-05-16 17:26:21.935924843 +0200
@@ -15,7 +15,7 @@
     if self.op.no_install and self.op.start:
       self.LogInfo("No-installation mode selected, disabling startup")
       self.op.start = False
-    # validate/normalize the instance name
+      # validate/normalize the instance name
     self.op.instance_name = \
       netutils.Hostname.GetNormalizedName(self.op.instance_name)

@@ -27,7 +27,7 @@
     # check nics' parameter names
     for nic in self.op.nics:
       utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
-    # check that NIC's parameters names are unique and valid
+      # check that NIC's parameters names are unique and valid
     utils.ValidateDeviceNames("NIC", self.op.nics)

     # check that disk's names are unique and valid
@@ -85,7 +85,7 @@

     # file storage checks
     if (self.op.file_driver and
-        not self.op.file_driver in constants.FILE_DRIVER):
+          not self.op.file_driver in constants.FILE_DRIVER):
       raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                  self.op.file_driver, errors.ECODE_INVAL)

@@ -424,7 +424,7 @@
       self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

     if (self.op.hypervisor is None and
-        einfo.has_option(constants.INISECT_INS, "hypervisor")):
+          einfo.has_option(constants.INISECT_INS, "hypervisor")):
       self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

     if einfo.has_section(constants.INISECT_HYP):
@@ -439,7 +439,7 @@
       for name, value in einfo.items(constants.INISECT_BEP):
         if name not in self.op.beparams:
           self.op.beparams[name] = value
-        # Compatibility for the old "memory" be param
+          # Compatibility for the old "memory" be param
         if name == constants.BE_MEMORY:
           if constants.BE_MAXMEM not in self.op.beparams:
             self.op.beparams[constants.BE_MAXMEM] = value
@@ -449,7 +449,7 @@
       # try to read the parameters old style, from the main section
       for name in constants.BES_PARAMETERS:
         if (name not in self.op.beparams and
-            einfo.has_option(constants.INISECT_INS, name)):
+              einfo.has_option(constants.INISECT_INS, name)):
           self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

     if einfo.has_section(constants.INISECT_OSP):
@@ -467,18 +467,18 @@
     for name in self.op.hvparams.keys():
       if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
         del self.op.hvparams[name]
-    # beparams
+        # beparams
     be_defs = cluster.SimpleFillBE({})
     for name in self.op.beparams.keys():
       if name in be_defs and be_defs[name] == self.op.beparams[name]:
         del self.op.beparams[name]
-    # nic params
+        # nic params
     nic_defs = cluster.SimpleFillNIC({})
     for nic in self.op.nics:
       for name in constants.NICS_PARAMETERS:
         if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
           del nic[name]
-    # osparams
+          # osparams
     os_defs = cluster.SimpleFillOS(self.op.os_type, {})
     for name in self.op.osparams.keys():
       if name in os_defs and os_defs[name] == self.op.osparams[name]:
@@ -527,12 +527,12 @@
       self._old_instance_name = None

     if (not self.cfg.GetVGName() and
-        self.op.disk_template not in constants.DTS_NOT_LVM):
+            self.op.disk_template not in constants.DTS_NOT_LVM):
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances", errors.ECODE_STATE)

     if (self.op.hypervisor is None or
-        self.op.hypervisor == constants.VALUE_AUTO):
+            self.op.hypervisor == constants.VALUE_AUTO):
       self.op.hypervisor = self.cfg.GetHypervisorType()

     cluster = self.cfg.GetClusterInfo()
@@ -766,7 +766,7 @@
         raise errors.OpPrereqError("Online logical volumes found, cannot"
                                    " adopt: %s" % utils.CommaJoin(online_lvs),
                                    errors.ECODE_STATE)
-      # update the size of disk based on what is found
+        # update the size of disk based on what is found
       for dsk in self.disks:
         dsk[constants.IDISK_SIZE] = \
           int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],


And in LUInstanceRename:
--- /tmp/tmpuZN17O      2013-05-16 17:26:21.943925272 +0200
+++ /tmp/tmpe5LEjH      2013-05-16 17:26:21.943925272 +0200
@@ -51,7 +51,7 @@
       hostname = _CheckHostnameSane(self, new_name)
       new_name = self.op.new_name = hostname.name
       if (self.op.ip_check and
-          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
+            netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
         raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                    (hostname.ip, new_name),
                                    errors.ECODE_NOTUNIQUE)
@@ -70,7 +70,7 @@

     rename_file_storage = False
     if (inst.disk_template in constants.DTS_FILEBASED and
-        self.op.new_name != inst.name):
+            self.op.new_name != inst.name):
       old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
       rename_file_storage = True


And also LUInstanceRecreateDisks:
--- /tmp/tmpnq7_LX      2013-05-16 17:26:21.971926779 +0200
+++ /tmp/tmpE6URv2      2013-05-16 17:26:21.971926779 +0200
@@ -224,7 +224,7 @@
                                  errors.ECODE_INVAL)

     if ((self.op.nodes or self.op.iallocator) and
-        sorted(self.disks.keys()) != range(len(instance.disks))):
+            sorted(self.disks.keys()) != range(len(instance.disks))):
       raise errors.OpPrereqError("Can't recreate disks partially and"
                                  " change the nodes at the same time",
                                  errors.ECODE_INVAL)
@@ -265,7 +265,7 @@
         # need to update the nodes and minors
         assert len(self.op.nodes) == 2
         assert len(disk.logical_id) == 6 # otherwise disk internals
-                                         # have changed
+        # have changed
         (_, _, old_port, _, _, old_secret) = disk.logical_id
         new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
         new_id = (self.op.nodes[0], self.op.nodes[1], old_port,


And finally LUInstanceReinstall:
--- /tmp/tmpGPdvCh      2013-05-16 17:26:21.983927424 +0200
+++ /tmp/tmpZRCwvg      2013-05-16 17:26:21.983927424 +0200
@@ -34,7 +34,7 @@
     assert instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
     _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
-                     " offline, cannot reinstall")
+                                                  " offline, cannot reinstall")

     if instance.disk_template == constants.DT_DISKLESS:
       raise errors.OpPrereqError("Instance '%s' has no disks" %


Similarly, in instance_utils.py please revert this change in
_ShutdownInstanceDisks:

--- /tmp/tmp2tZCOQ      2013-05-16 17:26:22.003928500 +0200
+++ /tmp/tmpBsGhUi      2013-05-16 17:26:22.003928500 +0200
@@ -19,6 +19,6 @@
         lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                       disk.iv_name, node, msg)
         if ((node == instance.primary_node and not ignore_primary) or
-            (node != instance.primary_node and not result.offline)):
+              (node != instance.primary_node and not result.offline)):
           all_result = False
   return all_result


Thanks,
Bernardo

Reply via email to