On 22 May 2013 10:52, Thomas Thrainer <[email protected]> wrote:
>
>
>
> On Mon, May 20, 2013 at 5:11 PM, Bernardo Dal Seno <[email protected]>
> wrote:
>>
>> Masterd checks that specifications for new disks don't include spindles
>> when exclusive storage is disabled.
>>
>> Signed-off-by: Bernardo Dal Seno <[email protected]>
>> ---
>>  lib/cmdlib/instance.py         | 27 +++++++++++++++++++++++----
>>  lib/cmdlib/instance_storage.py | 26 ++++++++++++++++++++++----
>>  2 files changed, 45 insertions(+), 8 deletions(-)
>>
>> diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
>> index 6498ce2..392ae49 100644
>> --- a/lib/cmdlib/instance.py
>> +++ b/lib/cmdlib/instance.py
>> @@ -1030,6 +1030,13 @@ class LUInstanceCreate(LogicalUnit):
>>          raise errors.OpPrereqError("Disk template %s not supported with"
>>                                     " exclusive storage" %
>> self.op.disk_template,
>>                                     errors.ECODE_STATE)
>
>
> In case of DRBD, only one of the two nodes has to have exclusive storage
> enabled to skip the "else" branch. The "then" branch would clearly produce
> an error then, but in the spirit of defensive programming a
> "compat.all(map(has_es, nodes))" instead of "else" would reveal the intent a
> bit better and be more future proof.

I'm not sure I follow you. If we don't support DRBD with exclusive
storage (e_s), as it's the case right now, in case one node has e_s
set then we report an error, and that's it. If we start supporting
DRBD, then the error is not reported and the instance is subject to
the e_s restriction if any of its nodes has e_s set; so spindles are
accepted as long as at least one node has e_s (it will be ignored when
actually creating the LVs on the non-e_s node, of course). So the code
is partially ready to support DRBD, the missing part is the low-level
handling of the disks (in noded). Does it make sense?

> Isn't that basically the same check as above, but written in different
> terms? Couldn't we factor out a helper function?

Done.

>> +    excl_stor = compat.any(
>> +      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes)
>> +      )
>> +
>
>
> Why "any" and not "all"? Is it enough that one node has exclusive storage
> enabled? I know, that there can only be one node if exclusive storage is
> enabled, but why assume that every reader always knows?

I think that this has been covered by the explanation above.

>> diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
>> +    if self.op.nodes:
>> +      nodes = self.op.nodes
>> +    else:
>> +      nodes = instance.all_nodes
>> +    excl_stor = compat.any(
>> +      rpc.GetExclusiveStorageForNodeNames(self.cfg, nodes)
>> +      )
>
>
> Why "any"?

Same here.


> Looks again like the above two checks.

Refactored.


Interdiff, which also fixes two type problems that hid each other:

diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
index 392ae49..572ac20 100644
--- a/lib/cmdlib/instance.py
+++ b/lib/cmdlib/instance.py
@@ -54,7 +54,8 @@ from ganeti.cmdlib.instance_storage import CreateDisks, \
   CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
   IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
   CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
-  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
+  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
+  CheckSpindlesExclusiveStorage
 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
   GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
   NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
@@ -1025,18 +1026,13 @@ class LUInstanceCreate(LogicalUnit):
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       nodes.append(snode)
     has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
-    if compat.any(map(has_es, nodes)):
-      if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
-        raise errors.OpPrereqError("Disk template %s not supported with"
-                                   " exclusive storage" % self.op.disk_template,
-                                   errors.ECODE_STATE)
-    else:
-      for disk in self.disks:
-        if constants.IDISK_SPINDLES in disk:
-          raise errors.OpPrereqError("Spindles in instance disks cannot be"
-                                     " specified when exclusive storage is not"
-                                     " active",
-                                     errors.ECODE_INVAL)
+    excl_stor = compat.any(map(has_es, nodes))
+    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
+      raise errors.OpPrereqError("Disk template %s not supported with"
+                                 " exclusive storage" % self.op.disk_template,
+                                 errors.ECODE_STATE)
+    for disk in self.disks:
+      CheckSpindlesExclusiveStorage(disk, excl_stor)

     nodenames = [pnode.name] + self.secondaries

@@ -2242,13 +2238,7 @@ class LUInstanceSetParams(LogicalUnit):
       if name is not None and name.lower() == constants.VALUE_NONE:
         params[constants.IDISK_NAME] = None

-      spindles = params.get(constants.IDISK_SPINDLES, None)
-      if spindles is not None:
-        if not excl_stor:
-          raise errors.OpPrereqError("Spindles in instance disks cannot be"
-                                     " specified when exclusive storage is not"
-                                     " active",
-                                     errors.ECODE_INVAL)
+      CheckSpindlesExclusiveStorage(params, excl_stor)

     elif op == constants.DDM_MODIFY:
       if constants.IDISK_SIZE in params:
@@ -2627,7 +2617,7 @@ class LUInstanceSetParams(LogicalUnit):
     self.diskparams = self.cfg.GetInstanceDiskParams(instance)

     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes)
+      rpc.GetExclusiveStorageForNodeNames(self.cfg, instance.all_nodes).values()
       )
 diff --git a/lib/cmdlib/instance_storage.py b/lib/cmdlib/instance_storage.py
index 6669404..3816088 100644
--- a/lib/cmdlib/instance_storage.py
+++ b/lib/cmdlib/instance_storage.py
@@ -518,6 +518,23 @@ def GenerateDiskTemplate(
   return disks


+def CheckSpindlesExclusiveStorage(diskdict, es_flag):
+  """Check the presence of the spindle options with exclusive_storage.
+
+  @type diskdict: dict
+  @param diskdict: disk parameters
+  @type es_flag: bool
+  @param es_flag: the effective value of the exclusive_storage flag
+  @raise errors.OpPrereqError when spindles are given and they should not
+
+  """
+  if (not es_flag and constants.IDISK_SPINDLES in diskdict and
+      diskdict[constants.IDISK_SPINDLES] is not None):
+    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
+                               " when exclusive storage is not active",
+                               errors.ECODE_INVAL)
+
+
 class LUInstanceRecreateDisks(LogicalUnit):
   """Recreate an instance's missing disks.

@@ -766,15 +783,10 @@ class LUInstanceRecreateDisks(LogicalUnit):
     else:
       nodes = instance.all_nodes
     excl_stor = compat.any(
-      rpc.GetExclusiveStorageForNodeNames(self.cfg, nodes)
+      rpc.GetExclusiveStorageForNodeNames(self.cfg, nodes).values()
       )
-    if not excl_stor:
-      for (_, new_params) in self.disks:
-        if constants.IDISK_SPINDLES in new_params:
-          raise errors.OpPrereqError("Spindles in instance disks cannot be"
-                                     " specified when exclusive storage is not"
-                                     " active",
-                                     errors.ECODE_INVAL)
+    for new_params in self.disks.values():
+      CheckSpindlesExclusiveStorage(new_params, excl_stor)

   def Exec(self, feedback_fn):
     """Recreate the disks.

     # Check disk modifications. This is done here and not in CheckArguments


Bernardo

Reply via email to