Thanks a lot for this patch

On Thu, Dec 18, 2014 at 10:55:06PM +0200, Alex Pyrgiotis wrote:
> The attach/detach test is merged with the add/remove test. The test works
> as follows: an extra disk is added to each instance and is given a random
> name. Once it has been added successfully, we detach it and check that
> the instance operates normally. Then, we pick instances randomly and
> attach a detached disk to them, using an algorithm that matches a disk
> to an instance based on its disk nodes. Finally, we remove the disk.
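
As an aside for anyone reading along: the matching step described above boils
down to comparing node sets. A rough, self-contained sketch of the idea (toy
names only, not the patch code itself):

  disk_nodes = {}          # detached-disk name -> frozenset of node names
  instance_nodes = {}      # instance name      -> frozenset of node names

  def record(instance, disk, nodes):
    # Both maps receive the same node set when the disk is added and detached.
    instance_nodes[instance] = frozenset(nodes)
    disk_nodes[disk] = frozenset(nodes)

  def find_matching_disk(instance):
    # Pick any detached disk living on exactly the instance's nodes and
    # consume it, so the same disk is never handed out twice.
    wanted = instance_nodes[instance]
    for disk, nodes in disk_nodes.items():
      if nodes == wanted:
        del disk_nodes[disk]
        return disk
    raise LookupError("no matching detached disk for %s" % instance)

  record("inst1", "XK3Q9ZT2", ["node1", "node2"])
  print(find_matching_disk("inst1"))   # -> XK3Q9ZT2
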
>
> Moreover, move the start/stop test to the top of the tests, since the
> start/stop functionality is erroneously used first by other tests
> (e.g. rename) and only then tested on its own.
>
> Finally, split the Burner class into subclasses for maintainability and
> reduce the maximum number of class attributes from 20 to 19.
>
> Signed-off-by: Alex Pyrgiotis <[email protected]>
>
> diff --git a/lib/tools/burnin.py b/lib/tools/burnin.py
> index 49ba6b6..67f2612 100755
> --- a/lib/tools/burnin.py
> +++ b/lib/tools/burnin.py
> @@ -37,8 +37,11 @@ import optparse
>  import time
>  import socket
>  import urllib
> +import random
> +import string # pylint: disable=W0402
>  from itertools import izip, islice, cycle
>  from cStringIO import StringIO
> +from operator import or_
>
>  from ganeti import opcodes
>  from ganeti import constants
> @@ -130,6 +133,10 @@ def Err(msg, exit_code=1):
>    sys.exit(exit_code)
>
>
> +def RandomString(size=8, chars=string.ascii_uppercase + string.digits):
> +  return ''.join(random.choice(chars) for x in range(size))
> +
> +
>  class SimpleOpener(urllib.FancyURLopener):
>    """A simple url opener"""
>    # pylint: disable=W0221
> @@ -312,24 +319,11 @@ def _DoBatch(retry):
>    return wrap
>
>
> -class Burner(object):
> -  """Burner class."""
> +class FeedbackAccumulator(object):
> +  """Feedback accumulator class."""
>
> -  def __init__(self):
> -    """Constructor."""
> -    self.url_opener = SimpleOpener()
> -    self._feed_buf = StringIO()
> -    self.nodes = []
> -    self.instances = []
> -    self.to_rem = []
> -    self.queued_ops = []
> -    self.opts = None
> -    self.queue_retry = False
> -    self.disk_count = self.disk_growth = self.disk_size = None
> -    self.hvp = self.bep = None
> -    self.ParseOptions()
> -    self.cl = cli.GetClient()
> -    self.GetState()
> +  _feed_buf = StringIO()
> +  opts = None
>
>    def ClearFeedbackBuf(self):
>      """Clear the feedback buffer."""
> @@ -346,6 +340,14 @@ class Burner(object):
>      if self.opts.verbose:
>        Log(formatted_msg, indent=3)
>
> +
> +class JobHandler(FeedbackAccumulator):
> +  """Class for handling Ganeti jobs."""
> +
> +  queued_ops = []
> +  queue_retry = False
> +  cl = cli.GetClient()

This needs to go into the constructor; otherwise it will be executed at
the time the class is defined, i.e. at import time.
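
A minimal, made-up illustration of the difference (the hypothetical connect()
below just stands in for cli.GetClient(); this is not Ganeti code):

  def connect():
    print("connecting...")
    return object()

  class Eager(object):
    conn = connect()          # runs once, when the class body executes,
                              # i.e. when the module is imported

  class Lazy(object):
    def __init__(self):
      self.conn = connect()   # runs each time an instance is created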

> +
>    def MaybeRetry(self, retry_count, msg, fn, *args):
>      """Possibly retry a given function execution.
>
> @@ -480,6 +482,26 @@ class Burner(object):
>
>      return val
>
> +
> +class Burner(JobHandler):
> +  """Burner class."""
> +
> +  def __init__(self):
> +    """Constructor."""
> +    super(Burner, self).__init__()
> +
> +    self.url_opener = SimpleOpener()
> +    self.nodes = []
> +    self.instances = []
> +    self.to_rem = []
> +    self.disk_count = self.disk_growth = self.disk_size = None
> +    self.hvp = self.bep = None
> +    self.ParseOptions()
> +    self.disk_nodes = {}
> +    self.instance_nodes = {}
> +    self.GetState()
> +    self.confd_reply = None
> +
>    def ParseOptions(self):
>      """Parses the command line options.
>
> @@ -597,6 +619,16 @@ class Burner(object):
>      self.hv_can_migrate = \
>        hypervisor.GetHypervisorClass(self.hypervisor).CAN_MIGRATE
>
> +  def FindMatchingDisk(self, instance):
> +    """Find a disk whose nodes match the instance's disk nodes."""
> +    instance_nodes = self.instance_nodes[instance]
> +    for disk, disk_nodes in self.disk_nodes.iteritems():
> +      if instance_nodes == disk_nodes:
> +        # Erase that disk from the dictionary so that we don't pick it again.
> +        del self.disk_nodes[disk]
> +        return disk
> +    Err("Couldn't find matching detached disk for instance %s" % instance)
> +
>    @_DoCheckInstances
>    @_DoBatch(False)
>    def BurnCreateInstances(self):
> @@ -942,24 +974,6 @@ class Burner(object):
>        Log("deactivate disks (when offline)", indent=2)
>        self.ExecOrQueue(instance, [op_act, op_stop, op_act, op_deact, op_start])
>
> -  @_DoCheckInstances
> -  @_DoBatch(False)
> -  def BurnAddRemoveDisks(self):
> -    """Add and remove an extra disk for the instances."""
> -    Log("Adding and removing disks")
> -    for instance in self.instances:
> -      Log("instance %s", instance, indent=1)
> -      op_add = opcodes.OpInstanceSetParams(
> -        instance_name=instance,
> -        disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
> -      op_rem = opcodes.OpInstanceSetParams(
> -        instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
> -      op_stop = self.StopInstanceOp(instance)
> -      op_start = self.StartInstanceOp(instance)
> -      Log("adding a disk", indent=2)
> -      Log("removing last disk", indent=2)
> -      self.ExecOrQueue(instance, [op_add, op_stop, op_rem, op_start])
> -
>    @_DoBatch(False)
>    def BurnAddRemoveNICs(self):
>      """Add, change and remove an extra NIC for the instances."""
> @@ -997,6 +1011,8 @@ class Burner(object):
>            Log("Node role for master: OK", indent=1)
>          else:
>            Err("Node role for master: wrong: %s" % reply.server_reply.answer)
> +      elif reply.orig_request.type == constants.CONFD_REQ_INSTANCE_DISKS:
> +        self.confd_reply = reply.server_reply.answer
>
>    def DoConfdRequestReply(self, req):
>      self.confd_counting_callback.RegisterQuery(req.rsalt)
> @@ -1035,6 +1051,81 @@ class Burner(object):
>          query=self.cluster_info["master"])
>      self.DoConfdRequestReply(req)
>
> +  @_DoCheckInstances
> +  @_DoBatch(False)
> +  def BurnAddDisks(self):
> +    """Add an extra disk to every instance and then detach it."""
> +    Log("Adding and detaching disks")
> +
> +    # Instantiate a Confd client
> +    filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback)
> +    counting_callback = confd_client.ConfdCountingCallback(filter_callback)
> +    self.confd_counting_callback = counting_callback
> +    self.confd_client = confd_client.GetConfdClient(counting_callback)
> +
> +    # Iterate all instances, start them, add a disk with a unique name and
> +    # detach it. Do all disk operations with hotplugging (if possible).
> +    for instance in self.instances:
> +      Log("instance %s", instance, indent=1)
> +
> +      # Fetch disk info for an instance from the confd. The result of the query
> +      # will be stored in the confd_reply attribute of Burner.
> +      req = (confd_client.ConfdClientRequest(
> +        type=constants.CONFD_REQ_INSTANCE_DISKS, query=instance))
> +      self.DoConfdRequestReply(req)
> +
> +      disk_name = RandomString()
> +
> +      nodes = [set(disk["nodes"]) for disk in self.confd_reply]
> +      nodes = reduce(or_, nodes)
> +      self.instance_nodes[instance] = nodes
> +      self.disk_nodes[disk_name] = nodes
> +
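
Side note for readers: reduce(or_, ...) above simply computes the union of the
per-disk node sets returned by confd. A toy, self-contained version of the same
computation, using a made-up reply:

  from operator import or_
  from functools import reduce   # a builtin in Python 2; the import also works on 3

  # Hypothetical confd reply: one dict per disk, listing the nodes it lives on.
  confd_reply = [{"nodes": ["node1", "node2"]},
                 {"nodes": ["node2", "node3"]}]

  nodes = reduce(or_, [set(d["nodes"]) for d in confd_reply])
  # nodes == set(["node1", "node2", "node3"])
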
> +      op_stop = self.StopInstanceOp(instance)
> +      op_add = opcodes.OpInstanceSetParams(
> +        instance_name=instance, hotplug_if_possible=True,
> +        disks=[(constants.DDM_ADD, {"size": self.disk_size[0],
> +                                    "name": disk_name})])
> +      op_detach = opcodes.OpInstanceSetParams(
> +        instance_name=instance, hotplug_if_possible=True,
> +        disks=[(constants.DDM_DETACH, {})])
> +      op_start = self.StartInstanceOp(instance)
> +      Log("adding a disk with name %s" % disk_name, indent=2)
> +      Log("detaching last disk", indent=2)
> +      self.ExecOrQueue(instance, [op_start, op_add, op_detach, op_stop,
> +                                  op_start])
> +
> +  @_DoCheckInstances
> +  @_DoBatch(False)
> +  def BurnRemoveDisks(self):
> +    """Attach a previously detached disk to an instance and then remove it."""
> +    Log("Attaching and removing disks")
> +
> +    # Iterate all instances in random order, attach the detached disks, remove
> +    # them and then restart the instances. Do all disk operations with
> +    # hotplugging (if possible).
> +    instances_copy = list(self.instances)
> +    random.shuffle(instances_copy)
> +    for instance in instances_copy:
> +      Log("instance %s", instance, indent=1)
> +
> +      disk_name = self.FindMatchingDisk(instance)
> +      op_attach = opcodes.OpInstanceSetParams(
> +        instance_name=instance, hotplug_if_possible=True,
> +        disks=[(constants.DDM_ATTACH, {"name": disk_name})])
> +      op_rem = opcodes.OpInstanceSetParams(
> +        instance_name=instance, hotplug_if_possible=True,
> +        disks=[(constants.DDM_REMOVE, {})])
> +      op_stop = self.StopInstanceOp(instance)
> +      op_start = self.StartInstanceOp(instance)
> +      Log("attaching a disk with name %s" % disk_name, indent=2)
> +      Log("removing last disk", indent=2)
> +      self.ExecOrQueue(instance, [op_attach, op_rem, op_stop, op_start])
> +
> +    # Disk nodes are useful only for this test.
> +    del self.disk_nodes
> +    del self.instance_nodes
> +
>    def _CheckInstanceAlive(self, instance):
>      """Check if an instance is alive by doing http checks.
>
> @@ -1081,6 +1172,9 @@ class Burner(object):
>      try:
>        self.BurnCreateInstances()
>
> +      if self.opts.do_startstop:
> +        self.BurnStopStart()
> +
>        if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]:
>          self.BurnModifyRuntimeMemory()
>
> @@ -1126,8 +1220,8 @@ class Burner(object):
>        if self.opts.do_renamesame:
>          self.BurnRenameSame(self.opts.name_check, self.opts.ip_check)
>
> -      if self.opts.do_addremove_disks:
> -        self.BurnAddRemoveDisks()
> +      if self.opts.do_confd_tests:
> +        self.BurnConfd()
>
>        default_nic_mode = self.cluster_default_nicparams[constants.NIC_MODE]
>        # Don't add/remove nics in routed mode, as we would need an ip to add
> @@ -1141,15 +1235,13 @@ class Burner(object):
>        if self.opts.do_activate_disks:
>          self.BurnActivateDisks()
>
> +      if self.opts.do_addremove_disks:
> +        self.BurnAddDisks()
> +        self.BurnRemoveDisks()
> +
>        if self.opts.rename:
>          self.BurnRename(self.opts.name_check, self.opts.ip_check)
>
> -      if self.opts.do_confd_tests:
> -        self.BurnConfd()
> -
> -      if self.opts.do_startstop:
> -        self.BurnStopStart()
> -
>        has_err = False
>      finally:
>        if has_err:
> diff --git a/pylintrc b/pylintrc
> index 47be2b8..2eca14f 100644
> --- a/pylintrc
> +++ b/pylintrc
> @@ -64,7 +64,7 @@ max-returns = 10
>  max-branchs = 80
>  max-statements = 200
>  max-parents = 7
> -max-attributes = 20
> +max-attributes = 19

Unfortunately, by now there are some new offenders against the 19-attribute
limit, so let's just remove the max-attributes decrement.

>  # zero as struct-like (PODS) classes don't export any methods
>  min-public-methods = 0
>  max-public-methods = 50
> --
> 1.7.10.4
>

Otherwise LGTM.

I propose the following interdiff; is that OK with you?

diff --git a/lib/tools/burnin.py b/lib/tools/burnin.py
index 67f2612..2e41155 100755
--- a/lib/tools/burnin.py
+++ b/lib/tools/burnin.py
@@ -346,7 +346,9 @@ class JobHandler(FeedbackAccumulator):

   queued_ops = []
   queue_retry = False
-  cl = cli.GetClient()
+
+  def __init__(self):
+    self.cl = cli.GetClient()

   def MaybeRetry(self, retry_count, msg, fn, *args):
     """Possibly retry a given function execution.
diff --git a/pylintrc b/pylintrc
index 2eca14f..47be2b8 100644
--- a/pylintrc
+++ b/pylintrc
@@ -64,7 +64,7 @@ max-returns = 10
 max-branchs = 80
 max-statements = 200
 max-parents = 7
-max-attributes = 19
+max-attributes = 20
 # zero as struct-like (PODS) classes don't export any methods
 min-public-methods = 0
 max-public-methods = 50

--
Google Germany GmbH
Dienerstr. 12
80331 München

Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg
Geschäftsführer: Graham Law, Christine Elizabeth Flores
