Saggi Mizrahi has uploaded a new change for review.

Change subject: Make libvirtvm.py PEP8 compliant
......................................................................

Make libvirtvm.py PEP8 compliant

Change-Id: I32570c2d1a2efa4a5446caa6d973a10b4c84d996
Signed-off-by: Saggi Mizrahi <[email protected]>
---
M Makefile.am
M vdsm/libvirtvm.py
2 files changed, 203 insertions(+), 145 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/22/7422/1

diff --git a/Makefile.am b/Makefile.am
index 5f17f63..927992b 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -58,6 +58,7 @@
        vdsm/guestIF.py \
        vdsm/hooks.py \
        vdsm/libvirtev.py \
+       vdsm/libvirtvm.py \
        vdsm/md_utils.py \
        vdsm/momIF.py \
        vdsm/parted_utils.py \
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 3ff3a01..134cd0a 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -44,17 +44,19 @@
 # service/daemon and in libvirtd (to be used with the quiesce flag).
 _QEMU_GA_DEVICE_NAME = 'org.qemu.guest_agent.0'
 
+
 class MERGESTATUS:
-    NOT_STARTED     = "Not Started"
-    IN_PROGRESS     = "In Progress"
-    FAILED          = "Failed"
-    COMPLETED       = "Completed"
-    UNKNOWN         = "Unknown"
+    NOT_STARTED = "Not Started"
+    IN_PROGRESS = "In Progress"
+    FAILED = "Failed"
+    COMPLETED = "Completed"
+    UNKNOWN = "Unknown"
     DRIVE_NOT_FOUND = "Drive Not Found"
-    BASE_NOT_FOUND  = "Base Not Found"
+    BASE_NOT_FOUND = "Base Not Found"
+
 
 class VmStatsThread(utils.AdvancedStatsThread):
-    MBPS_TO_BPS = 10**6 / 8
+    MBPS_TO_BPS = 10 ** 6 / 8
 
     def __init__(self, vm):
         utils.AdvancedStatsThread.__init__(self, log=vm.log, daemon=True)
@@ -71,15 +73,17 @@
         self.sampleDisk = utils.AdvancedStatsFunction(self._sampleDisk,
                              config.getint('vars', 'vm_sample_disk_interval'),
                              config.getint('vars', 'vm_sample_disk_window'))
-        self.sampleDiskLatency = 
utils.AdvancedStatsFunction(self._sampleDiskLatency,
-                             config.getint('vars', 
'vm_sample_disk_latency_interval'),
-                             config.getint('vars', 
'vm_sample_disk_latency_window'))
+        self.sampleDiskLatency = utils.AdvancedStatsFunction(
+                self._sampleDiskLatency,
+                config.getint('vars', 'vm_sample_disk_latency_interval'),
+                config.getint('vars', 'vm_sample_disk_latency_window'))
         self.sampleNet = utils.AdvancedStatsFunction(self._sampleNet,
                              config.getint('vars', 'vm_sample_net_interval'),
                              config.getint('vars', 'vm_sample_net_window'))
 
-        self.addStatsFunction(self.highWrite, self.updateVolumes, 
self.sampleCpu,
-                              self.sampleDisk, self.sampleDiskLatency, 
self.sampleNet)
+        self.addStatsFunction(self.highWrite, self.updateVolumes,
+                self.sampleCpu, self.sampleDisk, self.sampleDiskLatency,
+                self.sampleNet)
 
     def _highWrite(self):
         if not self._vm._volumesPrepared:
@@ -87,14 +91,18 @@
             return
 
         for vmDrive in self._vm._devices[vm.DISK_DEVICES]:
-            if vmDrive.blockDev and vmDrive.format == 'cow':
-                capacity, alloc, physical = \
-                                        self._vm._dom.blockInfo(vmDrive.path, 
0)
-                if physical - alloc < self._vm._MIN_DISK_REMAIN:
-                    self._log.info('%s/%s apparent: %s capacity: %s, alloc: %s 
phys: %s',
-                                  vmDrive.domainID, vmDrive.volumeID,
-                                  vmDrive.apparentsize, capacity, alloc, 
physical)
-                    self._vm._onHighWrite(vmDrive.name, alloc)
+            if not vmDrive.blockDev or vmDrive.format != 'cow':
+                continue
+
+            capacity, alloc, physical = \
+                    self._vm._dom.blockInfo(vmDrive.path, 0)
+
+            if physical - alloc < self._vm._MIN_DISK_REMAIN:
+                self._log.info(
+                        '%s/%s apparent: %s capacity: %s, alloc: %s phys: %s',
+                        vmDrive.domainID, vmDrive.volumeID,
+                        vmDrive.apparentsize, capacity, alloc, physical)
+                self._vm._onHighWrite(vmDrive.name, alloc)
 
     def _updateVolumes(self):
         if not self._vm._volumesPrepared:
@@ -111,7 +119,7 @@
 
     def _sampleCpu(self):
         state, maxMem, memory, nrVirtCpu, cpuTime = self._vm._dom.info()
-        return cpuTime / 1000**3
+        return cpuTime / (1000 ** 3)
 
     def _sampleDisk(self):
         if not self._vm._volumesPrepared:
@@ -130,38 +138,26 @@
             return
 
         def _blockstatsParses(devList):
-            # The json output looks like:
-            # {u'return': [{u'device': u'drive-ide0-0-0',
-            #               u'stats': {u'rd_operations': 0, 
u'flush_total_time_ns': 0, u'wr_highest_offset': 0, u'rd_total_time_ns': 0,
-            #                          u'rd_bytes': 0, u'wr_total_time_ns': 0, 
u'flush_operations': 0, u'wr_operations': 0, u'wr_bytes':0},
-            #               u'parent': {u'stats': {u'rd_operations': 0, 
u'flush_total_time_ns': 0, u'wr_highest_offset': 0,
-            #                                      u'rd_total_time_ns': 0, 
u'rd_bytes': 0, u'wr_total_time_ns': 0, u'flush_operations': 0,
-            #                                      u'wr_operations': 0, 
u'wr_bytes': 0}
-            #                          }
-            #               },
-            #               {u'device': u'drive-ide0-1-0',
-            #                u'stats': {u'rd_operations': 0, 
u'flush_total_time_ns': 0, u'wr_highest_offset': 0, u'rd_total_time_ns': 0,
-            #                           u'rd_bytes': 0, u'wr_total_time_ns': 
0, u'flush_operations': 0, u'wr_operations': 0, u'wr_bytes': 0}
-            #               }],
-            #  u'id': u'libvirt-9'}
             stats = {}
             for item in devList['return']:
                 fullDevName = item['device']
                 alias = fullDevName[len('drive-'):].strip()
                 devStats = item['stats']
-                stats[alias] = {'rd_op':devStats['rd_operations'],
-                                'wr_op':devStats['wr_operations'],
-                                'flush_op':devStats['flush_operations'],
-                                
'rd_total_time_ns':devStats['rd_total_time_ns'],
-                                
'wr_total_time_ns':devStats['wr_total_time_ns'],
-                                
'flush_total_time_ns':devStats['flush_total_time_ns']}
+                stats[alias] = {
+                        'rd_op': devStats['rd_operations'],
+                        'wr_op': devStats['wr_operations'],
+                        'flush_op': devStats['flush_operations'],
+                        'rd_total_time_ns': devStats['rd_total_time_ns'],
+                        'wr_total_time_ns': devStats['wr_total_time_ns'],
+                        'flush_total_time_ns': devStats['flush_total_time_ns']
+                }
 
             return stats
 
         diskLatency = {}
-        cmd = json.dumps({ "execute" : "query-blockstats" })
+        cmd = json.dumps({"execute": "query-blockstats"})
         res = libvirt_qemu.qemuMonitorCommand(self._vm._dom, cmd,
-                            
libvirt_qemu.VIR_DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT)
+                libvirt_qemu.VIR_DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT)
         out = json.loads(res)
 
         stats = _blockstatsParses(out)
@@ -169,10 +165,10 @@
             try:
                 diskLatency[vmDrive.name] = stats[vmDrive.alias]
             except KeyError:
-                diskLatency[vmDrive.name] = {'rd_op':0, 'wr_op':0, 
'flush_op':0,
-                                             'rd_total_time_ns':0,
-                                             'wr_total_time_ns':0,
-                                             'flush_total_time_ns':0}
+                diskLatency[vmDrive.name] = {
+                        'rd_op': 0, 'wr_op': 0, 'flush_op': 0,
+                        'rd_total_time_ns': 0, 'wr_total_time_ns': 0,
+                        'flush_total_time_ns': 0}
                 self._log.warn("Disk %s latency not available", vmDrive.name)
 
         return diskLatency
@@ -208,9 +204,9 @@
                        'state':     'unknown'}
 
             try:
-                ifStats['rxErrors']  = str(eInfo[nic.name][2])
+                ifStats['rxErrors'] = str(eInfo[nic.name][2])
                 ifStats['rxDropped'] = str(eInfo[nic.name][3])
-                ifStats['txErrors']  = str(eInfo[nic.name][6])
+                ifStats['txErrors'] = str(eInfo[nic.name][6])
                 ifStats['txDropped'] = str(eInfo[nic.name][7])
 
                 ifRxBytes = (100.0 * (eInfo[nic.name][0] - sInfo[nic.name][0])
@@ -248,16 +244,22 @@
     def _getDiskLatency(self, stats):
         sInfo, eInfo, sampleInterval = self.sampleDiskLatency.getStats()
 
+        def _calcLatency(eValue, sValue, eTotal, sTotal):
+            diff = eValue - sValue
+            if diff == 0:
+                return 0
+
+            return (eTotal - sTotal) / diff
+
         def _avgLatencyCalc(sData, eData):
-            readLatency = 0 if not (eData['rd_op'] - sData['rd_op']) \
-                            else (eData['rd_total_time_ns'] - 
sData['rd_total_time_ns']) / \
-                                 (eData['rd_op'] - sData['rd_op'])
-            writeLatency = 0 if not (eData['wr_op'] - sData['wr_op']) \
-                            else (eData['wr_total_time_ns'] - 
sData['wr_total_time_ns']) / \
-                                 (eData['wr_op'] - sData['wr_op'])
-            flushLatency = 0 if not (eData['flush_op'] - sData['flush_op']) \
-                            else (eData['flush_total_time_ns'] - 
sData['flush_total_time_ns']) / \
-                                 (eData['flush_op'] - sData['flush_op'])
+            readLatency = _calcLatency(eData['rd_op'], sData['rd_op'],
+                    eData['rd_total_time_ns'], sData['rd_total_time_ns'])
+
+            writeLatency = _calcLatency(eData['wr_op'], sData['wr_op'],
+                    eData['wr_total_time_ns'], sData['wr_total_time_ns'])
+
+            flushLatency = _calcLatency(eData['flush_op'], sData['flush_op'],
+                    eData['flush_total_time_ns'], sData['flush_total_time_ns'])
 
             return str(readLatency), str(writeLatency), str(flushLatency)
 
@@ -267,8 +269,9 @@
                         'writeLatency': '0',
                         'flushLatency': '0'}
             try:
-                dLatency['readLatency'], dLatency['writeLatency'], \
-                dLatency['flushLatency'] = _avgLatencyCalc(sInfo[dName], 
eInfo[dName])
+                (dLatency['readLatency'], dLatency['writeLatency'],
+                dLatency['flushLatency']) = _avgLatencyCalc(sInfo[dName],
+                        eInfo[dName])
             except (KeyError, TypeError):
                 self._log.debug("Disk %s latency not available", dName)
             else:
@@ -312,6 +315,7 @@
 
         return True
 
+
 class MigrationDowntimeThread(threading.Thread):
     def __init__(self, vm, downtime, wait):
         super(MigrationDowntimeThread, self).__init__()
@@ -344,14 +348,17 @@
         self._vm.log.debug('canceling migration downtime thread')
         self._stop.set()
 
+
 class MigrationMonitorThread(threading.Thread):
-    _MIGRATION_MONITOR_INTERVAL = config.getint('vars', 
'migration_monitor_interval')   # seconds
+    _MIGRATION_MONITOR_INTERVAL = \
+            config.getint('vars', 'migration_monitor_interval')   # seconds
 
     def __init__(self, vm):
         super(MigrationMonitorThread, self).__init__()
         self._stop = threading.Event()
         self._vm = vm
         self.daemon = True
+        self.migrationTimeout = config.getint('vars', 'migration_timeout')
 
     def run(self):
         self._vm.log.debug('starting migration monitor thread')
@@ -366,13 +373,16 @@
             memTotal, memProcessed, _,   \
             fileTotal, fileProcessed, _ = self._vm._dom.jobInfo()
 
-            if smallest_dataRemaining is None or smallest_dataRemaining > 
dataRemaining:
+            if (smallest_dataRemaining is None or
+                    smallest_dataRemaining > dataRemaining):
                 smallest_dataRemaining = dataRemaining
                 lastProgressTime = time.time()
-            elif time.time() - lastProgressTime > config.getint('vars', 
'migration_timeout'):
+            elif time.time() - lastProgressTime > self.migrationTimeout:
                 # Migration is stuck, abort
                 self._vm.log.warn(
-                        'Migration is stuck: Hasn\'t progressed in %s seconds. 
Aborting.' % (time.time() - lastProgressTime)
+                        'Migration is stuck: '
+                        'Hasn\'t progressed in %s seconds. Aborting.' %
+                        (time.time() - lastProgressTime)
                     )
                 self._vm._dom.abortJob()
                 self.stop()
@@ -381,18 +391,19 @@
             if jobType == 0:
                 continue
 
-            dataProgress = 100*dataProcessed / dataTotal if dataTotal else 0
-            memProgress = 100*memProcessed / memTotal if memTotal else 0
+            dataProgress = 100 * dataProcessed / dataTotal if dataTotal else 0
+            memProgress = 100 * memProcessed / memTotal if memTotal else 0
 
             self._vm.log.info(
-                    'Migration Progress: %s seconds elapsed, %s%% of data 
processed, %s%% of mem processed'
-                    % (timeElapsed/1000,dataProgress,memProgress)
+                    'Migration Progress: %s seconds elapsed, %s%% of data ' \
+                    'processed, %s%% of mem processed'
+                    % (timeElapsed / 1000, dataProgress, memProgress)
                 )
-
 
     def stop(self):
         self._vm.log.debug('stopping migration monitor thread')
         self._stop.set()
+
 
 class MigrationSourceThread(vm.MigrationSourceThread):
 
@@ -417,11 +428,13 @@
                 self._vm._vmStats.cont()
                 raise
         else:
-            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0), 
self._vm.conf)
+            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
+                    self._vm.conf)
             response = self.destServer.migrationCreate(self._machineParams)
             if response['status']['code']:
                 self.status = response
-                raise RuntimeError('migration destination error: ' + 
response['status']['message'])
+                raise RuntimeError('migration destination error: ' +
+                        response['status']['message'])
             if config.getboolean('vars', 'ssl'):
                 transport = 'tls'
             else:
@@ -451,7 +464,8 @@
                 self._preparingMigrationEvt = False
                 if not self._migrationCanceledEvt:
                     self._vm._dom.migrateToURI2(duri, muri, None,
-                        libvirt.VIR_MIGRATE_LIVE | 
libvirt.VIR_MIGRATE_PEER2PEER,
+                        libvirt.VIR_MIGRATE_LIVE |
+                        libvirt.VIR_MIGRATE_PEER2PEER,
                         None, maxBandwidth)
             finally:
                 t.cancel()
@@ -468,7 +482,10 @@
             if not self._preparingMigrationEvt:
                     raise
 
-class TimeoutError(libvirt.libvirtError): pass
+
+class TimeoutError(libvirt.libvirtError):
+    pass
+
 
 class NotifyingVirDomain:
     # virDomain wrapper that notifies vm when a method raises an exception with
@@ -482,6 +499,7 @@
         attr = getattr(self._dom, name)
         if not callable(attr):
             return attr
+
         def f(*args, **kwargs):
             try:
                 ret = attr(*args, **kwargs)
@@ -598,7 +616,8 @@
         typeelem = self.doc.createElement('type')
         oselem.appendChild(typeelem)
         typeelem.setAttribute('arch', 'x86_64')
-        typeelem.setAttribute('machine', self.conf.get('emulatedMachine', 
'pc'))
+        typeelem.setAttribute('machine',
+                self.conf.get('emulatedMachine', 'pc'))
         typeelem.appendChild(self.doc.createTextNode('hvm'))
 
         qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
@@ -695,7 +714,8 @@
         m = self.doc.createElement('model')
         m.appendChild(self.doc.createTextNode(model))
         cpu.appendChild(m)
-        if 'smpCoresPerSocket' in self.conf or 'smpThreadsPerCore' in 
self.conf:
+        if any(key in self.conf for key in ('smpCoresPerSocket',
+                                            'smpThreadsPerCore')):
             topo = self.doc.createElement('topology')
             vcpus = int(self.conf.get('smp', '1'))
             cores = int(self.conf.get('smpCoresPerSocket', '1'))
@@ -857,12 +877,14 @@
 
         return element
 
+
 class GeneralDevice(LibvirtVmDevice):
     def getXML(self):
         """
         Create domxml for general device
         """
         return self.createXmlElem(self.type, self.device, ['address'])
+
 
 class ControllerDevice(LibvirtVmDevice):
     def getXML(self):
@@ -876,6 +898,7 @@
             ctrl.setAttribute('ports', '16')
 
         return ctrl
+
 
 class VideoDevice(LibvirtVmDevice):
     def getXML(self):
@@ -892,6 +915,7 @@
 
         return video
 
+
 class SoundDevice(LibvirtVmDevice):
     def getXML(self):
         """
@@ -900,6 +924,7 @@
         sound = self.createXmlElem('sound', None, ['address'])
         sound.setAttribute('model', self.device)
         return sound
+
 
 class NetworkInterfaceDevice(LibvirtVmDevice):
     def __init__(self, conf, log, **kwargs):
@@ -980,6 +1005,7 @@
             iface.appendChild(tune)
 
         return iface
+
 
 class Drive(LibvirtVmDevice):
     def __init__(self, conf, log, **kwargs):
@@ -1099,11 +1125,12 @@
                 driver.setAttribute('error_policy', 'stop')
             diskelem.appendChild(driver)
         elif self.device == 'floppy':
-            if self.path and not 
utils.getUserPermissions(constants.QEMU_PROCESS_USER,
-                                                          self.path)['write']:
+            if self.path and not utils.getUserPermissions(
+                    constants.QEMU_PROCESS_USER, self.path)['write']:
                 diskelem.appendChild(doc.createElement('readonly'))
 
         return diskelem
+
 
 class BalloonDevice(LibvirtVmDevice):
     def getXML(self):
@@ -1111,12 +1138,14 @@
         Create domxml for a memory balloon device.
 
         <memballoon model='virtio'>
-          <address type='pci' domain='0x0000' bus='0x00' slot='0x04' 
function='0x0'/>
+          <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
+          function='0x0'/>
         </memballoon>
         """
         m = self.createXmlElem(self.device, None, ['address'])
         m.setAttribute('model', self.specParams['model'])
         return m
+
 
 class RedirDevice(LibvirtVmDevice):
     def getXML(self):
@@ -1132,6 +1161,7 @@
 
 class LibvirtVm(vm.Vm):
     MigrationSourceThreadClass = MigrationSourceThread
+
     def __init__(self, cif, params):
         self._dom = None
         vm.Vm.__init__(self, cif, params)
@@ -1149,7 +1179,7 @@
         # config is initialized
         self._MIN_DISK_REMAIN = (100 -
                       config.getint('irs', 'volume_utilization_percent')) \
-            * config.getint('irs', 'volume_utilization_chunk_mb') * 2**20 \
+            * config.getint('irs', 'volume_utilization_chunk_mb') * 2 ** 20 \
             / 100
         self._lastXMLDesc = '<domain><uuid>%s</uuid></domain>' % self.id
         self._devXmlHash = '0'
@@ -1195,7 +1225,7 @@
         domxml.appendSysinfo(
             osname=caps.OSName.RHEVH,
             osversion=osd.get('version', '') + '-' + osd.get('release', ''),
-            hostUUID=utils.getHostUUID() )
+            hostUUID=utils.getHostUUID())
 
         domxml.appendClock()
         domxml.appendFeatures()
@@ -1244,7 +1274,8 @@
         utils.rmFile(self._qemuguestSocketFile)
 
     def updateGuestCpuRunning(self):
-        self._guestCpuRunning = self._dom.info()[0] == 
libvirt.VIR_DOMAIN_RUNNING
+        self._guestCpuRunning = (
+                self._dom.info()[0] == libvirt.VIR_DOMAIN_RUNNING)
 
     def _getUnderlyingVmDevicesInfo(self):
         """
@@ -1286,7 +1317,8 @@
             self.cif.channelListener, self.log,
             connect=utils.tobool(self.conf.get('vmchannel', 'true')))
 
-        self._guestCpuRunning = self._dom.info()[0] == 
libvirt.VIR_DOMAIN_RUNNING
+        self._guestCpuRunning = (
+                self._dom.info()[0] == libvirt.VIR_DOMAIN_RUNNING)
         if self.lastStatus not in ('Migration Destination',
                                    'Restoring state'):
             self._initTimePauseCode = self._readPauseCode(0)
@@ -1319,9 +1351,9 @@
 
             self.conf['devices'] = newDevices
             # We need to save conf here before we actually run VM.
-            # It's not enough to save conf only on status changes as we did 
before,
-            # because if vdsm will restarted between VM run and conf saving
-            # we will fail in inconsistent state during recovery.
+            # It's not enough to save conf only on status changes as we did
+            # before, because if vdsm will restarted between VM run and conf
+            # saving we will fail in inconsistent state during recovery.
             # So, to get proper device objects during VM recovery flow
             # we must to have updated conf before VM run
             self.saveState()
@@ -1330,8 +1362,8 @@
             # conf may be outdated if something happened during restart.
 
             # For BC we should to keep running VM run after vdsm upgrade.
-            # So, because this vm doesn't have normalize conf we need to build 
it
-            # in recovery flow
+            # So, because this vm doesn't have normalize conf we need to build
+            # it in recovery flow
             if not self.conf.get('devices'):
                 devices = self.buildConfDevices()
             else:
@@ -1348,7 +1380,8 @@
 
         for devType, devClass in devMap.items():
             for dev in devices[devType]:
-                self._devices[devType].append(devClass(self.conf, self.log, 
**dev))
+                dcls = devClass(self.conf, self.log, **dev)
+                self._devices[devType].append(dcls)
 
         # We should set this event as a last part of drives initialization
         self._pathsPreparedEvent.set()
@@ -1395,11 +1428,11 @@
 
     def hotplugNic(self, params):
         if self.isMigrating():
-           return errCode['migInProgress']
+            return errCode['migInProgress']
 
         nicParams = params.get('nic', {})
         nic = NetworkInterfaceDevice(self.conf, self.log, **nicParams)
-        nicXml =  nic.getXML().toprettyxml(encoding='utf-8')
+        nicXml = nic.getXML().toprettyxml(encoding='utf-8')
         self.log.debug("Hotplug NIC xml: %s" % (nicXml))
 
         try:
@@ -1408,8 +1441,9 @@
             self.log.error("Hotplug failed", exc_info=True)
             if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                 return errCode['noVM']
-            return {'status' : {'code': 
errCode['hotplugNic']['status']['code'],
-                                'message': e.message}}
+            return {'status': {
+                'code': errCode['hotplugNic']['status']['code'],
+                'message': e.message}}
         else:
             # FIXME!  We may have a problem here if vdsm dies right after
             # we sent command to libvirt and before save conf. In this case
@@ -1424,7 +1458,7 @@
 
     def hotunplugNic(self, params):
         if self.isMigrating():
-           return errCode['migInProgress']
+            return errCode['migInProgress']
 
         nicParams = params.get('nic', {})
 
@@ -1439,9 +1473,11 @@
             nicXml = nic.getXML().toprettyxml(encoding='utf-8')
             self.log.debug("Hotunplug NIC xml: %s", nicXml)
         else:
-            self.log.error("Hotunplug NIC failed - NIC not found: %s", 
nicParams)
-            return {'status' : {'code': 
errCode['hotunplugNic']['status']['code'],
-                                'message': "NIC not found"}}
+            self.log.error("Hotunplug NIC failed - NIC not found: %s",
+                    nicParams)
+            return {'status': {
+                'code': errCode['hotunplugNic']['status']['code'],
+                'message': "NIC not found"}}
 
         # Remove found NIC from vm's NICs list
         if nic:
@@ -1469,14 +1505,15 @@
             if nic:
                 self._devices[vm.NIC_DEVICES].append(nic)
             self.saveState()
-            return {'status' : {'code': 
errCode['hotunplugNic']['status']['code'],
-                                'message': e.message}}
+            return {'status': {
+                'code': errCode['hotunplugNic']['status']['code'],
+                'message': e.message}}
 
         return {'status': doneCode, 'vmList': self.status()}
 
     def hotplugDisk(self, params):
         if self.isMigrating():
-           return errCode['migInProgress']
+            return errCode['migInProgress']
 
         diskParams = params.get('drive', {})
         diskParams['path'] = self.cif.prepareVolumePath(diskParams)
@@ -1486,7 +1523,7 @@
 
         self.updateDriveIndex(diskParams)
         drive = Drive(self.conf, self.log, **diskParams)
-        driveXml =  drive.getXML().toprettyxml(encoding='utf-8')
+        driveXml = drive.getXML().toprettyxml(encoding='utf-8')
         self.log.debug("Hotplug disk xml: %s" % (driveXml))
 
         try:
@@ -1496,8 +1533,9 @@
             self.cif.teardownVolumePath(diskParams)
             if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                 return errCode['noVM']
-            return {'status' : {'code': 
errCode['hotplugDisk']['status']['code'],
-                                'message': e.message}}
+            return {'status': {
+                'code': errCode['hotplugDisk']['status']['code'],
+                'message': e.message}}
         else:
             # FIXME!  We may have a problem here if vdsm dies right after
             # we sent command to libvirt and before save conf. In this case
@@ -1512,7 +1550,7 @@
 
     def hotunplugDisk(self, params):
         if self.isMigrating():
-           return errCode['migInProgress']
+            return errCode['migInProgress']
 
         diskParams = params.get('drive', {})
         diskParams['path'] = self.cif.prepareVolumePath(diskParams)
@@ -1528,9 +1566,11 @@
             driveXml = drive.getXML().toprettyxml(encoding='utf-8')
             self.log.debug("Hotunplug disk xml: %s", driveXml)
         else:
-            self.log.error("Hotunplug disk failed - Disk not found: %s", 
diskParams)
-            return {'status' : {'code': 
errCode['hotunplugDisk']['status']['code'],
-                                'message': "Disk not found"}}
+            self.log.error("Hotunplug disk failed - Disk not found: %s",
+                    diskParams)
+            return {'status': {
+                'code': errCode['hotunplugDisk']['status']['code'],
+                'message': "Disk not found"}}
 
         # Remove found disk from vm's drives list
         if drive:
@@ -1558,8 +1598,9 @@
             if drive:
                 self._devices[vm.DISK_DEVICES].append(drive)
             self.saveState()
-            return {'status' : {'code': 
errCode['hotunplugDisk']['status']['code'],
-                                'message': e.message}}
+            return {'status': {
+                'code': errCode['hotunplugDisk']['status']['code'],
+                'message': e.message}}
         else:
             self._cleanup()
 
@@ -1596,7 +1637,9 @@
             except Exception, e:
                 # Improve description of exception
                 if not self._incomingMigrationFinished.isSet():
-                    newMsg = '%s - Timed out (did not receive success event)' 
% (e.args[0] if len(e.args) else 'Migration Error')
+                    errStr = (e.args[0] if len(e.args) else 'Migration Error')
+                    newMsg = (('%s - Timed out (did not receive success '
+                            'event)') % errStr)
                     e.args = (newMsg,) + e.args[1:]
                 raise
 
@@ -1622,7 +1665,7 @@
     def _findDriveByUUIDs(self, drive):
         """Find a drive given its definition"""
 
-        if drive.has_key("domainID"):
+        if "domainID" in drive:
             tgetDrv = (drive["domainID"], drive["imageID"],
                        drive["volumeID"])
 
@@ -1633,14 +1676,14 @@
                         device.volumeID) == tgetDrv):
                     return device
 
-        elif drive.has_key("GUID"):
+        elif "GUID" in drive:
             for device in self._devices[vm.DISK_DEVICES][:]:
                 if not hasattr(device, "GUID"):
                     continue
                 if device.GUID == drive["GUID"]:
                     return device
 
-        elif drive.has_key("UUID"):
+        elif "UUID" in drive:
             for device in self._devices[vm.DISK_DEVICES][:]:
                 if not hasattr(device, "UUID"):
                     continue
@@ -1670,7 +1713,7 @@
         def _normSnapDriveParams(drive):
             """Normalize snapshot parameters"""
 
-            if drive.has_key("baseVolumeID"):
+            if "baseVolumeID" in drive:
                 baseDrv = {"device": "disk",
                            "domainID": drive["domainID"],
                            "imageID": drive["imageID"],
@@ -1678,18 +1721,18 @@
                 tgetDrv = baseDrv.copy()
                 tgetDrv["volumeID"] = drive["volumeID"]
 
-            elif drive.has_key("baseGUID"):
+            elif "baseGUID" in drive:
                 baseDrv = {"GUID": drive["baseGUID"]}
                 tgetDrv = {"GUID": drive["GUID"]}
 
-            elif drive.has_key("baseUUID"):
+            elif "baseUUID" in drive:
                 baseDrv = {"UUID": drive["baseUUID"]}
                 tgetDrv = {"UUID": drive["UUID"]}
 
             else:
                 baseDrv, tgetDrv = (None, None)
 
-            if drive.has_key("mirrorDomainID"):
+            if "mirrorDomainID" in drive:
                 mirrorDrv = {"domainID": drive["mirrorDomainID"],
                              "imageID": drive["mirrorImageID"],
                              "volumeID": drive["mirrorVolumeID"]}
@@ -1738,7 +1781,7 @@
         newDrives = {}
 
         if self.isMigrating():
-           return errCode['migInProgress']
+            return errCode['migInProgress']
 
         for drive in snapDrives:
             baseDrv, tgetDrv, mirrorDrv = _normSnapDriveParams(drive)
@@ -1824,7 +1867,8 @@
                 return errCode['snapshotErr']
             else:
                 # Update the drive information
-                for drive in newDrives.values(): _updateDrive(drive)
+                for drive in newDrives.values():
+                    _updateDrive(drive)
             finally:
                 self._volumesPrepared = True
 
@@ -1865,7 +1909,7 @@
                 jobInfo = None
 
             if not jobInfo:
-                 mergeStatus['status'] = MERGESTATUS.UNKNOWN
+                mergeStatus['status'] = MERGESTATUS.UNKNOWN
 
         self.saveState()
 
@@ -1917,8 +1961,8 @@
                                 if k not in ("path", "basePath")
             )
 
-        mergeStatus = [ _filterInternalInfo(x)
-                        for x in self.conf.get('liveMerge', []) ]
+        mergeStatus = [_filterInternalInfo(x)
+                        for x in self.conf.get('liveMerge', [])]
 
         return {'status': doneCode, 'mergeStatus': mergeStatus}
 
@@ -2000,7 +2044,6 @@
         graphics.setAttribute('connected', 'keep')
         self._dom.updateDeviceFlags(graphics.toxml(), 0)
 
-
     def _onAbnormalStop(self, blockDevAlias, err):
         """
         Called back by IO_ERROR_REASON event
@@ -2008,7 +2051,8 @@
         :param err: one of "eperm", "eio", "enospc" or "eother"
         Note the different API from that of Vm._onAbnormalStop
         """
-        self.log.info('abnormal vm stop device %s error %s', blockDevAlias, 
err)
+        self.log.info('abnormal vm stop device %s error %s',
+                blockDevAlias, err)
         self.conf['pauseCode'] = err.upper()
         self._guestCpuRunning = False
         if err.upper() == 'ENOSPC':
@@ -2069,7 +2113,8 @@
         Stop VM and release all resources
         """
 
-        #unsetting mirror network will clear both mirroring (on the same 
network).
+        # Unsetting mirror network will clear both mirroring (on the same
+        # network).
         for nic in self._devices[vm.NIC_DEVICES]:
             if hasattr(nic, 'portMirroring'):
                 for network in nic.portMirroring:
@@ -2078,7 +2123,7 @@
         # delete the payload devices
         for drive in self._devices[vm.DISK_DEVICES]:
             if hasattr(drive, 'specParams') and \
-                drive.specParams.has_key('vmPayload'):
+                'vmPayload' in drive.specParams:
                     supervdsm.getProxy().removeFs(drive.path)
 
         with self._releaseLock:
@@ -2094,11 +2139,14 @@
                     self.guestAgent.stop()
                 if self._dom:
                     try:
-                        
self._dom.destroyFlags(libvirt.VIR_DOMAIN_DESTROY_GRACEFUL)
+                        self._dom.destroyFlags(
+                                libvirt.VIR_DOMAIN_DESTROY_GRACEFUL)
                     except libvirt.libvirtError, e:
-                        if e.get_error_code() == 
libvirt.VIR_ERR_OPERATION_FAILED:
-                            self.log.warn("Failed to destroy VM '%s' 
gracefully",
-                                                            self.conf['vmId'])
+                        errCode = e.get_error_code()
+                        if errCode == libvirt.VIR_ERR_OPERATION_FAILED:
+                            self.log.warn(
+                                    "Failed to destroy VM '%s' gracefully",
+                                    self.conf['vmId'])
                             time.sleep(30)
                             self._dom.destroy()
             except libvirt.libvirtError, e:
@@ -2128,7 +2176,8 @@
             self.log.debug("Total desktops after destroy of %s is %d",
                      self.conf['vmId'], len(self.cif.vmContainer))
         except Exception:
-            self.log.error("Failed to delete VM %s", self.conf['vmId'], 
exc_info=True)
+            self.log.error("Failed to delete VM %s", self.conf['vmId'],
+                    exc_info=True)
 
     def destroy(self):
         self.log.debug('destroy Called')
@@ -2234,8 +2283,8 @@
             for dev in self.conf['devices']:
                 if (dev['type'] == vm.CONTROLLER_DEVICES) and \
                    (dev['device'] == device) and \
-                   (not dev.has_key('index') or dev['index'] == index) and \
-                   (not dev.has_key('model') or dev['model'] == model):
+                   ('index' not in dev or dev['index'] == index) and \
+                   ('model' not in dev or dev['model'] == model):
                     dev['address'] = address
                     dev['alias'] = alias
                     knownDev = True
@@ -2267,7 +2316,8 @@
                     dev.alias = alias
 
             for dev in self.conf['devices']:
-                if (dev['type'] == vm.BALLOON_DEVICES) and not 
dev.get('address'):
+                if ((dev['type'] == vm.BALLOON_DEVICES) and
+                        not dev.get('address')):
                     dev['address'] = address
                     dev['alias'] = alias
 
@@ -2286,7 +2336,8 @@
             # FIXME. We have an identification problem here.
             # Video card device has not unique identifier, except the alias
             # (but backend not aware to device's aliases).
-            # So, for now we can only assign the address according to devices 
order.
+            # So, for now we can only assign the address according to devices
+            # order.
             for vc in self._devices[vm.VIDEO_DEVICES]:
                 if not hasattr(vc, 'address') or not hasattr(vc, 'alias'):
                     vc.alias = alias
@@ -2315,7 +2366,8 @@
             # FIXME. We have an identification problem here.
             # Sound device has not unique identifier, except the alias
             # (but backend not aware to device's aliases).
-            # So, for now we can only assign the address according to devices 
order.
+            # So, for now we can only assign the address according to devices
+            # order.
             for sc in self._devices[vm.SOUND_DEVICES]:
                 if not hasattr(sc, 'address') or not hasattr(sc, 'alias'):
                     sc.alias = alias
@@ -2356,7 +2408,8 @@
 
             devType = x.getAttribute('device')
             if devType == 'disk':
-                drv = x.getElementsByTagName('driver')[0].getAttribute('type') 
# raw/qcow2
+                # raw/qcow2
+                drv = x.getElementsByTagName('driver')[0].getAttribute('type')
             else:
                 drv = 'raw'
             # Get disk address
@@ -2432,7 +2485,8 @@
             # Update vm's conf with address for known nic devices
             knownDev = False
             for dev in self.conf['devices']:
-                if dev['type'] == vm.NIC_DEVICES and dev['macAddr'].lower() == 
mac.lower():
+                if dev['type'] == vm.NIC_DEVICES and \
+                        dev['macAddr'].lower() == mac.lower():
                     dev['address'] = address
                     dev['alias'] = alias
                     knownDev = True
@@ -2484,7 +2538,9 @@
         prepareTimeout = self._loadCorrectedTimeout(
                           config.getint('vars', 'migration_listener_timeout'),
                           doubler=5)
-        self.log.debug('migration destination: waiting %ss for path 
preparation', prepareTimeout)
+        self.log.debug(
+                'migration destination: waiting %ss for path preparation',
+                prepareTimeout)
         self._pathsPreparedEvent.wait(prepareTimeout)
         if not self._pathsPreparedEvent.isSet():
             self.log.debug('Timeout while waiting for path preparation')
@@ -2492,6 +2548,7 @@
         srcDomXML = self.conf.pop('_srcDomXML')
         hooks.before_vm_migrate_destination(srcDomXML, self.conf)
         return True
+
 
 # A little unrelated hack to make xml.dom.minidom.Document.toprettyxml()
 # not wrap Text node with whitespace.
@@ -2503,7 +2560,7 @@
     # indent = current indentation
     # addindent = indentation to add to higher levels
     # newl = newline string
-    writer.write(indent+"<" + self.tagName)
+    writer.write(indent + "<" + self.tagName)
 
     attrs = self._get_attributes()
     a_names = attrs.keys()
@@ -2520,13 +2577,13 @@
            isinstance(self.childNodes[0], xml.dom.minidom.Text):
             writer.write(">")
             self.childNodes[0].writexml(writer)
-            writer.write("</%s>%s" % (self.tagName,newl))
+            writer.write("</%s>%s" % (self.tagName, newl))
         else:
-            writer.write(">%s"%(newl))
+            writer.write(">%s" % (newl))
             for node in self.childNodes:
-                node.writexml(writer,indent+addindent,addindent,newl)
-            writer.write("%s</%s>%s" % (indent,self.tagName,newl))
+                node.writexml(writer, indent + addindent, addindent, newl)
+            writer.write("%s</%s>%s" % (indent, self.tagName, newl))
     else:
-        writer.write("/>%s"%(newl))
-xml.dom.minidom.Element.writexml = __hacked_writexml
+        writer.write("/>%s" % (newl))
 
+xml.dom.minidom.Element.writexml = __hacked_writexml


--
To view, visit http://gerrit.ovirt.org/7422
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I32570c2d1a2efa4a5446caa6d973a10b4c84d996
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <[email protected]>
_______________________________________________
vdsm-patches mailing list
[email protected]
https://lists.fedorahosted.org/mailman/listinfo/vdsm-patches

Reply via email to