Note that some cases remain which need extra cleanup.
---
lib/backend.py | 2 +-
lib/bdev.py | 4 ++--
lib/cmdlib.py | 19 +++++++++----------
lib/confd/querylib.py | 1 -
lib/config.py | 2 +-
lib/http/__init__.py | 2 +-
lib/http/server.py | 2 +-
lib/hypervisor/hv_chroot.py | 2 +-
lib/hypervisor/hv_kvm.py | 8 ++++----
lib/mcpu.py | 1 -
10 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/lib/backend.py b/lib/backend.py
index f82f08f..d2b75c6 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -1516,7 +1516,7 @@ def BlockdevGetsize(disks):
for cf in disks:
try:
rbd = _RecursiveFindBD(cf)
- except errors.BlockDeviceError, err:
+ except errors.BlockDeviceError:
result.append(None)
continue
if rbd is None:
diff --git a/lib/bdev.py b/lib/bdev.py
index d6259ee..849bd9e 100644
--- a/lib/bdev.py
+++ b/lib/bdev.py
@@ -79,7 +79,7 @@ def _CanReadDevice(path):
try:
utils.ReadFile(path, size=_DEVICE_READ_SIZE)
return True
- except EnvironmentError, err:
+ except EnvironmentError:
logging.warning("Can't read from device %s", path, exc_info=True)
return False
@@ -623,7 +623,7 @@ class LogicalVolume(BlockDev):
_ThrowError("Can't compute PV info for vg %s", self._vg_name)
pvs_info.sort()
pvs_info.reverse()
- free_size, pv_name, _ = pvs_info[0]
+ free_size, _, _ = pvs_info[0]
if free_size < size:
_ThrowError("Not enough free space: required %s,"
" available %s", size, free_size)
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index a8e42e7..48b5b38 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1163,7 +1163,7 @@ class LUVerifyCluster(LogicalUnit):
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
- for size, pvname, owner_vg in pvlist:
+ for _, pvname, owner_vg in pvlist:
test = ":" in pvname
_ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
" '%s' of VG '%s'", pvname, owner_vg)
@@ -1594,7 +1594,6 @@ class LUVerifyCluster(LogicalUnit):
assert hooks_results, "invalid result from hooks"
for node_name in hooks_results:
- show_node_header = True
res = hooks_results[node_name]
msg = res.fail_msg
test = msg and not res.offline
@@ -1684,7 +1683,7 @@ class LUVerifyDisks(NoHooksLU):
continue
lvs = node_res.payload
- for lv_name, (_, lv_inactive, lv_online) in lvs.items():
+ for lv_name, (_, _, lv_online) in lvs.items():
inst = nv_dict.pop((node, lv_name), None)
if (not lv_online and inst is not None
and inst.name not in res_instances):
@@ -2475,7 +2474,7 @@ class LURemoveNode(LogicalUnit):
# Run post hooks on the node before it's removed
hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
try:
- h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
+ hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
except:
self.LogWarning("Errors occurred running hooks on %s" % node.name)
@@ -2592,7 +2591,7 @@ class LUQueryNodes(NoHooksLU):
if inst_fields & frozenset(self.op.output_fields):
inst_data = self.cfg.GetAllInstancesInfo()
- for instance_name, inst in inst_data.items():
+ for inst in inst_data.values():
if inst.primary_node in node_to_primary:
node_to_primary[inst.primary_node].add(inst.name)
for secnode in inst.secondary_nodes:
@@ -4067,7 +4066,7 @@ class LURecreateInstanceDisks(LogicalUnit):
"""
to_skip = []
- for idx, disk in enumerate(self.instance.disks):
+ for idx, _ in enumerate(self.instance.disks):
if idx not in self.op.disks: # disk idx has not been passed in
to_skip.append(idx)
continue
@@ -6795,7 +6794,7 @@ class TLReplaceDisks(Tasklet):
return iv_names
def _CheckDevices(self, node_name, iv_names):
- for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+ for name, (dev, _, _) in iv_names.iteritems():
self.cfg.SetDiskID(dev, node_name)
result = self.rpc.call_blockdev_find(node_name, dev)
@@ -6811,7 +6810,7 @@ class TLReplaceDisks(Tasklet):
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_name, iv_names):
- for name, (dev, old_lvs, _) in iv_names.iteritems():
+ for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s" % name)
for lv in old_lvs:
@@ -7006,6 +7005,7 @@ class TLReplaceDisks(Tasklet):
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
+ assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node, None,
@@ -7809,7 +7809,7 @@ class LUSetInstanceParams(LogicalUnit):
raise errors.OpPrereqError("Disk operations not supported for"
" diskless instances",
errors.ECODE_INVAL)
- for disk_op, disk_dict in self.op.disks:
+ for disk_op, _ in self.op.disks:
if disk_op == constants.DDM_REMOVE:
if len(instance.disks) == 1:
raise errors.OpPrereqError("Cannot remove the last disk of"
@@ -7853,7 +7853,6 @@ class LUSetInstanceParams(LogicalUnit):
result = []
instance = self.instance
- cluster = self.cluster
# disk changes
for disk_op, disk_dict in self.op.disks:
if disk_op == constants.DDM_REMOVE:
diff --git a/lib/confd/querylib.py b/lib/confd/querylib.py
index 6a13a2b..f2831a3 100644
--- a/lib/confd/querylib.py
+++ b/lib/confd/querylib.py
@@ -187,7 +187,6 @@ class InstanceIpToNodePrimaryIpQuery(ConfdQuery):
instances_list = query[constants.CONFD_REQQ_IPLIST]
mode = constants.CONFD_REQQ_IPLIST
else:
- status = constants.CONFD_REPL_STATUS_ERROR
logging.debug("missing IP or IPLIST in query dict")
return QUERY_ARGUMENT_ERROR
diff --git a/lib/config.py b/lib/config.py
index a55906f..5bb3218 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -424,7 +424,7 @@ class ConfigWriter:
node.offline))
# drbd minors check
- d_map, duplicates = self._UnlockedComputeDRBDMap()
+ _, duplicates = self._UnlockedComputeDRBDMap()
for node, minor, instance_a, instance_b in duplicates:
result.append("DRBD minor %d on node %s is assigned twice to instances"
" %s and %s" % (minor, node, instance_a, instance_b))
diff --git a/lib/http/__init__.py b/lib/http/__init__.py
index a1f5e86..c4b9ec2 100644
--- a/lib/http/__init__.py
+++ b/lib/http/__init__.py
@@ -340,7 +340,7 @@ def WaitForSocketCondition(sock, event, timeout):
if not io_events:
# Timeout
return None
- for (evfd, evcond) in io_events:
+ for (_, evcond) in io_events:
if evcond & check:
return evcond
finally:
diff --git a/lib/http/server.py b/lib/http/server.py
index d7e374c..d073cfb 100644
--- a/lib/http/server.py
+++ b/lib/http/server.py
@@ -504,7 +504,7 @@ class HttpServer(http.HttpBase, asyncore.dispatcher):
for child in self._children:
try:
- pid, status = os.waitpid(child, os.WNOHANG)
+ pid, _ = os.waitpid(child, os.WNOHANG)
except os.error:
pid = None
if pid and pid in self._children:
diff --git a/lib/hypervisor/hv_chroot.py b/lib/hypervisor/hv_chroot.py
index 6a813ac..69b9fe2 100644
--- a/lib/hypervisor/hv_chroot.py
+++ b/lib/hypervisor/hv_chroot.py
@@ -97,7 +97,7 @@ class ChrootManager(hv_base.BaseHypervisor):
fh = open("/proc/mounts", "r")
try:
for line in fh:
- fstype, mountpoint, rest = line.split(" ", 2)
+ _, mountpoint, _ = line.split(" ", 2)
if (mountpoint.startswith(path) and
mountpoint != path):
data.append(mountpoint)
diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py
index 412e931..3ff6920 100644
--- a/lib/hypervisor/hv_kvm.py
+++ b/lib/hypervisor/hv_kvm.py
@@ -240,7 +240,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
@return: tuple (name, id, memory, vcpus, stat, times)
"""
- pidfile, pid, alive = self._InstancePidAlive(instance_name)
+ _, pid, alive = self._InstancePidAlive(instance_name)
if not alive:
return None
@@ -278,7 +278,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
if utils.IsProcessAlive(utils.ReadPidFile(filename)):
try:
info = self.GetInstanceInfo(name)
- except errors.HypervisorError, err:
+ except errors.HypervisorError:
continue
if info:
data.append(info)
@@ -470,7 +470,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
@param incoming: (target_host_ip, port)
"""
- pidfile, pid, alive = self._InstancePidAlive(instance.name)
+ pidfile, _, alive = self._InstancePidAlive(instance.name)
hvp = instance.hvparams
if alive:
raise errors.HypervisorError("Failed to start instance %s: %s" %
@@ -581,7 +581,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
# For some reason if we do a 'send-key ctrl-alt-delete' to the control
# socket the instance will stop, but now power up again. So we'll resort
# to shutdown and restart.
- pidfile, pid, alive = self._InstancePidAlive(instance.name)
+ _, _, alive = self._InstancePidAlive(instance.name)
if not alive:
raise errors.HypervisorError("Failed to reboot instance %s:"
" not running" % instance.name)
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 0713aee..e97c743 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -50,7 +50,6 @@ def _CalculateLockAttemptTimeouts():
"""Calculate timeouts for lock attempts.
"""
- running_sum = 0
result = [1.0]
# Wait for a total of at least 150s before doing a blocking acquire
--
1.6.5.7