Signed-off-by: Michael Hanselmann <[email protected]>
---
doc/hooks.rst | 2 +-
doc/iallocator.rst | 2 +-
lib/backend.py | 18 +++++++++---------
lib/bdev.py | 14 +++++++-------
lib/bootstrap.py | 2 +-
lib/cli.py | 8 ++++----
lib/cmdlib.py | 38 +++++++++++++++++++-------------------
lib/config.py | 8 ++++----
lib/http/__init__.py | 4 ++--
lib/http/auth.py | 4 ++--
lib/http/server.py | 4 ++--
lib/jqueue.py | 10 +++++-----
lib/locking.py | 10 +++++-----
lib/luxi.py | 2 +-
lib/mcpu.py | 4 ++--
lib/rpc.py | 10 +++++-----
lib/ssh.py | 2 +-
lib/utils.py | 20 ++++++++++----------
man/gnt-node.sgml | 2 +-
tools/lvmstrap | 6 +++---
20 files changed, 85 insertions(+), 85 deletions(-)
diff --git a/doc/hooks.rst b/doc/hooks.rst
index 7dbe7d5..b2f05ce 100644
--- a/doc/hooks.rst
+++ b/doc/hooks.rst
@@ -104,7 +104,7 @@ The scripts will be run as follows:
be left
-All informations about the cluster is passed using environment
+All information about the cluster is passed using environment
variables. Different operations will have sligthly different
environments, but most of the variables are common.
diff --git a/doc/iallocator.rst b/doc/iallocator.rst
index f4b8bfc..9dfc847 100644
--- a/doc/iallocator.rst
+++ b/doc/iallocator.rst
@@ -233,7 +233,7 @@ The response message is much more simple than the input
one. It is
also a dict having three keys:
success
- a boolean value denoting if the allocation was successfull or not
+ a boolean value denoting if the allocation was successful or not
info
a string with information from the scripts; if the allocation fails,
diff --git a/lib/backend.py b/lib/backend.py
index bca3512..3f965a6 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -308,7 +308,7 @@ def LeaveCluster():
def GetNodeInfo(vgname, hypervisor_type):
- """Gives back a hash with different informations about the node.
+ """Gives back a hash with different information about the node.
@type vgname: C{string}
@param vgname: the name of the volume group to ask for disk space information
@@ -581,7 +581,7 @@ def GetInstanceList(hypervisor_list):
def GetInstanceInfo(instance, hname):
- """Gives back the informations about an instance as a dictionary.
+ """Gives back the information about an instance as a dictionary.
@type instance: string
@param instance: the instance name
@@ -746,7 +746,7 @@ def RunRenameInstance(instance, old_name):
def _GetVGInfo(vg_name):
- """Get informations about the volume group.
+ """Get information about the volume group.
@type vg_name: str
@param vg_name: the volume group which we query
@@ -1032,7 +1032,7 @@ def AcceptInstance(instance, info, target):
msg = "Failed to accept instance"
logging.exception(msg)
return (False, '%s: %s' % (msg, err))
- return (True, "Accept successfull")
+ return (True, "Accept successful")
def FinalizeMigration(instance, info, success):
@@ -1080,7 +1080,7 @@ def MigrateInstance(instance, target, live):
msg = "Failed to migrate instance"
logging.exception(msg)
return (False, "%s: %s" % (msg, err))
- return (True, "Migration successfull")
+ return (True, "Migration successful")
def BlockdevCreate(disk, size, owner, on_primary, info):
@@ -1273,7 +1273,7 @@ def BlockdevAssemble(disk, owner, as_primary):
def BlockdevShutdown(disk):
"""Shut down a block device.
- First, if the device is assembled (Attach() is successfull), then
+ First, if the device is assembled (Attach() is successful), then
the device is shutdown. Then the children of the device are
shutdown.
@@ -1391,7 +1391,7 @@ def BlockdevGetmirrorstatus(disks):
def _RecursiveFindBD(disk):
"""Check if a device is activated.
- If so, return informations about the real device.
+ If so, return information about the real device.
@type disk: L{objects.Disk}
@param disk: the disk object we need to find
@@ -1411,7 +1411,7 @@ def _RecursiveFindBD(disk):
def BlockdevFind(disk):
"""Check if a device is activated.
- If it is, return informations about the real device.
+ If it is, return information about the real device.
@type disk: L{objects.Disk}
@param disk: the disk to find
@@ -2094,7 +2094,7 @@ def RemoveFileStorageDir(file_storage_dir):
@param file_storage_dir: the directory we should cleanup
@rtype: tuple (success,)
@return: tuple of one element, C{success}, denoting
- whether the operation was successfull
+ whether the operation was successful
"""
file_storage_dir = _TransformFileStorageDir(file_storage_dir)
diff --git a/lib/bdev.py b/lib/bdev.py
index 006e5e3..4971b53 100644
--- a/lib/bdev.py
+++ b/lib/bdev.py
@@ -161,7 +161,7 @@ class BlockDev(object):
"""Remove this device.
This makes sense only for some of the device types: LV and file
- storeage. Also note that if the device can't attach, the removal
+ storage. Also note that if the device can't attach, the removal
can't be completed.
"""
@@ -444,7 +444,7 @@ class LogicalVolume(BlockDev):
def Assemble(self):
"""Assemble the device.
- We alway run `lvchange -ay` on the LV to ensure it's active before
+ We always run `lvchange -ay` on the LV to ensure it's active before
use, as there were cases when xenvg was not active after boot
(also possibly after disk issues).
@@ -1258,14 +1258,14 @@ class DRBD8(BaseDRBD):
If sync_percent is None, it means all is ok
- If estimated_time is None, it means we can't esimate
+ If estimated_time is None, it means we can't estimate
the time needed, otherwise it's the time left in seconds.
We set the is_degraded parameter to True on two conditions:
network not connected or local disk missing.
- We compute the ldisk parameter based on wheter we have a local
+ We compute the ldisk parameter based on whether we have a local
disk or not.
@rtype: tuple
@@ -1335,14 +1335,14 @@ class DRBD8(BaseDRBD):
ever_disconnected = _IgnoreError(self._ShutdownNet, self.minor)
timeout_limit = time.time() + self._NET_RECONFIG_TIMEOUT
- sleep_time = 0.100 # we start the retry time at 100 miliseconds
+ sleep_time = 0.100 # we start the retry time at 100 milliseconds
while time.time() < timeout_limit:
status = self.GetProcStatus()
if status.is_standalone:
break
# retry the disconnect, it seems possible that due to a
# well-time disconnect on the peer, my disconnect command might
- # be ingored and forgotten
+ # be ignored and forgotten
ever_disconnected = _IgnoreError(self._ShutdownNet, self.minor) or \
ever_disconnected
time.sleep(sleep_time)
@@ -1647,7 +1647,7 @@ class FileStorage(BlockDev):
def Shutdown(self):
"""Shutdown the device.
- This is a no-op for the file type, as we don't deacivate
+ This is a no-op for the file type, as we don't deactivate
the file on shutdown.
"""
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 0308484..171d830 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -477,7 +477,7 @@ def GatherMasterVotes(node_list):
@type node_list: list
@param node_list: the list of nodes to query for master info; the current
- node wil be removed if it is in the list
+ node will be removed if it is in the list
@rtype: list
@return: list of (node, votes)
diff --git a/lib/cli.py b/lib/cli.py
index d351f2f..03c2ab1 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -320,7 +320,7 @@ keyval_option = KeyValOption
def _ParseArgs(argv, commands, aliases):
"""Parser for the command line arguments.
- This function parses the arguements and returns the function which
+ This function parses the arguments and returns the function which
must be executed together with its (modified) arguments.
@param argv: the command line
@@ -438,10 +438,10 @@ def AskUser(text, choices=None):
choices = [('y', True, 'Perform the operation'),
('n', False, 'Do not perform the operation')]
if not choices or not isinstance(choices, list):
- raise errors.ProgrammerError("Invalid choiches argument to AskUser")
+ raise errors.ProgrammerError("Invalid choices argument to AskUser")
for entry in choices:
if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
- raise errors.ProgrammerError("Invalid choiches element to AskUser")
+ raise errors.ProgrammerError("Invalid choices element to AskUser")
answer = choices[-1][1]
new_text = []
@@ -747,7 +747,7 @@ def GenericMain(commands, override=None, aliases=None):
except (errors.GenericError, luxi.ProtocolError,
JobSubmittedException), err:
result, err_msg = FormatError(err)
- logging.exception("Error durring command processing")
+ logging.exception("Error during command processing")
ToStderr(err_msg)
return result
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index cb3bb31..0809489 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -68,7 +68,7 @@ class LogicalUnit(object):
def __init__(self, processor, op, context, rpc):
"""Constructor for LogicalUnit.
- This needs to be overriden in derived classes in order to check op
+ This needs to be overridden in derived classes in order to check op
validity.
"""
@@ -116,7 +116,7 @@ class LogicalUnit(object):
CheckPrereq, doing these separate is better because:
- ExpandNames is left as as purely a lock-related function
- - CheckPrereq is run after we have aquired locks (and possible
+ - CheckPrereq is run after we have acquired locks (and possible
waited for them)
The function is allowed to change the self.op attribute so that
@@ -477,7 +477,7 @@ def _BuildInstanceHookEnv(name, primary_node,
secondary_nodes, os_type, status,
@param nics: list of tuples (ip, bridge, mac) representing
the NICs the instance has
@type disk_template: string
- @param disk_template: the distk template of the instance
+ @param disk_template: the disk template of the instance
@type disks: list
@param disks: the list of (size, mode) pairs
@type bep: dict
@@ -592,10 +592,10 @@ def _AdjustCandidatePool(lu):
def _CheckInstanceBridgesExist(lu, instance):
- """Check that the brigdes needed by an instance exist.
+ """Check that the bridges needed by an instance exist.
"""
- # check bridges existance
+ # check bridges existence
brlist = [nic.bridge for nic in instance.nics]
result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
result.Raise()
@@ -616,7 +616,7 @@ class LUDestroyCluster(NoHooksLU):
This checks whether the cluster is empty.
- Any errors are signalled by raising errors.OpPrereqError.
+ Any errors are signaled by raising errors.OpPrereqError.
"""
master = self.cfg.GetMasterNode()
@@ -669,7 +669,7 @@ class LUVerifyCluster(LogicalUnit):
Test list:
- compares ganeti version
- - checks vg existance and size > 20G
+ - checks vg existence and size > 20G
- checks config file checksum
- checks ssh to other nodes
@@ -908,7 +908,7 @@ class LUVerifyCluster(LogicalUnit):
if bep[constants.BE_AUTO_BALANCE]:
needed_mem += bep[constants.BE_MEMORY]
if nodeinfo['mfree'] < needed_mem:
- feedback_fn(" - ERROR: not enough memory on node %s to accomodate"
+ feedback_fn(" - ERROR: not enough memory on node %s to accommodate"
" failovers should node %s fail" % (node, prinode))
bad = True
return bad
@@ -927,7 +927,7 @@ class LUVerifyCluster(LogicalUnit):
def BuildHooksEnv(self):
"""Build hooks env.
- Cluster-Verify hooks just rone in the post phase and their failure makes
+ Cluster-Verify hooks just run in the post phase and their failure makes
the output be logged in the verify output and the verification to fail.
"""
@@ -1194,7 +1194,7 @@ class LUVerifyCluster(LogicalUnit):
return not bad
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
- """Analize the post-hooks' result
+ """Analyze the post-hooks' result
This method analyses the hook result, handles it, and sends some
nicely-formatted feedback back to the user.
@@ -1414,7 +1414,7 @@ def _RecursiveCheckIfLVMBased(disk):
@type disk: L{objects.Disk}
@param disk: the disk to check
- @rtype: booleean
+ @rtype: boolean
@return: boolean indicating whether a LD_LV dev_type was found or not
"""
@@ -1815,7 +1815,7 @@ class LURemoveNode(LogicalUnit):
- it does not have primary or secondary instances
- it's not the master
- Any errors are signalled by raising errors.OpPrereqError.
+ Any errors are signaled by raising errors.OpPrereqError.
"""
node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
@@ -2136,7 +2136,7 @@ class LUAddNode(LogicalUnit):
- it is resolvable
- its parameters (single/dual homed) matches the cluster
- Any errors are signalled by raising errors.OpPrereqError.
+ Any errors are signaled by raising errors.OpPrereqError.
"""
node_name = self.op.node_name
@@ -2190,7 +2190,7 @@ class LUAddNode(LogicalUnit):
raise errors.OpPrereqError("The master has a private ip but the"
" new node doesn't have one")
- # checks reachablity
+ # checks reachability
if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
raise errors.OpPrereqError("Node not reachable by ping")
@@ -2866,7 +2866,7 @@ class LUStartupInstance(LogicalUnit):
_CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
- # check bridges existance
+ # check bridges existence
_CheckInstanceBridgesExist(self, instance)
remote_info = self.rpc.call_instance_info(instance.primary_node,
@@ -2944,7 +2944,7 @@ class LURebootInstance(LogicalUnit):
_CheckNodeOnline(self, instance.primary_node)
- # check bridges existance
+ # check bridges existence
_CheckInstanceBridgesExist(self, instance)
def Exec(self, feedback_fn):
@@ -3615,7 +3615,7 @@ class LUFailoverInstance(LogicalUnit):
self.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
- # check bridge existance
+ # check bridge existence
brlist = [nic.bridge for nic in instance.nics]
result = self.rpc.call_bridges_exist(target_node, brlist)
result.Raise()
@@ -3753,7 +3753,7 @@ class LUMigrateInstance(LogicalUnit):
instance.name, i_be[constants.BE_MEMORY],
instance.hypervisor)
- # check bridge existance
+ # check bridge existence
brlist = [nic.bridge for nic in instance.nics]
result = self.rpc.call_bridges_exist(target_node, brlist)
if result.failed or not result.data:
@@ -6289,7 +6289,7 @@ class LUExportInstance(LogicalUnit):
# remove it from its current node. In the future we could fix this by:
# - making a tasklet to search (share-lock all), then create the new one,
# then one to remove, after
- # - removing the removal operation altoghether
+ # - removing the removal operation altogether
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
def DeclareLocks(self, level):
diff --git a/lib/config.py b/lib/config.py
index 4075156..ceea4e7 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -796,7 +796,7 @@ class ConfigWriter:
self._config_data.instances.keys())
def _UnlockedGetInstanceInfo(self, instance_name):
- """Returns informations about an instance.
+ """Returns information about an instance.
This function is for internal use, when the config lock is already held.
@@ -808,9 +808,9 @@ class ConfigWriter:
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceInfo(self, instance_name):
- """Returns informations about an instance.
+ """Returns information about an instance.
- It takes the information from the configuration file. Other informations of
+ It takes the information from the configuration file. Other information of
an instance are taken from the live systems.
@param instance_name: name of the instance, e.g.
@@ -1208,7 +1208,7 @@ class ConfigWriter:
@locking.ssynchronized(_config_lock, shared=1)
def GetClusterInfo(self):
- """Returns informations about the cluster
+ """Returns information about the cluster
@rtype: L{objects.Cluster}
@return: the cluster object
diff --git a/lib/http/__init__.py b/lib/http/__init__.py
index 008cf9c..b7455dc 100644
--- a/lib/http/__init__.py
+++ b/lib/http/__init__.py
@@ -744,7 +744,7 @@ class HttpMessageWriter(object):
def HasMessageBody(self):
"""Checks whether the HTTP message contains a body.
- Can be overriden by subclasses.
+ Can be overridden by subclasses.
"""
return bool(self._msg.body)
@@ -937,7 +937,7 @@ class HttpMessageReader(object):
def ParseStartLine(self, start_line):
"""Parses the start line of a message.
- Must be overriden by subclass.
+ Must be overridden by subclass.
@type start_line: string
@param start_line: Start line string
diff --git a/lib/http/auth.py b/lib/http/auth.py
index 8a8d720..ff880ac 100644
--- a/lib/http/auth.py
+++ b/lib/http/auth.py
@@ -80,7 +80,7 @@ class HttpServerRequestAuthentication(object):
def GetAuthRealm(self, req):
"""Returns the authentication realm for a request.
- MAY be overriden by a subclass, which then can return different realms for
+ MAY be overridden by a subclass, which then can return different realms for
different paths. Returning "None" means no authentication is needed for a
request.
@@ -195,7 +195,7 @@ class HttpServerRequestAuthentication(object):
def Authenticate(self, req, user, password):
"""Checks the password for a user.
- This function MUST be overriden by a subclass.
+ This function MUST be overridden by a subclass.
"""
raise NotImplementedError()
diff --git a/lib/http/server.py b/lib/http/server.py
index b74eb36..95a3756 100644
--- a/lib/http/server.py
+++ b/lib/http/server.py
@@ -536,14 +536,14 @@ class HttpServer(http.HttpBase):
def PreHandleRequest(self, req):
"""Called before handling a request.
- Can be overriden by a subclass.
+ Can be overridden by a subclass.
"""
def HandleRequest(self, req):
"""Handles a request.
- Must be overriden by subclass.
+ Must be overridden by subclass.
"""
raise NotImplementedError()
diff --git a/lib/jqueue.py b/lib/jqueue.py
index 3364a93..e3ce73b 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -69,7 +69,7 @@ def TimeStampNow():
class _QueuedOpCode(object):
- """Encasulates an opcode object.
+ """Encapsulates an opcode object.
@ivar log: holds the execution log and consists of tuples
of the form C{(log_serial, timestamp, level, message)}
@@ -286,7 +286,7 @@ class _QueuedJob(object):
"""Selectively returns the log entries.
@type newer_than: None or int
- @param newer_than: if this is None, return all log enties,
+ @param newer_than: if this is None, return all log entries,
otherwise return only the log entries with serial higher
than this value
@rtype: list
@@ -469,7 +469,7 @@ class _JobQueueWorkerPool(workerpool.WorkerPool):
class JobQueue(object):
- """Quue used to manaage the jobs.
+ """Queue used to manage the jobs.
@cvar _RE_JOB_FILE: regex matching the valid job file names
@@ -651,7 +651,7 @@ class JobQueue(object):
Since we aim to keep consistency should this node (the current
master) fail, we will log errors if our rpc fail, and especially
- log the case when more than half of the nodes failes.
+ log the case when more than half of the nodes fail.
@param result: the data as returned from the rpc call
@type nodes: list
@@ -934,7 +934,7 @@ class JobQueue(object):
and in the future we might merge them.
@type drain_flag: boolean
- @param drain_flag: wheter to set or unset the drain flag
+ @param drain_flag: whether to set or unset the drain flag
"""
if drain_flag:
diff --git a/lib/locking.py b/lib/locking.py
index 647e14f..16f302e 100644
--- a/lib/locking.py
+++ b/lib/locking.py
@@ -297,7 +297,7 @@ class SharedLock:
# Whenever we want to acquire a full LockSet we pass None as the value
-# to acquire. Hide this behing this nicely named constant.
+# to acquire. Hide this behind this nicely named constant.
ALL_SET = None
@@ -689,7 +689,7 @@ BGL = 'BGL'
class GanetiLockManager:
"""The Ganeti Locking Library
- The purpouse of this small library is to manage locking for ganeti clusters
+ The purpose of this small library is to manage locking for ganeti clusters
in a central place, while at the same time doing dynamic checks against
possible deadlocks. It will also make it easier to transition to a different
lock type should we migrate away from python threads.
@@ -774,7 +774,7 @@ class GanetiLockManager:
"""Acquire a set of resource locks, at the same level.
@param level: the level at which the locks shall be acquired;
- it must be a memmber of LEVELS.
+ it must be a member of LEVELS.
@param names: the names of the locks which shall be acquired
(special lock names, or instance/node names)
@param shared: whether to acquire in shared mode; by default
@@ -809,7 +809,7 @@ class GanetiLockManager:
mode, before releasing them.
@param level: the level at which the locks shall be released;
- it must be a memmber of LEVELS
+ it must be a member of LEVELS
@param names: the names of the locks which shall be released
(defaults to all the locks acquired at that level)
@@ -827,7 +827,7 @@ class GanetiLockManager:
"""Add locks at the specified level.
@param level: the level at which the locks shall be added;
- it must be a memmber of LEVELS_MOD.
+ it must be a member of LEVELS_MOD.
@param names: names of the locks to acquire
@param acquired: whether to acquire the newly added locks
@param shared: whether the acquisition will be shared
diff --git a/lib/luxi.py b/lib/luxi.py
index 308de9f..85c3aa5 100644
--- a/lib/luxi.py
+++ b/lib/luxi.py
@@ -191,7 +191,7 @@ class Transport:
raise TimeoutError("Sending timeout: %s" % str(err))
def Recv(self):
- """Try to receive a messae from the socket.
+ """Try to receive a message from the socket.
In case we already have messages queued, we just return from the
queue. Otherwise, we try to read data with a _rwtimeout network
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 2512762..a6014a1 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -158,7 +158,7 @@ class Processor(object):
self.context.glm.add(level, add_locks, acquired=1, shared=share)
except errors.LockError:
raise errors.OpPrereqError(
- "Coudn't add locks (%s), probably because of a race condition"
+ "Couldn't add locks (%s), probably because of a race condition"
" with another job, who added them first" % add_locks)
try:
try:
@@ -187,7 +187,7 @@ class Processor(object):
@type run_notifier: callable (no arguments) or None
@param run_notifier: this function (if callable) will be called when
we are about to call the lu's Exec() method, that
- is, after we have aquired all locks
+ is, after we have acquired all locks
"""
if not isinstance(op, opcodes.OpCode):
diff --git a/lib/rpc.py b/lib/rpc.py
index b50c3d1..fc99fc2 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -83,7 +83,7 @@ class RpcResult(object):
calls we can't raise an exception just because one one out of many
failed, and therefore we use this class to encapsulate the result.
- @ivar data: the data payload, for successfull results, or None
+ @ivar data: the data payload, for successful results, or None
@type failed: boolean
@ivar failed: whether the operation failed at RPC level (not
application level on the remote node)
@@ -161,7 +161,7 @@ class Client:
list of nodes, will contact (in parallel) all nodes, and return a
dict of results (key: node name, value: result).
- One current bug is that generic failure is still signalled by
+ One current bug is that generic failure is still signaled by
'False' result, which is not good. This overloading of values can
cause bugs.
@@ -220,7 +220,7 @@ class Client:
@return: List of RPC results
"""
- assert _http_manager, "RPC module not intialized"
+ assert _http_manager, "RPC module not initialized"
_http_manager.ExecRequests(self.nc.values())
@@ -269,9 +269,9 @@ class RpcRunner(object):
@type instance: L{objects.Instance}
@param instance: an Instance object
@type hvp: dict or None
- @param hvp: a dictionary with overriden hypervisor parameters
+ @param hvp: a dictionary with overridden hypervisor parameters
@type bep: dict or None
- @param bep: a dictionary with overriden backend parameters
+ @param bep: a dictionary with overridden backend parameters
@rtype: dict
@return: the instance dict, with the hvparams filled with the
cluster defaults
diff --git a/lib/ssh.py b/lib/ssh.py
index 40df999..f0362b4 100644
--- a/lib/ssh.py
+++ b/lib/ssh.py
@@ -201,7 +201,7 @@ class SshRunner:
connected to).
This is used to detect problems in ssh known_hosts files
- (conflicting known hosts) and incosistencies between dns/hosts
+ (conflicting known hosts) and inconsistencies between dns/hosts
entries and local machine names
@param node: nodename of a host to check; can be short or
diff --git a/lib/utils.py b/lib/utils.py
index ac781fb..3d33694 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -136,7 +136,7 @@ def RunCmd(cmd, env=None, output=None, cwd='/'):
directory for the command; the default will be /
@rtype: L{RunResult}
@return: RunResult instance
- @raise erors.ProgrammerError: if we call this when forks are disabled
+ @raise errors.ProgrammerError: if we call this when forks are disabled
"""
if no_fork:
@@ -701,7 +701,7 @@ def IsValidIP(ip):
@type ip: str
@param ip: the address to be checked
@rtype: a regular expression match object
- @return: a regular epression match object, or None if the
+ @return: a regular expression match object, or None if the
address is not valid
"""
@@ -734,7 +734,7 @@ def BuildShellCmd(template, *args):
This function will check all arguments in the args list so that they
are valid shell parameters (i.e. they don't contain shell
- metacharaters). If everything is ok, it will return the result of
+ metacharacters). If everything is ok, it will return the result of
template % args.
@type template: str
@@ -1063,7 +1063,7 @@ def ShellQuoteArgs(args):
@type args: list
@param args: list of arguments to be quoted
@rtype: str
- @return: the quoted arguments concatenaned with spaces
+ @return: the quoted arguments concatenated with spaces
"""
return ' '.join([ShellQuote(i) for i in args])
@@ -1080,7 +1080,7 @@ def TcpPing(target, port, timeout=10,
live_port_needed=False, source=None):
@type port: int
@param port: the port to connect to
@type timeout: int
- @param timeout: the timeout on the connection attemp
+ @param timeout: the timeout on the connection attempt
@type live_port_needed: boolean
@param live_port_needed: whether a closed port will cause the
function to return failure, as if there was a timeout
@@ -1122,7 +1122,7 @@ def OwnIpAddress(address):
address.
@type address: string
- @param address: the addres to check
+ @param address: the address to check
@rtype: bool
@return: True if we own the address
@@ -1218,7 +1218,7 @@ def ReadFile(file_name, size=None):
@type size: None or int
@param size: Read at most size bytes
@rtype: str
- @return: the (possibly partial) conent of the file
+ @return: the (possibly partial) content of the file
"""
f = open(file_name, "r")
@@ -1378,7 +1378,7 @@ def UniqueSequence(seq):
Element order is preserved.
@type seq: sequence
- @param seq: the sequence with the source elementes
+ @param seq: the sequence with the source elements
@rtype: list
@return: list of unique elements from seq
@@ -1390,7 +1390,7 @@ def UniqueSequence(seq):
def IsValidMac(mac):
"""Predicate to check if a MAC address is valid.
- Checks wether the supplied MAC address is formally correct, only
+ Checks whether the supplied MAC address is formally correct, only
accepts colon separated format.
@type mac: str
@@ -1831,7 +1831,7 @@ def SafeEncode(text):
"""
if isinstance(text, unicode):
- # onli if unicode; if str already, we handle it below
+ # only if unicode; if str already, we handle it below
text = text.encode('ascii', 'backslashreplace')
resu = ""
for char in text:
diff --git a/man/gnt-node.sgml b/man/gnt-node.sgml
index 07b6f10..5071e5a 100644
--- a/man/gnt-node.sgml
+++ b/man/gnt-node.sgml
@@ -91,7 +91,7 @@
discussion in <citerefentry>
<refentrytitle>gnt-cluster</refentrytitle>
<manvolnum>8</manvolnum> </citerefentry> for more
- informations.
+ information.
</para>
<para>
diff --git a/tools/lvmstrap b/tools/lvmstrap
index 8af2f61..fdbf1fb 100755
--- a/tools/lvmstrap
+++ b/tools/lvmstrap
@@ -267,7 +267,7 @@ def CheckSysDev(name, devnum):
devnum: the device number, e.g. 0x803 (2051 in decimal) for sda3
Returns:
- None; failure of the check is signalled by raising a
+ None; failure of the check is signaled by raising a
SysconfigError exception
"""
@@ -449,7 +449,7 @@ def GetMountInfo():
def DevInfo(name, dev, mountinfo):
- """Computes miscellaneous informations about a block device.
+ """Computes miscellaneous information about a block device.
Args:
name: the device name, e.g. sda
@@ -478,7 +478,7 @@ def DevInfo(name, dev, mountinfo):
def ShowDiskInfo(opts):
"""Shows a nicely formatted block device list for this system.
- This function shows the user a table with the informations gathered
+ This function shows the user a table with the information gathered
by the other functions defined, in order to help the user make a
choice about which disks should be allocated to our volume group.
--
1.5.4.3