The upgrade of a Ganeti cluster is done in several
high-level steps ("Draining queue", "Pausing the watcher",
"Stopping daemons", ...). Log those headings as well, to make
the log file easier to read: with the headings in place, it is
easier to see which goal all the micro-step RunCmd log entries
are working towards.

Signed-off-by: Klaus Aehlig <[email protected]>
---
 lib/client/gnt_cluster.py | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index bffc163..9a5c91a 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -2015,7 +2015,7 @@ def _UpgradeBeforeConfigurationChange(versionstring):
   rollback.append(
     lambda: utils.RunCmd(["rm", "-f", pathutils.INTENT_TO_UPGRADE]))
 
-  ToStdout("Draining queue")
+  ToStdoutAndLoginfo("Draining queue")
   client = GetClient()
   client.SetQueueDrainFlag(True)
 
@@ -2027,11 +2027,11 @@ def _UpgradeBeforeConfigurationChange(versionstring):
     ToStderr("Failed to completely empty the queue.")
     return (False, rollback)
 
-  ToStdout("Pausing the watcher for one hour.")
+  ToStdoutAndLoginfo("Pausing the watcher for one hour.")
   rollback.append(lambda: GetClient().SetWatcherPause(None))
   GetClient().SetWatcherPause(time.time() + 60 * 60)
 
-  ToStdout("Stopping daemons on master node.")
+  ToStdoutAndLoginfo("Stopping daemons on master node.")
   if not _RunCommandAndReport([pathutils.DAEMON_UTIL, "stop-all"]):
     return (False, rollback)
 
@@ -2039,7 +2039,7 @@ def _UpgradeBeforeConfigurationChange(versionstring):
     utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
     return (False, rollback)
 
-  ToStdout("Stopping daemons everywhere.")
+  ToStdoutAndLoginfo("Stopping daemons everywhere.")
   rollback.append(lambda: _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"]))
   badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "stop-all"])
   if badnodes:
@@ -2047,7 +2047,7 @@ def _UpgradeBeforeConfigurationChange(versionstring):
     return (False, rollback)
 
   backuptar = os.path.join(pathutils.BACKUP_DIR, "ganeti%d.tar" % time.time())
-  ToStdout("Backing up configuration as %s" % backuptar)
+  ToStdoutAndLoginfo("Backing up configuration as %s", backuptar)
   if not _RunCommandAndReport(["mkdir", "-p", pathutils.BACKUP_DIR]):
     return (False, rollback)
 
@@ -2075,7 +2075,7 @@ def _VersionSpecificDowngrade():
 
   @return: True upon success
 
   """
-  ToStdout("Performing version-specific downgrade tasks.")
+  ToStdoutAndLoginfo("Performing version-specific downgrade tasks.")
   nodes = ssconf.SimpleStore().GetOnlineNodeList()
   cluster_name = ssconf.SimpleStore().GetClusterName()
@@ -2127,7 +2127,7 @@ def _SwitchVersionAndConfig(versionstring, downgrade):
   """
   rollback = []
   if downgrade:
-    ToStdout("Downgrading configuration")
+    ToStdoutAndLoginfo("Downgrading configuration")
     if not _RunCommandAndReport([pathutils.CFGUPGRADE, "--downgrade", "-f"]):
       return (False, rollback)
     # Note: version specific downgrades need to be done before switching
@@ -2139,7 +2139,7 @@ def _SwitchVersionAndConfig(versionstring, downgrade):
 
   # Configuration change is the point of no return. From then onwards, it is
   # safer to push through the up/dowgrade than to try to roll it back.
-  ToStdout("Switching to version %s on all nodes" % versionstring)
+  ToStdoutAndLoginfo("Switching to version %s on all nodes", versionstring)
   rollback.append(lambda: _SetGanetiVersion(constants.DIR_VERSION))
   badnodes = _SetGanetiVersion(versionstring)
   if badnodes:
@@ -2154,7 +2154,7 @@ def _SwitchVersionAndConfig(versionstring, downgrade):
   # commands using their canonical (version independent) path.
 
   if not downgrade:
-    ToStdout("Upgrading configuration")
+    ToStdoutAndLoginfo("Upgrading configuration")
     if not _RunCommandAndReport([pathutils.CFGUPGRADE, "-f"]):
       return (False, rollback)
 
@@ -2179,24 +2179,24 @@ def _UpgradeAfterConfigurationChange(oldversion):
   """
   returnvalue = 0
-  ToStdout("Ensuring directories everywhere.")
+  ToStdoutAndLoginfo("Ensuring directories everywhere.")
   badnodes = _VerifyCommand([pathutils.ENSURE_DIRS])
   if badnodes:
     ToStderr("Warning: failed to ensure directories on %s." %
              (", ".join(badnodes)))
     returnvalue = 1
 
-  ToStdout("Starting daemons everywhere.")
+  ToStdoutAndLoginfo("Starting daemons everywhere.")
   badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"])
   if badnodes:
     ToStderr("Warning: failed to start daemons on %s." %
              (", ".join(badnodes),))
     returnvalue = 1
 
-  ToStdout("Redistributing the configuration.")
+  ToStdoutAndLoginfo("Redistributing the configuration.")
   if not _RunCommandAndReport(["gnt-cluster", "redist-conf", "--yes-do-it"]):
     returnvalue = 1
 
-  ToStdout("Restarting daemons everywhere.")
+  ToStdoutAndLoginfo("Restarting daemons everywhere.")
   badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "stop-all"])
   badnodes.extend(_VerifyCommand([pathutils.DAEMON_UTIL, "start-all"]))
   if badnodes:
@@ -2204,21 +2204,21 @@ def _UpgradeAfterConfigurationChange(oldversion):
              (", ".join(list(set(badnodes))),))
     returnvalue = 1
 
-  ToStdout("Undraining the queue.")
+  ToStdoutAndLoginfo("Undraining the queue.")
   if not _RunCommandAndReport(["gnt-cluster", "queue", "undrain"]):
     returnvalue = 1
 
   _RunCommandAndReport(["rm", "-f", pathutils.INTENT_TO_UPGRADE])
 
-  ToStdout("Running post-upgrade hooks")
+  ToStdoutAndLoginfo("Running post-upgrade hooks")
   if not _RunCommandAndReport([pathutils.POST_UPGRADE, oldversion]):
     returnvalue = 1
 
-  ToStdout("Unpausing the watcher.")
+  ToStdoutAndLoginfo("Unpausing the watcher.")
   if not _RunCommandAndReport(["gnt-cluster", "watcher", "continue"]):
     returnvalue = 1
 
-  ToStdout("Verifying cluster.")
+  ToStdoutAndLoginfo("Verifying cluster.")
   if not _RunCommandAndReport(["gnt-cluster", "verify"]):
     returnvalue = 1
 
-- 
2.6.0.rc2.230.g3dd15c0
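
For context: the patch relies on a ToStdoutAndLoginfo helper existing
alongside the ToStdout/ToStderr CLI helpers; that helper is not part of
this diff. Below is a minimal sketch, assuming it mirrors ToStdout's
(format string, arguments) calling convention and simply records the
same text at INFO level; the actual implementation in Ganeti's
lib/cli.py may differ.

# Sketch only: names and exact behaviour are assumptions, not the
# actual Ganeti lib/cli.py implementation.
import logging
import sys


def ToStdoutAndLoginfo(txt, *args):
  """Write a message to stdout and also record it in the log at INFO level.

  Accepts either a plain string or a format string plus arguments,
  matching how the call sites in this patch use it, e.g.
  ToStdoutAndLoginfo("Backing up configuration as %s", backuptar).
  """
  if args:
    txt = txt % args
  sys.stdout.write(txt + "\n")
  sys.stdout.flush()
  logging.info(txt)

Note that the patch also switches the parameterised call sites from
eager %-formatting (ToStdout("... %s" % backuptar)) to passing the
arguments separately (ToStdoutAndLoginfo("... %s", backuptar)), in line
with the lazy-formatting convention of the logging module.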
