... including object, queries, opcode, LU, command line, upgrade, etc.
Signed-off-by: Jose A. Lopes <[email protected]>
---
lib/bootstrap.py | 9 ++++++++-
lib/cli.py | 7 +++++++
lib/client/gnt_cluster.py | 18 +++++++++++++++---
lib/cmdlib/cluster.py | 4 ++++
lib/objects.py | 4 ++++
src/Ganeti/Objects.hs | 1 +
src/Ganeti/OpCodes.hs | 1 +
src/Ganeti/OpParams.hs | 7 +++++++
src/Ganeti/Query/Server.hs | 2 ++
test/hs/Test/Ganeti/OpCodes.hs | 2 +-
test/py/cfgupgrade_unittest.py | 1 +
tools/cfgupgrade | 4 ++++
12 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 9cd7f2b..d0b56b2 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -552,15 +552,21 @@ def InitCluster(cluster_name, mac_prefix, # pylint:
disable=R0913, R0914
default_iallocator=None, default_iallocator_params=None,
primary_ip_version=None, ipolicy=None,
prealloc_wipe_disks=False, use_external_mip_script=False,
- hv_state=None, disk_state=None, enabled_disk_templates=None):
+ hv_state=None, disk_state=None, enabled_disk_templates=None,
+ enabled_user_shutdown=False):
"""Initialise the cluster.
@type candidate_pool_size: int
@param candidate_pool_size: master candidate pool size
+
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of disk_templates to be used in this
cluster
+ @type enabled_user_shutdown: bool
@param enabled_user_shutdown: whether user shutdown is enabled
    cluster-wide
+
"""
# TODO: complete the docstring
if config.ConfigWriter.IsCluster():
@@ -819,6 +825,7 @@ def InitCluster(cluster_name, mac_prefix, # pylint:
disable=R0913, R0914
disk_state_static=disk_state,
enabled_disk_templates=enabled_disk_templates,
candidate_certs=candidate_certs,
+ enabled_user_shutdown=enabled_user_shutdown,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
diff --git a/lib/cli.py b/lib/cli.py
index 9480505..dd81c81 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -83,6 +83,7 @@ __all__ = [
"EARLY_RELEASE_OPT",
"ENABLED_HV_OPT",
"ENABLED_DISK_TEMPLATES_OPT",
+ "ENABLED_USER_SHUTDOWN_OPT",
"ERROR_CODES_OPT",
"FAILURE_ONLY_OPT",
"FIELDS_OPT",
@@ -1273,6 +1274,12 @@ ENABLED_DISK_TEMPLATES_OPT =
cli_option("--enabled-disk-templates",
"disk templates",
type="string", default=None)
+ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
+ default=None,
+ dest="enabled_user_shutdown",
+ help="Whether user shutdown is enabled",
+ type="bool")
+
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
type="keyval", default={},
help="NIC parameters")
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index fd3d74c..3e63d3a 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -265,6 +265,12 @@ def InitCluster(opts, args):
hv_state = dict(opts.hv_state)
default_ialloc_params = opts.default_iallocator_params
+
+ if opts.enabled_user_shutdown:
+ enabled_user_shutdown = True
+ else:
+ enabled_user_shutdown = False
+
bootstrap.InitCluster(cluster_name=args[0],
secondary_ip=opts.secondary_ip,
vg_name=vg_name,
@@ -295,6 +301,7 @@ def InitCluster(opts, args):
hv_state=hv_state,
disk_state=disk_state,
enabled_disk_templates=enabled_disk_templates,
+ enabled_user_shutdown=enabled_user_shutdown,
)
op = opcodes.OpClusterPostInit()
SubmitOpCode(op, opts=opts)
@@ -540,6 +547,7 @@ def ShowClusterConfig(opts, args):
utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
("enabled disk templates",
utils.CommaJoin(result["enabled_disk_templates"])),
+ ("enabled user shutdown", result["enabled_user_shutdown"]),
]),
("Default node parameters",
@@ -1115,7 +1123,8 @@ def SetClusterParams(opts, args):
opts.ipolicy_spindle_ratio is not None or
opts.modify_etc_hosts is not None or
opts.file_storage_dir is not None or
- opts.shared_file_storage_dir is not None):
+ opts.shared_file_storage_dir is not None or
+ opts.enabled_user_shutdown is not None):
ToStderr("Please give at least one of the parameters.")
return 1
@@ -1227,6 +1236,7 @@ def SetClusterParams(opts, args):
force=opts.force,
file_storage_dir=opts.file_storage_dir,
shared_file_storage_dir=opts.shared_file_storage_dir,
+ enabled_user_shutdown=opts.enabled_user_shutdown,
)
SubmitOrSend(op, opts)
return 0
@@ -2109,7 +2119,8 @@ commands = {
PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT,
GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT,
HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
- IPOLICY_STD_SPECS_OPT, GLOBAL_GLUSTER_FILEDIR_OPT]
+ ENABLED_USER_SHUTDOWN_OPT, IPOLICY_STD_SPECS_OPT,
+ GLOBAL_GLUSTER_FILEDIR_OPT]
+ INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
"destroy": (
@@ -2192,7 +2203,8 @@ commands = {
DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT,
USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] +
SUBMIT_OPTS +
- [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] +
+ [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT,
+ ENABLED_USER_SHUTDOWN_OPT] +
INSTANCE_POLICY_OPTS + [GLOBAL_FILEDIR_OPT, GLOBAL_SHARED_FILEDIR_OPT],
"[opts...]",
"Alters the parameters of the cluster"),
diff --git a/lib/cmdlib/cluster.py b/lib/cmdlib/cluster.py
index ecba889..2c480ad 100644
--- a/lib/cmdlib/cluster.py
+++ b/lib/cmdlib/cluster.py
@@ -434,6 +434,7 @@ class LUClusterQuery(NoHooksLU):
"hidden_os": cluster.hidden_os,
"blacklisted_os": cluster.blacklisted_os,
"enabled_disk_templates": cluster.enabled_disk_templates,
+ "enabled_user_shutdown": cluster.enabled_user_shutdown,
}
return result
@@ -1396,6 +1397,9 @@ class LUClusterSetParams(LogicalUnit):
if self.op.use_external_mip_script is not None:
self.cluster.use_external_mip_script = self.op.use_external_mip_script
+ if self.op.enabled_user_shutdown is not None:
+ self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
+
def helper_os(aname, mods, desc):
desc += " OS list"
lst = getattr(self.cluster, aname)
diff --git a/lib/objects.py b/lib/objects.py
index 125d1e5..3415e0c 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -1592,6 +1592,7 @@ class Cluster(TaggableObject):
"enabled_disk_templates",
"candidate_certs",
"max_running_jobs",
+ "enabled_user_shutdown",
] + _TIMESTAMPS + _UUID
def UpgradeConfig(self):
@@ -1724,6 +1725,9 @@ class Cluster(TaggableObject):
if self.max_running_jobs is None:
self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
+ if self.enabled_user_shutdown is None:
+ self.enabled_user_shutdown = False
+
@property
def primary_hypervisor(self):
"""The first hypervisor is the primary.
diff --git a/src/Ganeti/Objects.hs b/src/Ganeti/Objects.hs
index 18ca1f5..9fe1120 100644
--- a/src/Ganeti/Objects.hs
+++ b/src/Ganeti/Objects.hs
@@ -708,6 +708,7 @@ $(buildObject "Cluster" "cluster" $
, simpleField "enabled_disk_templates" [t| [DiskTemplate] |]
, simpleField "candidate_certs" [t| CandidateCertificates |]
, simpleField "max_running_jobs" [t| Int |]
+ , simpleField "enabled_user_shutdown" [t| Bool |]
]
++ timeStampFields
++ uuidFields
diff --git a/src/Ganeti/OpCodes.hs b/src/Ganeti/OpCodes.hs
index 8e7bf4f..b1964e4 100644
--- a/src/Ganeti/OpCodes.hs
+++ b/src/Ganeti/OpCodes.hs
@@ -236,6 +236,7 @@ $(genOpCode "OpCode"
, pClusterFileStorageDir
, pClusterSharedFileStorageDir
, pClusterGlusterStorageDir
+ , pEnabledUserShutdown
],
[])
, ("OpClusterRedistConf",
diff --git a/src/Ganeti/OpParams.hs b/src/Ganeti/OpParams.hs
index 1d077d1..54817d1 100644
--- a/src/Ganeti/OpParams.hs
+++ b/src/Ganeti/OpParams.hs
@@ -260,6 +260,7 @@ module Ganeti.OpParams
, pReason
, pSequential
, pEnabledDiskTemplates
+ , pEnabledUserShutdown
, pAdminStateSource
) where
@@ -745,6 +746,12 @@ pEnabledDiskTemplates =
optionalField $
simpleField "enabled_disk_templates" [t| [DiskTemplate] |]
+pEnabledUserShutdown :: Field
+pEnabledUserShutdown =
+ withDoc "Whether user shutdown is enabled cluster-wide" .
+ optionalField $
+ simpleField "enabled_user_shutdown" [t| Bool |]
+
pQueryWhat :: Field
pQueryWhat =
withDoc "Resource(s) to query for" $
diff --git a/src/Ganeti/Query/Server.hs b/src/Ganeti/Query/Server.hs
index bdf1fac..c0377bd 100644
--- a/src/Ganeti/Query/Server.hs
+++ b/src/Ganeti/Query/Server.hs
@@ -165,6 +165,8 @@ handleCall _ _ cdata QueryClusterInfo =
, ("hidden_os", showJSON $ clusterHiddenOs cluster)
, ("blacklisted_os", showJSON $ clusterBlacklistedOs cluster)
, ("enabled_disk_templates", showJSON diskTemplates)
+ , ("enabled_user_shutdown",
+ showJSON $ clusterEnabledUserShutdown cluster)
]
in case master of
diff --git a/test/hs/Test/Ganeti/OpCodes.hs b/test/hs/Test/Ganeti/OpCodes.hs
index f677de3..b152ed7 100644
--- a/test/hs/Test/Ganeti/OpCodes.hs
+++ b/test/hs/Test/Ganeti/OpCodes.hs
@@ -183,7 +183,7 @@ instance Arbitrary OpCodes.OpCode where
emptyMUD <*> emptyMUD <*> arbitrary <*>
arbitrary <*> emptyMUD <*> arbitrary <*> arbitrary <*> arbitrary <*>
arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*>
- arbitrary <*> genMaybe genName <*> genMaybe genName
+ arbitrary <*> genMaybe genName <*> genMaybe genName <*> arbitrary
"OP_CLUSTER_REDIST_CONF" -> pure OpCodes.OpClusterRedistConf
"OP_CLUSTER_ACTIVATE_MASTER_IP" ->
pure OpCodes.OpClusterActivateMasterIp
diff --git a/test/py/cfgupgrade_unittest.py b/test/py/cfgupgrade_unittest.py
index b56a8c5..240225d 100755
--- a/test/py/cfgupgrade_unittest.py
+++ b/test/py/cfgupgrade_unittest.py
@@ -47,6 +47,7 @@ def GetMinimalConfig():
"default_iallocator_params": {},
"ndparams": {},
"candidate_certs": {},
+ "enabled_user_shutdown": False,
},
"instances": {},
"networks": {},
diff --git a/tools/cfgupgrade b/tools/cfgupgrade
index 654d193..a53ca90 100755
--- a/tools/cfgupgrade
+++ b/tools/cfgupgrade
@@ -152,6 +152,8 @@ def UpgradeCluster(config_data):
cluster["default_iallocator_params"] = {}
if not "candidate_certs" in cluster:
cluster["candidate_certs"] = {}
+ if "enabled_user_shutdown" not in cluster:
+ cluster["enabled_user_shutdown"] = False
def UpgradeGroups(config_data):
@@ -424,6 +426,8 @@ def DowngradeCluster(config_data):
del cluster[param]
if "max_running_jobs" in cluster:
del cluster["max_running_jobs"]
+ if "enabled_user_shutdown" in cluster:
+ del cluster["enabled_user_shutdown"]
DowngradeInstances(config_data)
--
2.0.0.526.g5318336