With this commit we introduce the External Storage Interface
to Ganeti, abbreviated: ExtStorage Interface.
The ExtStorage Interface provides Ganeti with the ability to interact
with externally connected shared storage pools that are visible to all
VM-capable nodes. This means that Ganeti can handle VM disks that
reside inside a NAS/SAN or any distributed block storage provider.
The ExtStorage Interface provides a clear API, heavily inspired by the
gnt-os-interface API, that can be used by storage vendors or sysadmins
to write simple ExtStorage Providers (analogous to gnt-os-interface's
OS Definitions). These Providers glue externally attached shared
storage to Ganeti, without the need for preprovisioned block devices
on the Ganeti VM-capable nodes, as the current `blockdev' disk
template requires.
To do so, we implement a new disk template called `ext' (of type
DTS_EXT_MIRROR) that passes control to externally provided scripts
(the ExtStorage Provider) for the template's basic functions:
create / attach / detach / remove / grow
The scripts reside under ES_SEARCH_PATH (analogous to OS_SEARCH_PATH);
for now, only a single ExtStorage Provider, called `ext', is supported.
A disk's logical id is the tuple ('ext', UUID.ext.diskX), where UUID
is generated as for the `plain' disk template and X is the disk's
index.
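As an illustrative sketch (not code shipped by this patch): an
ExtStorage Provider is a directory under ES_SEARCH_PATH containing
one executable per action. Ganeti passes the Volume name and sizes
through the VOL_NAME, VOL_SIZE and VOL_NEW_SIZE environment
variables. A minimal `create' script, with `storagectl' standing in
for a hypothetical vendor-specific CLI, could look like:

  #!/usr/bin/env python
  # Hypothetical /srv/ganeti/extstorage/ext/create script (sketch only).
  # Ganeti exports VOL_NAME and VOL_SIZE (size in mebibytes).
  import os
  import subprocess
  import sys

  vol_name = os.environ["VOL_NAME"]
  vol_size = os.environ["VOL_SIZE"]

  # Provision the Volume on the external storage pool; "storagectl" is
  # a stand-in for whatever tool the storage vendor actually provides.
  sys.exit(subprocess.call(["storagectl", "create",
                            "--name", vol_name, "--size-mb", vol_size]))

Once such a provider is installed on all VM-capable nodes, the new
template can be exercised with `-t ext' (e.g. via burnin, which this
patch also extends).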
Signed-off-by: Constantinos Venetsanopoulos <[email protected]>
---
Makefile.am | 1 +
configure.ac | 12 ++
lib/bdev.py | 318 +++++++++++++++++++++++++++++++++++++++++++++
lib/client/gnt_cluster.py | 2 +
lib/cmdlib.py | 16 ++-
lib/constants.py | 40 +++++-
lib/masterd/instance.py | 1 +
lib/objects.py | 20 +++-
lib/pathutils.py | 2 +
tools/burnin | 1 +
10 files changed, 402 insertions(+), 11 deletions(-)
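The `attach' action is the only one whose output Ganeti consumes
directly: _ExtStorageAction() returns the script's stdout, and
ExtStorageDevice.Attach() uses it verbatim as the block device path.
A sketch of an `attach' script honouring that contract (the
/dev/extstorage/ mapping convention is purely hypothetical):

  #!/usr/bin/env python
  # Hypothetical `attach' script: map the Volume and print the resulting
  # block device path (no trailing newline; the path is used verbatim).
  import os
  import sys

  vol_name = os.environ["VOL_NAME"]
  # A real provider would map the volume here (e.g. via a vendor CLI)
  # before reporting where the block device appeared.
  dev_path = "/dev/extstorage/%s" % vol_name

  sys.stdout.write(dev_path)
  sys.exit(0)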
diff --git a/Makefile.am b/Makefile.am
index 58daa7a..99c1bc2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1175,6 +1175,7 @@ lib/_autoconf.py: Makefile | stamp-directories
echo "SSH_CONFIG_DIR = '$(SSH_CONFIG_DIR)'"; \
echo "EXPORT_DIR = '$(EXPORT_DIR)'"; \
echo "OS_SEARCH_PATH = [$(OS_SEARCH_PATH)]"; \
+ echo "ES_SEARCH_PATH = [$(ES_SEARCH_PATH)]"; \
echo "XEN_BOOTLOADER = '$(XEN_BOOTLOADER)'"; \
echo "XEN_KERNEL = '$(XEN_KERNEL)'"; \
echo "XEN_INITRD = '$(XEN_INITRD)'"; \
diff --git a/configure.ac b/configure.ac
index 5bb14af..a697e31 100644
--- a/configure.ac
+++ b/configure.ac
@@ -60,6 +60,18 @@ AC_ARG_WITH([os-search-path],
[os_search_path="'/srv/ganeti/os'"])
AC_SUBST(OS_SEARCH_PATH, $os_search_path)
+# --with-extstorage-search-path=...
+# same black sed magic for quoting of the strings in the list
+AC_ARG_WITH([extstorage-search-path],
+ [AS_HELP_STRING([--with-extstorage-search-path=LIST],
+ [comma separated list of directories to]
+ [ search for External Storage Providers]
+ [ (default is /srv/ganeti/extstorage)]
+ )],
+ [es_search_path=`echo -n "$withval" | sed -e "s/\([[^,]]*\)/'\1'/g"`],
+ [es_search_path="'/srv/ganeti/extstorage'"])
+AC_SUBST(ES_SEARCH_PATH, $es_search_path)
+
# --with-iallocator-search-path=...
# do a bit of black sed magic to for quoting of the strings in the list
AC_ARG_WITH([iallocator-search-path],
diff --git a/lib/bdev.py b/lib/bdev.py
index d6c1a66..b0ed7c0 100644
--- a/lib/bdev.py
+++ b/lib/bdev.py
@@ -33,6 +33,7 @@ import logging
from ganeti import utils
from ganeti import errors
from ganeti import constants
+from ganeti import pathutils
from ganeti import objects
from ganeti import compat
from ganeti import netutils
@@ -2648,11 +2649,328 @@ class RADOSBlockDevice(BlockDev):
result.fail_reason, result.output)
+class ExtStorageDevice(BlockDev):
+ """A block device provided by an ExtStorage Provider.
+
+ This class implements the External Storage Interface, i.e. the
+ handling of externally provided block devices.
+
+ """
+ def __init__(self, unique_id, children, size, params):
+ """Attaches to an extstorage block device.
+
+ """
+ super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
+ if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
+ raise ValueError("Invalid configuration data %s" % str(unique_id))
+
+ self.driver, self.vol_name = unique_id
+
+ self.major = self.minor = None
+ self.Attach()
+
+ @classmethod
+ def Create(cls, unique_id, children, size, params):
+ """Create a new extstorage device.
+
+ Provision a new volume using an extstorage provider, which will
+ then be mapped to a block device.
+
+ """
+ if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
+ raise errors.ProgrammerError("Invalid configuration data %s" %
+ str(unique_id))
+
+ # Call the External Storage's create script,
+ # to provision a new Volume inside the External Storage
+ _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id, str(size))
+
+ return ExtStorageDevice(unique_id, children, size, params)
+
+ def Remove(self):
+ """Remove the extstorage device.
+
+ """
+ if not self.minor and not self.Attach():
+ # The extstorage device doesn't exist.
+ return
+
+ # First shutdown the device (remove mappings).
+ self.Shutdown()
+
+ # Call the External Storage's remove script,
+ # to remove the Volume from the External Storage
+ _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id)
+
+ def Rename(self, new_id):
+ """Rename this device.
+
+ """
+ pass
+
+ def Attach(self):
+ """Attach to an existing extstorage device.
+
+ This method maps the extstorage volume that matches our name to
+ a corresponding block device and then attaches to this device.
+
+ """
+ self.attached = False
+
+ # Call the External Storage's attach script,
+ # to attach an existing Volume to a block device under /dev
+ self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
+ self.unique_id)
+
+ try:
+ st = os.stat(self.dev_path)
+ except OSError, err:
+ logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
+ return False
+
+ if not stat.S_ISBLK(st.st_mode):
+ logging.error("%s is not a block device", self.dev_path)
+ return False
+
+ self.major = os.major(st.st_rdev)
+ self.minor = os.minor(st.st_rdev)
+ self.attached = True
+
+ return True
+
+ def Assemble(self):
+ """Assemble the device.
+
+ """
+ pass
+
+ def Shutdown(self):
+ """Shutdown the device.
+
+ """
+ if not self.minor and not self.Attach():
+ # The extstorage device doesn't exist.
+ return
+
+ # Call the External Storage's detach script,
+ # to detach an existing Volume from its block device under /dev
+ _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id)
+
+ self.minor = None
+ self.dev_path = None
+
+ def Open(self, force=False):
+ """Make the device ready for I/O.
+
+ """
+ pass
+
+ def Close(self):
+ """Notifies that the device will no longer be used for I/O.
+
+ """
+ pass
+
+ def Grow(self, amount, dryrun, backingstore):
+ """Grow the Volume.
+
+ @type amount: integer
+ @param amount: the amount (in mebibytes) to grow by
+ @type dryrun: boolean
+ @param dryrun: whether to execute the operation in simulation mode
+ only, without actually increasing the size
+ @type backingstore: boolean
+ @param backingstore: whether to execute the operation on backing storage
+ only, or on "logical" storage only
+
+ """
+ if not backingstore:
+ return
+ if not self.Attach():
+ _ThrowError("Can't attach to extstorage device during Grow()")
+
+ if dryrun:
+ # we do not support dry runs of resize operations for now.
+ return
+
+ new_size = self.size + amount
+
+ # Call the External Storage's grow script,
+ # to grow an existing Volume inside the External Storage
+ _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
+ str(self.size), grow=str(new_size))
+
+
+def _ExtStorageAction(action, unique_id, size=None, grow=None):
+ """Take an External Storage action.
+
+ Take an External Storage action concerning or affecting
+ a specific Volume inside the External Storage.
+
+ @type action: string
+ @param action: which action to perform. One of:
+ create / remove / grow / attach / detach
+ @type unique_id: tuple (driver, vol_name)
+ @param unique_id: a tuple containing the type of ExtStorage (driver)
+ and the Volume name
+ @type size: integer
+ @param size: the size of the Volume in mebibytes
+ @type grow: integer
+ @param grow: the new size in mebibytes (after grow)
+ @rtype: None or a block device path (during attach)
+
+ """
+ driver, vol_name = unique_id
+
+ # Create an External Storage instance of type `driver'
+ status, inst_es = ExtStorageFromDisk(driver)
+ if not status:
+ _ThrowError("%s" % inst_es)
+
+ # Create the basic environment for the driver's scripts
+ create_env = _ExtStorageEnvironment(unique_id, size, grow)
+
+ # Do not use log file for action `attach' as we need
+ # to get the output from RunResult
+ # TODO: find a way to have a log file for attach too
+ logfile = None
+ if action != constants.ES_ACTION_ATTACH:
+ logfile = _VolumeLogName(action, driver, vol_name)
+
+ # Find out which external script to run according to the given action
+ script_name = action + "_script"
+ script = getattr(inst_es, script_name)
+
+ # Run the external script
+ result = utils.RunCmd([script], env=create_env,
+ cwd=inst_es.path, output=logfile,)
+ if result.failed:
+ logging.error("External storage's %s command '%s' returned"
+ " error: %s, logfile: %s, output: %s",
+ action, result.cmd, result.fail_reason,
+ logfile, result.output)
+
+ # If logfile is 'None' (during attach), it breaks TailFile
+ # TODO: have a log file for attach too
+ if action != constants.ES_ACTION_ATTACH:
+ lines = [utils.SafeEncode(val)
+ for val in utils.TailFile(logfile, lines=20)]
+ _ThrowError("External storage's %s script failed (%s), last"
+ " lines in the log file:\n%s",
+ action, result.fail_reason, "\n".join(lines))
+ else:
+ _ThrowError("External storage's %s script failed (%s)",
+ action, result.fail_reason)
+
+ if action == constants.ES_ACTION_ATTACH:
+ return result.stdout
+
+
+def ExtStorageFromDisk(name, base_dir=None):
+ """Create an ExtStorage instance from disk.
+
+ This function will return an ExtStorage instance
+ if the given name is a valid ExtStorage name.
+
+ @type base_dir: string
+ @keyword base_dir: Base directory containing ExtStorage installations.
+ Defaults to a search in all the ES_SEARCH_PATH dirs.
+ @rtype: tuple
+ @return: True and the ExtStorage instance if we find a valid one, or
+ False and the diagnose message on error
+
+ """
+ if base_dir is None:
+ es_dir = utils.FindFile(name, pathutils.ES_SEARCH_PATH, os.path.isdir)
+ else:
+ es_dir = utils.FindFile(name, [base_dir], os.path.isdir)
+
+ if es_dir is None:
+ return False, ("Directory for External Storage Provider %s not"
+ " found in search path" % name)
+
+ # ES Files dictionary: we will populate it with the absolute path
+ # names of the provider's scripts; every script listed in ES_SCRIPTS
+ # is required
+ es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
+
+ for filename in es_files:
+ es_files[filename] = utils.PathJoin(es_dir, filename)
+
+ try:
+ st = os.stat(es_files[filename])
+ except EnvironmentError, err:
+ return False, ("File '%s' under path '%s' is missing (%s)" %
+ (filename, es_dir, utils.ErrnoOrStr(err)))
+
+ if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
+ return False, ("File '%s' under path '%s' is not a regular file" %
+ (filename, es_dir))
+
+ if filename in constants.ES_SCRIPTS:
+ if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
+ return False, ("File '%s' under path '%s' is not executable" %
+ (filename, es_dir))
+
+ es_obj = \
+ objects.ExtStorage(name=name, path=es_dir,
+ create_script=es_files[constants.ES_SCRIPT_CREATE],
+ remove_script=es_files[constants.ES_SCRIPT_REMOVE],
+ grow_script=es_files[constants.ES_SCRIPT_GROW],
+ attach_script=es_files[constants.ES_SCRIPT_ATTACH],
+ detach_script=es_files[constants.ES_SCRIPT_DETACH])
+ return True, es_obj
+
+
+def _ExtStorageEnvironment(unique_id, size=None, grow=None):
+ """Calculate the environment for an External Storage script.
+
+ @type unique_id: tuple (driver, vol_name)
+ @param unique_id: ExtStorage type (driver) and name of the Volume
+ @type size: integer
+ @param size: size of the Volume in mebibytes
+ @type grow: integer
+ @param grow: new size of the Volume (in mebibytes) after the grow
+ @rtype: dict
+ @return: dict of environment variables
+
+ """
+ vol_name = unique_id[1]
+
+ result = {}
+ result['VOL_NAME'] = vol_name
+
+ if size is not None:
+ result['VOL_SIZE'] = size
+
+ if grow is not None:
+ result['VOL_NEW_SIZE'] = grow
+
+ return result
+
+
+def _VolumeLogName(kind, es_name, volume):
+ """Compute the ExtStorage log filename for a given Volume and operation.
+
+ @type kind: string
+ @param kind: the operation type (e.g. create, remove etc.)
+ @type es_name: string
+ @param es_name: the ExtStorage name
+ @type volume: string
+ @param volume: the name of the Volume inside the External Storage
+
+ """
+ # Check if the extstorage log dir is a valid dir
+ if not os.path.isdir(pathutils.LOG_ES_DIR):
+ _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)
+
+ # TODO: Use tempfile.mkstemp to create unique filename
+ base = ("%s-%s-%s-%s.log" %
+ (kind, es_name, volume, utils.TimestampForFilename()))
+ return utils.PathJoin(pathutils.LOG_ES_DIR, base)
+
+
DEV_MAP = {
constants.LD_LV: LogicalVolume,
constants.LD_DRBD8: DRBD8,
constants.LD_BLOCKDEV: PersistentBlockDevice,
constants.LD_RBD: RADOSBlockDevice,
+ constants.LD_EXT: ExtStorageDevice,
}
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index 2759844..dc625ac 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -454,6 +454,8 @@ def ShowClusterConfig(opts, args):
ToStdout(" - primary ip version: %d", result["primary_ip_version"])
ToStdout(" - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
ToStdout(" - OS search path: %s", utils.CommaJoin(pathutils.OS_SEARCH_PATH))
+ ToStdout(" - ExtStorage Providers search path: %s",
+ utils.CommaJoin(pathutils.ES_SEARCH_PATH))
ToStdout("Default node parameters:")
_PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 2068882..90b2117 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -8591,9 +8591,9 @@ class TLMigrateInstance(Tasklet):
self._GoReconnect(False)
self._WaitUntilSync()
- # If the instance's disk template is `rbd' and there was a successful
- # migration, unmap the device from the source node.
- if self.instance.disk_template == constants.DT_RBD:
+ # If the instance's disk template is `rbd' or `ext' and there was a
+ # successful migration, unmap the device from the source node.
+ if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
disks = _ExpandCheckDisks(instance, instance.disks)
self.feedback_fn("* unmapping instance's disks from %s" % source_node)
for disk in disks:
@@ -8843,6 +8843,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
_DISK_TEMPLATE_NAME_PREFIX = {
constants.DT_PLAIN: "",
constants.DT_RBD: ".rbd",
+ constants.DT_EXT: ".ext",
}
@@ -8852,6 +8853,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
constants.DT_SHARED_FILE: constants.LD_FILE,
constants.DT_BLOCK: constants.LD_BLOCKDEV,
constants.DT_RBD: constants.LD_RBD,
+ constants.DT_EXT: constants.LD_EXT,
}
@@ -8931,6 +8933,8 @@ def _GenerateDiskTemplate(
disk[constants.IDISK_ADOPT])
elif template_name == constants.DT_RBD:
logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+ elif template_name == constants.DT_EXT:
+ logical_id_fn = lambda idx, _, disk: ("ext", names[idx])
else:
raise errors.ProgrammerError("Unknown disk template '%s'" %
template_name)
@@ -10017,6 +10021,9 @@ class LUInstanceCreate(LogicalUnit):
# Any function that checks prerequisites can be placed here.
# Check if there is enough space on the RADOS cluster.
_CheckRADOSFreeSpace()
+ elif self.op.disk_template == constants.DT_EXT:
+ # FIXME: Function that checks prereqs if needed
+ pass
else:
# Check lv size requirements, if not adopting
req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
@@ -11725,7 +11732,8 @@ class LUInstanceGrowDisk(LogicalUnit):
if instance.disk_template not in (constants.DT_FILE,
constants.DT_SHARED_FILE,
- constants.DT_RBD):
+ constants.DT_RBD,
+ constants.DT_EXT):
# TODO: check the free disk space for file, when that feature will be
# supported
_CheckNodesFreeDiskPerVG(self, nodenames,
diff --git a/lib/constants.py b/lib/constants.py
index 45b3f41..1cb7196 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -343,19 +343,21 @@ DT_FILE = "file"
DT_SHARED_FILE = "sharedfile"
DT_BLOCK = "blockdev"
DT_RBD = "rbd"
+DT_EXT = "ext"
# the set of network-mirrored disk templates
DTS_INT_MIRROR = frozenset([DT_DRBD8])
# the set of externally-mirrored disk templates (e.g. SAN, NAS)
-DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE, DT_BLOCK, DT_RBD])
+DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE, DT_BLOCK, DT_RBD, DT_EXT])
# the set of non-lvm-based disk templates
DTS_NOT_LVM = frozenset([DT_DISKLESS, DT_FILE, DT_SHARED_FILE,
- DT_BLOCK, DT_RBD])
+ DT_BLOCK, DT_RBD, DT_EXT])
# the set of disk templates which can be grown
-DTS_GROWABLE = frozenset([DT_PLAIN, DT_DRBD8, DT_FILE, DT_SHARED_FILE, DT_RBD])
+DTS_GROWABLE = frozenset([DT_PLAIN, DT_DRBD8, DT_FILE, DT_SHARED_FILE,
+ DT_RBD, DT_EXT])
# the set of disk templates that allow adoption
DTS_MAY_ADOPT = frozenset([DT_PLAIN, DT_BLOCK])
@@ -375,15 +377,17 @@ LD_DRBD8 = "drbd8"
LD_FILE = "file"
LD_BLOCKDEV = "blockdev"
LD_RBD = "rbd"
+LD_EXT = "ext"
LOGICAL_DISK_TYPES = frozenset([
LD_LV,
LD_DRBD8,
LD_FILE,
LD_BLOCKDEV,
LD_RBD,
+ LD_EXT,
])
-LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV, LD_RBD])
+LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV, LD_RBD, LD_EXT])
# drbd constants
DRBD_HMAC_ALG = "md5"
@@ -482,7 +486,8 @@ DISK_TEMPLATES = frozenset([
DT_FILE,
DT_SHARED_FILE,
DT_BLOCK,
- DT_RBD
+ DT_RBD,
+ DT_EXT
])
FILE_DRIVER = frozenset([FD_LOOP, FD_BLKTAP])
@@ -604,6 +609,26 @@ OS_PARAMETERS_FILE = "parameters.list"
OS_VALIDATE_PARAMETERS = "parameters"
OS_VALIDATE_CALLS = frozenset([OS_VALIDATE_PARAMETERS])
+# External Storage (ES) related constants
+ES_ACTION_CREATE = "create"
+ES_ACTION_REMOVE = "remove"
+ES_ACTION_GROW = "grow"
+ES_ACTION_ATTACH = "attach"
+ES_ACTION_DETACH = "detach"
+
+ES_SCRIPT_CREATE = ES_ACTION_CREATE
+ES_SCRIPT_REMOVE = ES_ACTION_REMOVE
+ES_SCRIPT_GROW = ES_ACTION_GROW
+ES_SCRIPT_ATTACH = ES_ACTION_ATTACH
+ES_SCRIPT_DETACH = ES_ACTION_DETACH
+ES_SCRIPTS = frozenset([
+ ES_SCRIPT_CREATE,
+ ES_SCRIPT_REMOVE,
+ ES_SCRIPT_GROW,
+ ES_SCRIPT_ATTACH,
+ ES_SCRIPT_DETACH
+ ])
+
# ssh constants
SSH = "ssh"
SCP = "scp"
@@ -1821,6 +1846,8 @@ DISK_LD_DEFAULTS = {
LD_RBD: {
LDP_POOL: "rbd"
},
+ LD_EXT: {
+ },
}
# readability shortcuts
@@ -1854,6 +1881,8 @@ DISK_DT_DEFAULTS = {
DT_RBD: {
RBD_POOL: DISK_LD_DEFAULTS[LD_RBD][LDP_POOL]
},
+ DT_EXT: {
+ },
}
# we don't want to export the shortcuts
@@ -2013,6 +2042,7 @@ VALID_ALLOC_POLICIES = [
# Temporary external/shared storage parameters
BLOCKDEV_DRIVER_MANUAL = "manual"
+EXTSTORAGE_SAMPLE_PROVIDER = "rbd"
# qemu-img path, required for ovfconverter
QEMUIMG_PATH = _autoconf.QEMUIMG_PATH
diff --git a/lib/masterd/instance.py b/lib/masterd/instance.py
index 83e7424..d99f4d8 100644
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1630,6 +1630,7 @@ def ComputeDiskSize(disk_template, disks):
constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
constants.DT_BLOCK: 0,
constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
+ constants.DT_EXT: sum(d[constants.IDISK_SIZE] for d in disks),
}
if disk_template not in req_size_dict:
diff --git a/lib/objects.py b/lib/objects.py
index 4be25ab..da5bc5b 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -598,7 +598,8 @@ class Disk(ConfigObject):
"""
if self.dev_type in [constants.LD_LV, constants.LD_FILE,
- constants.LD_BLOCKDEV, constants.LD_RBD]:
+ constants.LD_BLOCKDEV, constants.LD_RBD,
+ constants.LD_EXT]:
result = [node]
elif self.dev_type in constants.LDS_DRBD:
result = [self.logical_id[0], self.logical_id[1]]
@@ -674,7 +675,7 @@ class Disk(ConfigObject):
"""
if self.dev_type in (constants.LD_LV, constants.LD_FILE,
- constants.LD_RBD):
+ constants.LD_RBD, constants.LD_EXT):
self.size += amount
elif self.dev_type == constants.LD_DRBD8:
if self.children:
@@ -1231,6 +1232,21 @@ class OS(ConfigObject):
return cls.SplitNameVariant(name)[1]
+class ExtStorage(ConfigObject):
+ """Config object representing an External Storage Provider.
+
+ """
+ __slots__ = [
+ "name",
+ "path",
+ "create_script",
+ "remove_script",
+ "grow_script",
+ "attach_script",
+ "detach_script",
+ ]
+
+
class NodeHvState(ConfigObject):
"""Hypvervisor state on a node.
diff --git a/lib/pathutils.py b/lib/pathutils.py
index 738c661..7983f48 100644
--- a/lib/pathutils.py
+++ b/lib/pathutils.py
@@ -30,6 +30,7 @@ DEFAULT_FILE_STORAGE_DIR = _autoconf.FILE_STORAGE_DIR
DEFAULT_SHARED_FILE_STORAGE_DIR = _autoconf.SHARED_FILE_STORAGE_DIR
EXPORT_DIR = _autoconf.EXPORT_DIR
OS_SEARCH_PATH = _autoconf.OS_SEARCH_PATH
+ES_SEARCH_PATH = _autoconf.ES_SEARCH_PATH
SSH_CONFIG_DIR = _autoconf.SSH_CONFIG_DIR
SYSCONFDIR = _autoconf.SYSCONFDIR
TOOLSDIR = _autoconf.TOOLSDIR
@@ -106,6 +107,7 @@ MASTER_SOCKET = SOCKET_DIR + "/ganeti-master"
QUERY_SOCKET = SOCKET_DIR + "/ganeti-query"
LOG_OS_DIR = LOG_DIR + "/os"
+LOG_ES_DIR = LOG_DIR + "/extstorage"
# Job queue paths
JOB_QUEUE_LOCK_FILE = QUEUE_DIR + "/lock"
diff --git a/tools/burnin b/tools/burnin
index 11a7042..adbf43b 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -463,6 +463,7 @@ class Burner(object):
constants.DT_PLAIN,
constants.DT_DRBD8,
constants.DT_RBD,
+ constants.DT_EXT,
)
if options.disk_template not in supported_disk_templates:
Err("Unknown disk template '%s'" % options.disk_template)
--
1.7.2.5