Yeela Kaplan has uploaded a new change for review.

Change subject: [WIP] Create storage pool using command type 1
......................................................................

[WIP] Create storage pool using command type 1

Change-Id: Ia64f6dd2df38d2968f03ce66094f3ba7b4343503
Signed-off-by: Yeela Kaplan <[email protected]>
---
M vdsm/storage/blockSD.py
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sd.py
M vdsm/storage/sp.py
5 files changed, 71 insertions(+), 74 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/47/23647/1

diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 7980c80..bb7f365 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -92,6 +92,12 @@
 VERS_METADATA_TAG = (2, 3)
 
 
+def encodeVgTags(tagsDict):
+    return [VGTagMetadataRW.METADATA_TAG_PREFIX +
+            lvmTagEncode("%s=%s" % (k, v))
+            for k, v in tagsDict.items()]
+
+
 def encodePVInfo(pvInfo):
     return (
         "pv:%s," % pvInfo["guid"] +
@@ -130,6 +136,13 @@
 
 def lvmTagDecode(s):
     return LVM_ENC_ESCAPE.sub(lambda c: unichr(int(c.groups()[0])), s)
+
+
+def encodeVgTags(tagsDict):
+    tags = [VGTagMetadataRW.METADATA_TAG_PREFIX +
+            lvmTagEncode("%s=%s" % (k, v))
+            for k, v in tagsDict.items()]
+    return tuple(tags)
 
 
 def _tellEnd(devPath):
@@ -523,7 +536,7 @@
         # least SDMETADATA/METASIZE units, we know we can use the first
         # SDMETADATA bytes of the metadata volume for the SD metadata.
         # pass metadata's dev to ensure it is the first mapping
-        mapping = cls.getMetaDataMapping(vgName)
+        #mapping = cls.getMetaDataMapping(vgName)
 
         # Create the rest of the BlockSD internal volumes
         lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE, safe=False)
@@ -558,6 +571,7 @@
 
         logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)
 
+        mapping = cls.getMetaDataMapping(vgName)
         # create domain metadata
         # FIXME : This is 99% like the metadata in file SD
         #         Do we really need to keep the VGUUID?
@@ -565,11 +579,11 @@
         initialMetadata = {
             sd.DMDK_VERSION: version,
             sd.DMDK_SDUUID: sdUUID,
-            sd.DMDK_TYPE: storageType,
-            sd.DMDK_CLASS: domClass,
+            sd.DMDK_TYPE: sd.storageType(storageType),
+            sd.DMDK_CLASS: sd.class2name(domClass),
             sd.DMDK_DESCRIPTION: domainName,
             sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
-            sd.DMDK_POOLS: [],
+            sd.DMDK_POOLS: '',
             sd.DMDK_LOCK_POLICY: '',
             sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC: sd.DEFAULT_LEASE_PARAMS[
                 sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
@@ -585,8 +599,8 @@
         }
 
         initialMetadata.update(mapping)
-
-        md.update(initialMetadata)
+        toAdd = encodeVgTags(initialMetadata)
+        lvm.changeVGTags(vgName, delTags=(), addTags=toAdd, safe=False)
 
         # Mark VG with Storage Domain Tag
         try:
@@ -1302,6 +1316,22 @@
         # It is time to deactivate the master LV now
         lvm.deactivateLVs(self.sdUUID, MASTERLV)
 
+    def initMasterParams(self, poolMD, params):
+        vgUUID = self.getInfo()['vguuid']
+        vg = lvm.getVGbyUUID(vgUUID)
+        vgName = vg.name
+        toAdd = encodeVgTags(params)
+        lvm.changeVGTags(vgName, addTags=toAdd, safe=False)
+
+    def setMasterDomainParams(self, spUUID, leaseParams):
+        vgUUID = self.getInfo()['vguuid']
+        vg = lvm.getVGbyUUID(vgUUID)
+        vgName = vg.name
+        toAdd = encodeVgTags(leaseParams)
+        toAdd += encodeVgTags({sd.DMDK_POOLS: [spUUID],
+                               sd.DMDK_ROLE: sd.MASTER_DOMAIN})
+        lvm.changeVGTags(vgName, delTags=(), addTags=toAdd, safe=False)
+
     def refreshDirTree(self):
         # create domain images folder
         imagesPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES)
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 5c73dd9..ff27d53 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -942,35 +942,15 @@
         if masterDom not in domList:
             raise se.InvalidParameterException("masterDom", str(masterDom))
 
+        if len(domList) > 1:
+            raise NotImplementedError("Create storage pool "
+                                      "only with master domain")
+
         if len(poolName) > sp.MAX_POOL_DESCRIPTION_SIZE:
             raise se.StoragePoolDescriptionTooLongError()
 
-        msd = sdCache.produce(sdUUID=masterDom)
-        msdType = msd.getStorageType()
-        msdVersion = msd.getVersion()
-        if (msdType in sd.BLOCK_DOMAIN_TYPES and
-                msdVersion in blockSD.VERS_METADATA_LV and
-                len(domList) > sp.MAX_DOMAINS):
-            raise se.TooManyDomainsInStoragePoolError()
-
-        for sdUUID in domList:
-            try:
-                dom = sdCache.produce(sdUUID=sdUUID)
-                # TODO: consider removing validate() from here, as the domains
-                # are going to be accessed much later, and may loose validity
-                # until then.
-                dom.validate()
-            except:
-                raise se.StorageDomainAccessError(sdUUID)
-            # If you remove this condition, remove it from
-            # StoragePool.attachSD() too.
-            if dom.isData() and (dom.getVersion() > msdVersion):
-                raise se.MixedSDVersionError(dom.sdUUID, dom.getVersion(),
-                                             msd.sdUUID, msdVersion)
-
         vars.task.getExclusiveLock(STORAGE, spUUID)
-        for dom in sorted(domList):
-            vars.task.getExclusiveLock(STORAGE, dom)
+        vars.task.getExclusiveLock(STORAGE, masterDom)
 
         return sp.StoragePool(spUUID, self.domainMonitor, self.taskMng).create(
             poolName, masterDom, domList, masterVersion, leaseParams)
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 0f96df6..c1a0b92 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -302,7 +302,7 @@
         if rc != 0:
             # Filter might be stale
             self.invalidateFilter()
-            newCmd = self._addExtraCfg(cmd, safe)
+            newCmd = self._addExtraCfg(cmd, tuple(), safe)
             # Before blindly trying again make sure
             # that the commands are not identical, because
             # the devlist is sorted there is no fear
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 7f00533..c968d7b 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -766,6 +766,15 @@
     def isMaster(self):
         return self.getMetaParam(DMDK_ROLE).capitalize() == MASTER_DOMAIN
 
+    @classmethod
+    def initMasterParams(cls, poolMD, params):
+        poolMD.update(params)
+
+    def setMasterDomainParams(self, spUUID, leaseParams):
+        self.changeLeaseParams(leaseParams)
+        self.setMetaParam(DMDK_POOLS, [spUUID])
+        self.changeRole(MASTER_DOMAIN)
+
     def initMaster(self, spUUID, leaseParams):
         self.invalidateMetadata()
         pools = self.getPools()
@@ -774,9 +783,7 @@
             raise se.StorageDomainAlreadyAttached(pools[0], self.sdUUID)
 
         with self._metadata.transaction():
-            self.changeLeaseParams(leaseParams)
-            self.setMetaParam(DMDK_POOLS, [spUUID])
-            self.changeRole(MASTER_DOMAIN)
+            self.setMasterDomainParams(spUUID, leaseParams)
 
     def isISO(self):
         return self.getMetaParam(DMDK_CLASS) == ISO_DOMAIN
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 50e29ef..0b00264 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -588,9 +588,8 @@
     @unsecured
     def create(self, poolName, msdUUID, domList, masterVersion, leaseParams):
         """
-        Create new storage pool with single/multiple image data domain.
-        The command will create new storage pool meta-data attach each
-        storage domain to that storage pool.
+        Create new storage pool with a single image data domain.
+        The command will create new storage pool meta-data.
         At least one data (images) domain must be provided
          'poolName' - storage pool name
          'msdUUID' - master domain of this pool (one of domList)
@@ -600,27 +599,20 @@
                       "masterVersion=%s %s", self.spUUID, poolName, msdUUID,
                       domList, masterVersion, leaseParams)
 
-        if msdUUID not in domList:
-            raise se.InvalidParameterException("masterDomain", msdUUID)
+        # Check the master domain before pool creation
+        try:
+            msd = sdCache.produce(msdUUID)
+            msd.validate()
+        except se.StorageException:
+            self.log.error("Unexpected error", exc_info=True)
+            raise se.StorageDomainAccessError(msdUUID)
 
-        # Check the domains before pool creation
-        for sdUUID in domList:
-            try:
-                domain = sdCache.produce(sdUUID)
-                domain.validate()
-                if sdUUID == msdUUID:
-                    msd = domain
-            except se.StorageException:
-                self.log.error("Unexpected error", exc_info=True)
-                raise se.StorageDomainAccessError(sdUUID)
-
-            # Validate unattached domains
-            if not domain.isISO():
-                domain.invalidateMetadata()
-                spUUIDs = domain.getPools()
-                # Non ISO domains have only 1 pool
-                if len(spUUIDs) > 0:
-                    raise se.StorageDomainAlreadyAttached(spUUIDs[0], sdUUID)
+        # Validate unattached domains
+        msd.invalidateMetadata()
+        spUUIDs = msd.getPools()
+        # Non ISO domains have only 1 pool
+        if len(spUUIDs) > 0:
+            raise se.StorageDomainAlreadyAttached(spUUIDs[0], msdUUID)
 
         fileUtils.createdir(self.poolPath)
         self._acquireTemporaryClusterLock(msdUUID, leaseParams)
@@ -629,23 +621,10 @@
             self._setSafe()
             # Mark 'master' domain
             # We should do it before actually attaching this domain to the pool
-            # During 'master' marking we create pool metadata and each attached
-            # domain should register there
+            # During 'master' marking we create pool metadata
             self.createMaster(poolName, msd, masterVersion, leaseParams)
             self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
-            # Attach storage domains to the storage pool
-            # Since we are creating the pool then attach is done from the hsm
-            # and not the spm therefore we must manually take the master domain
-            # lock
-            # TBD: create will receive only master domain and further attaches
-            #      should be done under SPM
 
-            # Master domain was already attached (in createMaster),
-            # no need to reattach
-            for sdUUID in domList:
-                # No need to attach the master
-                if sdUUID != msdUUID:
-                    self.attachSD(sdUUID)
         except Exception:
             self.log.error("Create pool %s canceled ", poolName, exc_info=True)
             try:
@@ -716,13 +695,14 @@
 
     @unsecured
     def initParameters(self, poolName, domain, masterVersion):
-        self._getPoolMD(domain).update({
+        params = {
             PMDK_SPM_ID: SPM_ID_FREE,
             PMDK_LVER: LVER_INVALID,
             PMDK_MASTER_VER: masterVersion,
             PMDK_POOL_DESCRIPTION: poolName,
             PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS},
-        })
+        }
+        domain.initMasterParams(self._getPoolMD(domain), params)
 
     @unsecured
     def createMaster(self, poolName, domain, masterVersion, leaseParams):


-- 
To view, visit http://gerrit.ovirt.org/23647
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia64f6dd2df38d2968f03ce66094f3ba7b4343503
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <[email protected]>
_______________________________________________
vdsm-patches mailing list
[email protected]
https://lists.fedorahosted.org/mailman/listinfo/vdsm-patches

Reply via email to