Hello Adam Litke, Royce Lv,

I'd like you to do a code review.  Please visit

    http://gerrit.ovirt.org/8182

to review the following change.

Change subject: storage functional test with multiple storage domains and images
......................................................................

storage functional test with multiple storage domains and images

Add an XML-RPC functional test for local storage.
It can be extended to NFS and iSCSI backends.

Change-Id: I8287046046460f399f180d19e0717a91419297f8
Signed-off-by: Royce Lv <[email protected]>
Signed-off-by: Adam Litke <[email protected]>
Signed-off-by: Zhou Zheng Sheng <[email protected]>
---
M tests/functional/xmlrpcTests.py
1 file changed, 213 insertions(+), 1 deletion(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/82/8182/1

diff --git a/tests/functional/xmlrpcTests.py b/tests/functional/xmlrpcTests.py
index 661b347..451a4cc 100644
--- a/tests/functional/xmlrpcTests.py
+++ b/tests/functional/xmlrpcTests.py
@@ -19,16 +19,58 @@
 #
 
 import os
+import sys
 import time
+import pwd
+import grp
+import shutil
+from contextlib import contextmanager
 
 from testrunner import VdsmTestCase as TestCaseBase
 from nose.plugins.skip import SkipTest
 
 from vdsm.config import config
+from vdsm.constants import VDSM_USER, VDSM_GROUP
+from storage.sd import BLANK_UUID
+from storage.sd import name2class
+from storage.sd import name2type as sdname2type
+from storage.volume import name2type as volname2type
 from vdsm import vdscli
 
 if not config.getboolean('vars', 'xmlrpc_enable'):
     raise SkipTest("XML-RPC Bindings are disabled")
+
+
+def rollbackManager(transaction):
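+    """Turn a function taking a ``rollback`` list into a context manager.
+
+    Undo callables appended to ``rollback`` by the wrapped function are run
+    in reverse order when the context exits; the earliest exception seen is
+    re-raised with its original traceback.
+    """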
+    @contextmanager
+    def wrapper(*args, **kwargs):
+        rollback = []
+        exception = None
+        traceback = None
+        try:
+            yield transaction(rollback=rollback, *args, **kwargs)
+        except Exception as e:
+            # keep the original exception and traceback info
+            exception = e
+            traceback = sys.exc_info()[2]
+        finally:
+            rollback.reverse()
+            _playRollback(rollback, exception, traceback)
+    return wrapper
+
+
+def _playRollback(rollback, exception=None, traceback=None):
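+    """Run every undo callable in ``rollback`` and re-raise the earliest error.
+
+    An exception passed in from the transaction takes precedence over
+    failures of the undo callables; rollback always runs to completion.
+    """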
+    for undo in rollback:
+        try:
+            undo()
+        except Exception as e:
+            # keep the earliest exception info
+            if not exception:
+                exception = e
+                # keep the original traceback info
+                traceback = sys.exc_info()[2]
+    if exception:
+        raise exception, None, traceback
 
 
 class XMLRPCTest(TestCaseBase):
@@ -81,7 +123,10 @@
 
     def assertVdsOK(self, vdsResult):
         # code == 0 means OK
-        self.assertEquals(vdsResult['status']['code'], 0)
+        self.assertEquals(
+            vdsResult['status']['code'], 0,
+            'error code: %s, message: %s' % (vdsResult['status']['code'],
+                                             vdsResult['status']['message']))
 
     def skipNoKVM(self):
         r = self.s.getVdsCapabilities()
@@ -145,3 +190,170 @@
             destroyResult = self.s.destroy(VMID)
 
         self.assertVdsOK(destroyResult)
+
+    def testLocalfs(self):
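+        """Build and tear down a complete localfs storage layout end to end."""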
+        conf = storageLayouts['localfs']
+        with _localfsStore(conf['conn']), self._vdsmStorageLayout(conf):
+            pass
+
+    @rollbackManager
+    def _vdsmStorageLayout(self, conf, rollback):
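+        """Build the storage layout described by ``conf`` via the XML-RPC API.
+
+        Acquires the storage connections, creates the storage domains and
+        pools, starts SPM, activates the non-master domains and creates the
+        volumes. Each step that can be undone appends its undo operation to
+        ``rollback``, so the layout is torn down again when the context
+        exits.
+        """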
+        connections = conf['conn']
+        storageDomains = conf['sd']
+        storagePools = conf['sp']
+        images = conf['img']
+        layout = conf['layout']
+
+        # Connect storage backends
+        r = self.s.storageServer_ConnectionRefs_acquire(connections)
+        self.assertVdsOK(r)
+        undo = lambda: self.assertVdsOK(
+                    self.s.storageServer_ConnectionRefs_release(
+                                                    connections.keys()))
+        rollback.append(undo)
+        for _refid, status in r['results'].iteritems():
+            self.assertEquals(status, 0)
+
+        # Create storage domains
+        for sdid, domain in storageDomains.iteritems():
+            r = self.s.createStorageDomain(
+                    sdname2type(domain['type']), sdid, domain['name'],
+                    domain['typeArgs'], name2class(domain['class']), 0)
+            self.assertVdsOK(r)
+            undo = lambda sdid=sdid: \
+                        self.assertVdsOK(
+                                self.s.formatStorageDomain(sdid, True))
+            rollback.append(undo)
+
+        # Create storage pools.
+        # For now only one pool is supported, so the configuration must
+        # contain exactly one pool definition. The loop is kept so that the
+        # code already copes with several pools once they are supported.
+        poolType = 0  # not used
+        for poolid, pool in storagePools.iteritems():
+            r = self.s.createStoragePool(
+                    poolType, poolid, pool['name'], pool['master_uuid'],
+                    layout[poolid].keys(), pool['master_ver'])
+            self.assertVdsOK(r)
+            # Connect to pool
+            r = self.s.connectStoragePool(
+                    poolid, pool['host'], 'scsikey', pool['master_uuid'],
+                    pool['master_ver'])
+            self.assertVdsOK(r)
+
+        # If spmStart fails there is no good rollback, because we need to
+        # be SPM in order to tear down the pool.
+
+        # Become SPM (no undo operation is required)
+        r = self.s.spmStart(storagePools.keys()[0], -1, -1, -1, 0)
+        self.assertVdsOK(r)
+        tid = r['uuid']
+        self._waitTask(tid)
+
+        for poolid in storagePools.keys():
+            undo = lambda poolid=poolid: \
+                        self.assertVdsOK(self.s.destroyStoragePool(
+                            poolid, storagePools[poolid]['host'], 'scsiKey'))
+            rollback.append(undo)
+
+        # Activate storage domains except master ones
+        for poolid, domains in layout.iteritems():
+            for sdid in domains.keys():
+                if sdid != storagePools[poolid]['master_uuid']:
+                    r = self.s.activateStorageDomain(sdid, poolid)
+                    self.assertVdsOK(r)
+                    undo = lambda sdid=sdid, poolid=poolid: \
+                                self.assertVdsOK(
+                                    self.s.detachStorageDomain(
+                                        sdid, poolid, BLANK_UUID,
+                                        storagePools[poolid]['master_ver']))
+                    rollback.append(undo)
+
+        # Create images and volumes
+        for poolid, domains in layout.iteritems():
+            for sdid, imageList in domains.iteritems():
+                for imgid in imageList:
+                    volume = images[imgid]
+                    r = self.s.createVolume(
+                            sdid, poolid, imgid, volume['size'],
+                            volname2type(volume['format']),
+                            volname2type(volume['preallocate']),
+                            volname2type(volume['type']), volume['volid'],
+                            volume['description'])
+                    self.assertVdsOK(r)
+                    tid = r['uuid']
+                    self._waitTask(tid)
+                    undo = lambda sdid=sdid, poolid=poolid, imgid=imgid: \
+                                self._waitTask(
+                                        self.s.deleteImage(
+                                            sdid, poolid, imgid)['uuid'])
+                    rollback.append(undo)
+                    undo = (lambda sdid=sdid, poolid=poolid,
+                                   imgid=imgid, volume=volume:
+                                self._waitTask(
+                                    self.s.deleteVolume(
+                                            sdid, poolid, imgid,
+                                            [volume['volid']])['uuid']))
+                    rollback.append(undo)
+
+    def _waitTask(self, taskId):
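+        """Poll getTaskStatus until the task reports the 'finished' state."""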
+        def assertTaskOK():
+            r = self.s.getTaskStatus(taskId)
+            self.assertVdsOK(r)
+            state = r['taskStatus']['taskState']
+            self.assertEquals(state, 'finished')
+
+        self.retryAssert(assertTaskOK, 20)
+
+
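+# Storage layout definitions keyed by backend type. Each entry describes the
+# storage connections ('conn'), storage domains ('sd'), storage pools ('sp'),
+# volumes ('img') and the pool -> domain -> image mapping ('layout') used by
+# the test.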
+storageLayouts = \
+    {'localfs':
+        {'conn': {'53acd629-47e6-42d8-ba99-cd0b12ff0e1e':
+                    {'type': 'localfs',
+                     'params': {'path': '/tmp/teststorage0'}},
+                  '87e618fe-587c-4704-a9f8-9fd9321fd907':
+                    {'type': 'localfs',
+                     'params': {'path': '/tmp/teststorage1'}}},
+         'sd': {"def32ac7-1234-1234-8a8c-1c887333fe65":
+                  {"name": "test domain0", "type": "localfs",
+                   "class": "Data", "typeArgs": "/tmp/teststorage0"},
+                "9af9bd7f-6167-4ae8-aac6-95a5e5f36f60":
+                  {"name": "test domain1", "type": "localfs",
+                   "class": "Data", "typeArgs": "/tmp/teststorage1"}},
+         'sp': {"6e4d6a96-1234-1234-8905-b5eec55c1535":
+                  {"name": "local storage pool",
+                   "master_uuid": "def32ac7-1234-1234-8a8c-1c887333fe65",
+                   "master_ver": 1, "host": 1}},
+         'img': {"47bd7538-c48b-4b94-ba94-def922151d48":
+                   {"description": "Test volume0", "type": "leaf",
+                    "volid": "11bd7538-c48b-4b94-ba94-def922151d48",
+                    "format": "cow", "preallocate": "sparse",
+                    "size": 20971520},
+                 "bace8f68-4c5a-43f2-acb4-fa8daf58c0f9":
+                   {"description": "test volume1", "type": "leaf",
+                    "volid": "bb3cbda6-a711-45a6-a6f2-c32661939e93",
+                    "format": "cow", "preallocate": "sparse",
+                    "size": 20971520}},
+         'layout': {"6e4d6a96-1234-1234-8905-b5eec55c1535":  # pool
+                      {"def32ac7-1234-1234-8a8c-1c887333fe65":  # domains
+                         ["47bd7538-c48b-4b94-ba94-def922151d48"],  # images
+                       "9af9bd7f-6167-4ae8-aac6-95a5e5f36f60":
+                         ["bace8f68-4c5a-43f2-acb4-fa8daf58c0f9"]}}},
+     'nfs': {'conn': 'blah', 'sd': 'blah', 'sp': 'blah', 'img': 'blah',
+             'layout': 'blah'},
+     'iscsi': {'conn': 'blah', 'sd': 'blah', 'sp': 'blah', 'img': 'blah',
+               'layout': 'blah'}}
+
+
+@rollbackManager
+def _localfsStore(connDef, rollback):
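+    """Create the local directories backing the localfs connections.
+
+    Each path is created, scheduled for removal on rollback, and chowned to
+    the vdsm user and group so that vdsm can write to it.
+    """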
+    for uuid, args in connDef.iteritems():
+        path = args['params']['path']
+        os.mkdir(path)
+
+        undo = lambda path=path: shutil.rmtree(path, ignore_errors=True)
+        rollback.append(undo)
+
+        uid = pwd.getpwnam(VDSM_USER).pw_uid
+        gid = grp.getgrnam(VDSM_GROUP).gr_gid
+        os.chown(path, uid, gid)


--
To view, visit http://gerrit.ovirt.org/8182
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I8287046046460f399f180d19e0717a91419297f8
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Zhou Zheng Sheng <[email protected]>
Gerrit-Reviewer: Adam Litke <[email protected]>
Gerrit-Reviewer: Royce Lv <[email protected]>
