Saggi Mizrahi has uploaded a new change for review.

Change subject: Refactor prepareVolumePath
......................................................................

Refactor prepareVolumePath

Change-Id: I57bb8684fd11a47843a158d13fcc2815147fa7ef
Signed-off-by: Saggi Mizrahi <[email protected]>
---
M vdsm/API.py
M vdsm/clientIF.py
M vdsm/libvirtvm.py
M vdsm/storage/devicemapper.py
M vdsm/vm.py
5 files changed, 93 insertions(+), 63 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/7755/1

diff --git a/vdsm/API.py b/vdsm/API.py
index 720c3b9..c324d79 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -173,8 +173,7 @@
                 # NOTE: pickled params override command-line params. this
                 # might cause problems if an upgrade took place since the
                 # parmas were stored.
-                    fname = self._cif.prepareVolumePath(paramFilespec)
-                    try:
+                    with self._cif.preparedDrive(paramFilespec) as fname:
                         with file(fname) as f:
                             pickledMachineParams = pickle.load(f)
 
@@ -183,8 +182,6 @@
                                                    + str(pickledMachineParams))
                             self.log.debug('former conf ' + str(vmParams))
                             vmParams.update(pickledMachineParams)
-                    finally:
-                        self._cif.teardownVolumePath(paramFilespec)
                 except:
                     self.log.error("Error restoring VM parameters",
                             exc_info=True)
@@ -299,9 +296,15 @@
         :param hiberVolHandle: opaque string, indicating the location of
                                hibernation images.
         """
-        params = {'vmId': self._UUID, 'mode': 'file',
-                  'hiberVolHandle': hibernationVolHandle}
-        response = self.migrate(params)
+        v = self._getVmObject()
+        if v is None:
+            return errCode['noVM']
+
+        try:
+            response = v.hibernate(hibernationVolHandle)
+        except vm.WrongStateError:
+            response = errCode['noVM']
+
         if not response['status']['code']:
             response['status']['message'] = 'Hibernation process starting'
         return response
diff --git a/vdsm/clientIF.py b/vdsm/clientIF.py
index 55a7fc9..0446eb2 100644
--- a/vdsm/clientIF.py
+++ b/vdsm/clientIF.py
@@ -25,6 +25,7 @@
 from xml.dom import minidom
 import uuid
 import errno
+from contextlib import contextmanager
 
 from storage.dispatcher import Dispatcher
 from storage.hsm import HSM
@@ -44,6 +45,7 @@
 import blkid
 import supervdsm
 import vmContainer
+from storage import devicemapper
 try:
     import gluster.api as gapi
     _glusterEnabled = True
@@ -239,50 +241,74 @@
             self.log.info('Error finding path for device', exc_info=True)
             raise vm.VolumeError(uuid)
 
+    def _preparePoolImage(self, drive):
+        res = self.irs.prepareImage(
+                        drive['domainID'], drive['poolID'],
+                        drive['imageID'], drive['volumeID'])
+
+        if res['status']['code']:
+            raise vm.VolumeError(drive)
+
+        drive['volumeChain'] = res['chain']
+        return res['path']
+
+    def _prepareDmDevice(self, drive, vmId):
+        volPath = devicemapper.getDevicePathByGuid(drive["GUID"])
+
+        if not os.path.exists(volPath):
+            raise vm.VolumeError(drive)
+
+        res = self.irs.appropriateDevice(drive["GUID"], vmId)
+        if res['status']['code']:
+            raise vm.VolumeError(drive)
+
+        return volPath
+
+    def _prepareScsiDevice(self, drive):
+        return self._getUUIDSpecPath(drive["UUID"])
+
+    def _prepareVmPayload(self, drive, vmId):
+        '''
+        vmPayload is a key in specParams
+        'vmPayload': {'file': {'filename': 'content'}}
+        '''
+        for key, files in drive['specParams']['vmPayload'].iteritems():
+            if key == 'file':
+                svdsm = supervdsm.getProxy()
+                if drive['device'] == 'cdrom':
+                    return svdsm.mkIsoFs(vmId, files)
+                elif drive['device'] == 'floppy':
+                    return svdsm.mkFloppyFs(vmId, files)
+
+        raise vm.VolumeError(drive)
+
+    def _preparePath(self, drive):
+        return drive['path']
+
+    @contextmanager
+    def preparedDrive(self, drive, vmId=None):
+        path = self.prepareVolumePath(drive, vmId)
+        try:
+            yield path
+        finally:
+            self.teardownVolumePath(drive)
+
     def prepareVolumePath(self, drive, vmId=None):
         if type(drive) is dict:
-            # PDIV drive format
             if drive['device'] == 'disk' and vm.isVdsmImage(drive):
-                res = self.irs.prepareImage(
-                                drive['domainID'], drive['poolID'],
-                                drive['imageID'], drive['volumeID'])
+                volPath = self._preparePoolImage(drive)
 
-                if res['status']['code']:
-                    raise vm.VolumeError(drive)
-
-                volPath = res['path']
-                drive['volumeChain'] = res['chain']
-
-            # GUID drive format
             elif "GUID" in drive:
-                volPath = os.path.join("/dev/mapper", drive["GUID"])
+                volPath = self._prepareDmDevice(drive, vmId)
 
-                if not os.path.exists(volPath):
-                    raise vm.VolumeError(drive)
-
-                res = self.irs.appropriateDevice(drive["GUID"], vmId)
-                if res['status']['code']:
-                    raise vm.VolumeError(drive)
-
-            # UUID drive format
             elif "UUID" in drive:
-                volPath = self._getUUIDSpecPath(drive["UUID"])
+                volPath = self._prepareScsiDevice(drive)
 
             elif 'specParams' in drive and 'vmPayload' in drive['specParams']:
-                '''
-                vmPayload is a key in specParams
-                'vmPayload': {'file': {'filename': 'content'}}
-                '''
-                for key, files in drive['specParams']['vmPayload'].iteritems():
-                    if key == 'file':
-                        if drive['device'] == 'cdrom':
-                            volPath = supervdsm.getProxy().mkIsoFs(vmId, files)
-                        elif drive['device'] == 'floppy':
-                            volPath = \
-                                   supervdsm.getProxy().mkFloppyFs(vmId, files)
+                volPath = self._prepareVmPayload(drive, vmId)
 
             elif "path" in drive:
-                volPath = drive['path']
+                volPath = self._preparePath(drive)
 
             else:
                 raise vm.VolumeError(drive)
@@ -301,17 +327,22 @@
         self.log.info("prepared volume path: %s", volPath)
         return volPath
 
-    def teardownVolumePath(self, drive):
-        res = {'status': doneCode}
-        if type(drive) == dict:
-            try:
-                res = self.irs.teardownImage(drive['domainID'],
-                                             drive['poolID'], drive['imageID'])
-            except KeyError:
-                #This drive is not a vdsm image (quartet)
-                self.log.info("Avoiding tear down drive %s", str(drive))
+    def _teardownPoolImage(self, drive):
+        try:
+            res = self.irs.teardownImage(drive['domainID'],
+                                         drive['poolID'], drive['imageID'])
+            return res['status']['code']
+        except KeyError:
+            #This drive is not a vdsm image (quartet)
+            self.log.info("Avoiding tear down drive %s", str(drive))
+            return doneCode['code']
 
-        return res['status']['code']
+    def teardownVolumePath(self, drive):
+        if type(drive) == dict:
+            return self._teardownPoolImage(drive)
+        else:
+            # Other types don't require tear down
+            return 0
 
     def createVm(self, vmParams):
         try:
@@ -320,6 +351,7 @@
         except vmContainer.VmContainerError as e:
             if e.errno == errno.EEXIST:
                 return errCode['exist']
+
             return
 
     def waitForShutdown(self, timeout=None):
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index a530228..ea0d017 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -404,11 +404,8 @@
             hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
             try:
                 self._vm._vmStats.pause()
-                fname = self._vm.cif.prepareVolumePath(self._dst)
-                try:
+                with self._vm.cif.preparedDrive(self._dst) as fname:
                     self._vm._dom.save(fname)
-                finally:
-                    self._vm.cif.teardownVolumePath(self._dst)
             except:
                 self._vm._vmStats.cont()
                 raise
@@ -1397,11 +1394,8 @@
         elif 'restoreState' in self.conf:
             hooks.before_vm_dehibernate(self.conf.pop('_srcDomXML'), self.conf)
 
-            fname = self.cif.prepareVolumePath(self.conf['restoreState'])
-            try:
+            with self.cif.preparedDrive(self.conf['restoreState']) as fname:
                 self._connection.restore(fname)
-            finally:
-                self.cif.teardownVolumePath(self.conf['restoreState'])
 
             self._dom = NotifyingVirDomain(
                             self._connection.lookupByUUIDString(self.id),
diff --git a/vdsm/storage/devicemapper.py b/vdsm/storage/devicemapper.py
index a1651e0..388c1cd 100644
--- a/vdsm/storage/devicemapper.py
+++ b/vdsm/storage/devicemapper.py
@@ -46,6 +46,10 @@
                                              (major, minor)))
 
 
+def getDevicePathByGuid(devGuid):
+    return DMPATH_FORMAT % devGuid
+
+
 def getSysfsPath(devName):
     if "/" in devName:
         raise ValueError("devName has an illegal format. "
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 3aa9f52..49193d3 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -210,12 +210,9 @@
                 if ignoreParam in self._machineParams:
                     del self._machineParams[ignoreParam]
 
-            fname = self._vm.cif.prepareVolumePath(self._dstparams)
-            try:
-                with file(fname, "w") as f:
+            with self._vm.cif.preparedDrive(self._dstparams) as fname:
+                with file(fname, "wb") as f:
                     pickle.dump(self._machineParams, f)
-            finally:
-                self._vm.cif.teardownVolumePath(self._dstparams)
 
             self._vm.setDownStatus(NORMAL, "SaveState succeeded")
             self.status = {


--
To view, visit http://gerrit.ovirt.org/7755
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I57bb8684fd11a47843a158d13fcc2815147fa7ef
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <[email protected]>
_______________________________________________
vdsm-patches mailing list
[email protected]
https://lists.fedorahosted.org/mailman/listinfo/vdsm-patches

Reply via email to