Hello Ayal Baron, Timothy Asir, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,

I'd like you to do a code review.  Please visit

    http://gerrit.ovirt.org/7579

to review the following change.

Change subject: Added gluster tag support in getAllTasks()
......................................................................

Added gluster tag support in getAllTasks()

If the tag parameter is empty, all tasks, including gluster tasks, are
returned; otherwise only tasks whose tags are in the given tag list are returned.
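
For clarity, a minimal sketch of the intended call semantics (tm below
stands for an existing TaskManager instance and is only illustrative, not
part of this patch):

    tm.getAllTasks()                 # no tag: all public tasks plus gluster tasks
    tm.getAllTasks(tag=['gluster'])  # only tasks tagged 'gluster'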

As the verbs below are not consumed by engine/RHS-C yet, it is OK to break
compatibility for them now (their return values change; see the sketch after
the list):
glusterVolumeRebalanceStart
glusterVolumeRebalanceStatus
glusterVolumeReplaceBrickStart
glusterVolumeReplaceBrickStatus
glusterVolumeRemoveBrickStart
glusterVolumeRemoveBrickStatus
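
A rough before/after sketch of the changed return values, using the cli
module names and illustrative IDs taken from the unit test data in this
patch:

    gcli.volumeRebalanceStart('V1')
    # before: True
    # after:  {'taskId': '12345473-9197-49d0-a877-831bc6e9dac2'}

    gcli.volumeRebalanceStatus('V1')
    # before: (BrickStatus.RUNNING, "<raw cli output>")
    # after:  {'taskId': ...,
    #          'host': [...],
    #          'summary': {'filesScanned': INT, 'filesMoved': INT,
    #                      'filesFailed': INT, 'totalSizeMoved': INT,
    #                      'status': 'RUNNING'}}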

Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432
Signed-off-by: Bala.FA <barum...@redhat.com>
---
M tests/gluster_cli_tests.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm/storage/taskManager.py
4 files changed, 367 insertions(+), 95 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/79/7579/1

diff --git a/tests/gluster_cli_tests.py b/tests/gluster_cli_tests.py
index f442893..9c6357c 100644
--- a/tests/gluster_cli_tests.py
+++ b/tests/gluster_cli_tests.py
@@ -28,6 +28,7 @@
     from gluster import cli as gcli
 except ImportError:
     pass
+import xml.etree.cElementTree as etree
 
 
 class GlusterCliTests(TestCaseBase):
@@ -115,3 +116,74 @@
     def test_parsePeerStatus(self):
         self._parsePeerStatus_empty_test()
         self._parsePeerStatus_test()
+
+    def _parseVolumeStatusAll_test(self):
+        out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <opErrno>0</opErrno>
+  <opErrstr></opErrstr>
+  <volumes>
+    <volume>
+      <name>V1</name>
+      <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+      <tasks>
+        <task>
+          <name>rebalance</name>
+          <id>12345473-9197-49d0-a877-831bc6e9dac2</id>
+        </task>
+      </tasks>
+    </volume>
+    <volume>
+      <name>V2</name>
+      <id>03eace73-1237-49d0-a877-831bc6e9dac2</id>
+      <tasks>
+        <task>
+          <name>replace-brick</name>
+          <id>12345473-1237-49d0-a877-831bc6e9dac2</id>
+          <sourceBrick>192.168.122.167:/tmp/V2-b1</sourceBrick>
+          <destBrick>192.168.122.168:/tmp/V2-b1</destBrick>
+        </task>
+      </tasks>
+    </volume>
+    <volume>
+      <name>V3</name>
+      <id>03eace73-1237-1230-a877-831bc6e9dac2</id>
+      <tasks>
+        <task>
+          <name>remove-brick</name>
+          <id>12345473-1237-1230-a877-831bc6e9dac2</id>
+          <BrickCount>2</BrickCount>
+          <brick>192.168.122.167:/tmp/V3-b1</brick>
+          <brick>192.168.122.168:/tmp/V3-b1</brick>
+        </task>
+      </tasks>
+    </volume>
+  </volumes>
+</cliOutput>"""
+        tree = etree.fromstring(out)
+        status = gcli._parseVolumeStatusAll(tree)
+        self.assertEquals(status,
+                          {'12345473-1237-1230-a877-831bc6e9dac2':
+                               {'bricks': ['192.168.122.167:/tmp/V3-b1',
+                                           '192.168.122.168:/tmp/V3-b1'],
+                                'taskType': 'remove-brick',
+                                'volumeId':
+                                    '03eace73-1237-1230-a877-831bc6e9dac2',
+                                'volumeName': 'V3'},
+                           '12345473-1237-49d0-a877-831bc6e9dac2':
+                               {'bricks': ['192.168.122.167:/tmp/V2-b1',
+                                           '192.168.122.168:/tmp/V2-b1'],
+                                'taskType': 'replace-brick',
+                                'volumeId':
+                                    '03eace73-1237-49d0-a877-831bc6e9dac2',
+                                'volumeName': 'V2'},
+                           '12345473-9197-49d0-a877-831bc6e9dac2':
+                               {'bricks': [],
+                                'taskType': 'rebalance',
+                                'volumeId':
+                                    '03eace73-9197-49d0-a877-831bc6e9dac2',
+                                'volumeName': 'V1'}})
+
+    def test_parseVolumeStatusAll(self):
+        self._parseVolumeStatusAll_test()
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 95de106..1f464f6 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -84,6 +84,55 @@
         raise ge.GlusterCmdFailedException(rc=rv, err=[msg])
 
 
+class TaskType:
+    REBALANCE = 'rebalance'
+    REPLACE_BRICK = 'replace-brick'
+    REMOVE_BRICK = 'remove-brick'
+
+
+def _parseVolumeStatusAll(tree):
+    """
+    returns {TaskId: {'volumeName': VolumeName,
+                      'volumeId': VolumeId,
+                      'taskType': TaskType,
+                      'bricks': BrickList}, ...}
+    """
+    tasks = {}
+    for el in tree.findall('volumes/volume'):
+        volumeName = el.find('name').text
+        volumeId = el.find('id').text
+        for c in el.findall('tasks/task'):
+            taskType = c.find('name').text
+            taskId = c.find('id').text
+            bricks = []
+            if taskType == TaskType.REPLACE_BRICK:
+                bricks.append(c.find('sourceBrick').text)
+                bricks.append(c.find('destBrick').text)
+            elif taskType == TaskType.REMOVE_BRICK:
+                for b in c.findall('brick'):
+                    bricks.append(b.text)
+            elif taskType == TaskType.REBALANCE:
+                pass
+            tasks[taskId] = {'volumeName': volumeName,
+                             'volumeId': volumeId,
+                             'taskType': taskType,
+                             'bricks': bricks}
+    return tasks
+
+
+@exportToSuperVdsm
+def volumeStatusAll():
+    command = _getGlusterVolCmd() + ["status", "all"]
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeStatusAllFailedException(rc=e.rc, err=e.err)
+    try:
+        return _parseVolumeStatusAll(xmltree)
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
+
+
 def _parseVolumeInfo(out):
     if not out[0].strip():
         del out[0]
@@ -300,11 +349,15 @@
     command.append("start")
     if force:
         command.append("force")
-    rc, out, err = _execGluster(command)
-    if rc:
-        raise ge.GlusterVolumeRebalanceStartFailedException(rc, out, err)
-    else:
-        return True
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc,
+                                                            err=e.err)
+    try:
+        return {'taskId': xmltree.find('id').text}
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
 
 
 @exportToSuperVdsm
@@ -312,84 +365,147 @@
     command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
     if force:
         command.append('force')
-    rc, out, err = _execGluster(command)
-    if rc:
-        raise ge.GlusterVolumeRebalanceStopFailedException(rc, out, err)
-    else:
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc,
+                                                           err=e.err)
+
+
+class TaskStatus():
+    RUNNING = 'RUNNING'
+    FAILED = 'FAILED'
+    COMPLETED = 'COMPLETED'
+
+
+def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
+    """
+    returns {'taskId': UUID,
+             'host': [{'name': NAME,
+                       'id': HOSTID,
+                       'filesScanned': INT,
+                       'filesMoved': INT,
+                       'filesFailed': INT,
+                       'totalSizeMoved': INT,
+                       'status': TaskStatus},...]
+             'summary': {'filesScanned': INT,
+                         'filesMoved': INT,
+                         'filesFailed': INT,
+                         'totalSizeMoved': INT,
+                         'status': TaskStatus}}
+    """
+    if mode == 'rebalance':
+        tree = xmltree.find('volRebalance')
+    elif mode == 'remove-brick':
+        tree = xmltree.find('volRemoveBrick')
+    else:
+        return
+    status = \
+        {'taskId': tree.find('id').text,
+         'summary': \
+             {'filesScanned': int(tree.find('summary/filesScanned').text),
+              'filesMoved': int(tree.find('summary/filesMoved').text),
+              'filesFailed': int(tree.find('summary/filesFailed').text),
+              'totalSizeMoved': int(tree.find('summary/totalSizeMoved').text),
+              'status': tree.find('summary/status').text},
+         'host': []}
+    for el in tree.findall('node'):
+        status['host'].append({'name': el.find('name').text,
+                               'id': el.find('id').text,
+                               'filesScanned':
+                                   int(el.find('filesScanned').text),
+                               'filesMoved': int(el.find('filesMoved').text),
+                               'filesFailed': int(el.find('filesFailed').text),
+                               'totalSizeMoved':
+                                   int(el.find('totalSizeMoved').text),
+                               'status': el.find('status').text})
+    return status
+
+
+def _parseVolumeRebalanceStatus(tree):
+    return _parseVolumeRebalanceRemoveBrickStatus(tree, 'rebalance')
 
 
 @exportToSuperVdsm
 def volumeRebalanceStatus(volumeName):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["rebalance", volumeName,
-                                                       "status"])
-    if rc:
-        raise ge.GlusterVolumeRebalanceStatusFailedException(rc, out, err)
-    if 'in progress' in out[0]:
-        return BrickStatus.RUNNING, "\n".join(out)
-    elif 'complete' in out[0]:
-        return BrickStatus.COMPLETED, "\n".join(out)
-    else:
-        return BrickStatus.UNKNOWN, "\n".join(out)
+    command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
+                                                             err=e.err)
+    try:
+        return _parseVolumeRebalanceStatus(xmltree)
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
 
 
 @exportToSuperVdsm
 def volumeReplaceBrickStart(volumeName, existingBrick, newBrick):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
-                                                       volumeName,
-                                                       existingBrick, newBrick,
-                                                       "start"])
-    if rc:
-        raise ge.GlusterVolumeReplaceBrickStartFailedException(rc, out, err)
-    else:
-        return True
+    command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+                                     existingBrick, newBrick, "start"]
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeReplaceBrickStartFailedException(rc=e.rc,
+                                                               err=e.err)
+    try:
+        return {'taskId': xmltree.find('id').text}
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
 
 
 @exportToSuperVdsm
 def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
-                                                       volumeName,
-                                                       existingBrick, newBrick,
-                                                       "abort"])
-    if rc:
-        raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc, out, err)
-    else:
+    command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+                                     existingBrick, newBrick, "abort"]
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc=e.rc,
+                                                               err=e.err)
 
 
 @exportToSuperVdsm
 def volumeReplaceBrickPause(volumeName, existingBrick, newBrick):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
-                                                       volumeName,
-                                                       existingBrick, newBrick,
-                                                       "pause"])
-    if rc:
-        raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc, out, err)
-    else:
+    command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+                                     existingBrick, newBrick, "pause"]
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc=e.rc,
+                                                               err=e.err)
+
+
+def _parseVolumeReplaceBrickStatus(tree):
+    """
+    returns {'taskId': UUID,
+             'filesMoved': INT,
+             'movingFile': STRING,
+             'status': TaskStatus}
+    """
+    return {'taskId': tree.find('volReplaceBrick/id').text,
+            'filesMoved': int(tree.find('volReplaceBrick/filesMoved').text),
+            'movingFile': tree.find('volReplaceBrick/movingFile').text,
+            'status': tree.find('volReplaceBrick/status').text}
 
 
 @exportToSuperVdsm
 def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
-    rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
-                                                       volumeName,
-                                                       existingBrick, newBrick,
-                                                       "status"])
-    if rc:
-        raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc, out,
-                                                                err)
-    message = "\n".join(out)
-    statLine = out[0].strip().upper()
-    if BrickStatus.PAUSED in statLine:
-        return BrickStatus.PAUSED, message
-    elif statLine.endswith('MIGRATION COMPLETE'):
-        return BrickStatus.COMPLETED, message
-    elif statLine.startswith('NUMBER OF FILES MIGRATED'):
-        return BrickStatus.RUNNING, message
-    elif statLine.endswith("UNKNOWN"):
-        return BrickStatus.UNKNOWN, message
-    else:
-        return BrickStatus.NA, message
+    command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+                                     existingBrick, newBrick, "status"]
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc=e.rc,
+                                                                err=e.err)
+    try:
+        return _parseVolumeReplaceBrickStatus(xmltree)
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
 
 
 @exportToSuperVdsm
@@ -399,12 +515,12 @@
                                      existingBrick, newBrick, "commit"]
     if force:
         command.append('force')
-    rc, out, err = _execGluster(command)
-    if rc:
-        raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc, out,
-                                                                err)
-    else:
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc=e.rc,
+                                                                err=e.err)
 
 
 @exportToSuperVdsm
@@ -413,12 +529,15 @@
     if replicaCount:
         command += ["replica", "%s" % replicaCount]
     command += brickList + ["start"]
-
-    rc, out, err = _execGluster(command)
-    if rc:
-        raise ge.GlusterVolumeRemoveBrickStartFailedException(rc, out, err)
-    else:
-        return True
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc,
+                                                              err=e.err)
+    try:
+        return {'taskId': xmltree.find('id').text}
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
 
 
 @exportToSuperVdsm
@@ -427,12 +546,16 @@
     if replicaCount:
         command += ["replica", "%s" % replicaCount]
     command += brickList + ["stop"]
-    rc, out, err = _execGluster(command)
-
-    if rc:
-        raise ge.GlusterVolumeRemoveBrickStopFailedException(rc, out, err)
-    else:
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc,
+                                                             err=e.err)
+
+
+def _parseVolumeRemoveBrickStatus(tree):
+    return _parseVolumeRebalanceRemoveBrickStatus(tree, 'remove-brick')
 
 
 @exportToSuperVdsm
@@ -441,12 +564,15 @@
     if replicaCount:
         command += ["replica", "%s" % replicaCount]
     command += brickList + ["status"]
-    rc, out, err = _execGluster(command)
-
-    if rc:
-        raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc, out, err)
-    else:
-        return "\n".join(out)
+    try:
+        xmltree, out = _execGlusterXml(command)
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
+                                                               err=e.err)
+    try:
+        return _parseVolumeRemoveBrickStatus(xmltree)
+    except:
+        raise ge.GlusterXmlErrorException(err=out)
 
 
 @exportToSuperVdsm
@@ -455,12 +581,12 @@
     if replicaCount:
         command += ["replica", "%s" % replicaCount]
     command += brickList + ["commit"]
-    rc, out, err = _execGluster(command)
-
-    if rc:
-        raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc, out, err)
-    else:
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc,
+                                                               err=e.err)
 
 
 @exportToSuperVdsm
@@ -469,12 +595,12 @@
     if replicaCount:
         command += ["replica", "%s" % replicaCount]
     command += brickList + ["force"]
-    rc, out, err = _execGluster(command)
-
-    if rc:
-        raise ge.GlusterVolumeRemoveBrickForceFailedException(rc, out, err)
-    else:
+    try:
+        _execGlusterXml(command)
         return True
+    except ge.GlusterCmdFailedException, e:
+        raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc,
+                                                              err=e.err)
 
 
 @exportToSuperVdsm
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index f4f497b..f209885 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -323,6 +323,11 @@
     message = "Volume remove brick force failed"
 
 
+class GlusterVolumeStatusAllFailedException(GlusterVolumeException):
+    code = 4158
+    message = "Volume status all failed"
+
+
 # Host
 class GlusterHostException(GlusterException):
     code = 4400
diff --git a/vdsm/storage/taskManager.py b/vdsm/storage/taskManager.py
index 3bc12f3..0a269cd 100644
--- a/vdsm/storage/taskManager.py
+++ b/vdsm/storage/taskManager.py
@@ -25,6 +25,12 @@
 import storage_exception as se
 from task import Task, Job, TaskCleanType
 from threadPool import ThreadPool
+try:
+    from gluster import cli as gcli
+    from gluster import exception as ge
+    _glusterEnabled = True
+except ImportError:
+    _glusterEnabled = False
 
 
 class TaskManager:
@@ -113,19 +119,82 @@
         self.log.debug("Return: %s", subRes)
         return subRes
 
-    def getAllTasks(self):
+    def _getAllGlusterTasks(self):
         """
-        Return Tasks for all public tasks.
+        Return all gluster tasks
+        """
+        subRes = {}
+        if not _glusterEnabled:
+            return subRes
+
+        for taskId, value in gcli.volumeStatusAll().iteritems():
+            msg = ''
+            state = ''
+            try:
+                if value['taskType'] == gcli.TaskType.REBALANCE:
+                    status = gcli.volumeRebalanceStatus(value['volumeName'])
+                    msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+                           'Total size moved: %d') % \
+                           (status['summary']['filesScanned'],
+                            status['summary']['filesMoved'],
+                            status['summary']['filesFailed'],
+                            status['summary']['totalSizeMoved'])
+                    state = status['summary']['status']
+                elif value['taskType'] == gcli.TaskType.REMOVE_BRICK:
+                    status = gcli.volumeRemoveBrickStatus(value['volumeName'],
+                                                          value['bricks'])
+                    msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+                           'Total size moved: %d') % \
+                           (status['summary']['filesScanned'],
+                            status['summary']['filesMoved'],
+                            status['summary']['filesFailed'],
+                            status['summary']['totalSizeMoved'])
+                    state = status['summary']['status']
+                elif value['taskType'] == gcli.TaskType.REPLACE_BRICK:
+                    status = gcli.volumeReplaceBrickStatus(value['volumeName'],
+                                                           value['bricks'][0],
+                                                           value['bricks'][1])
+                    msg = 'Files moved: %d, Moving file: %s' % \
+                        (status['filesMoved'], status['movingFile'])
+                    state = status['status']
+            except ge.GlusterException:
+                self.log.error("gluster exception occurred", exc_info=True)
+
+            subRes[taskId] = {"id": taskId,
+                              "verb": value['volumeName'],
+                              "state": state,
+                              "code": value['taskType'],
+                              "message": msg,
+                              "result": '',
+                              "tag": 'gluster'}
+        return subRes
+
+    def getAllTasks(self, tag=[]):
+        """
+        Return Tasks for all public tasks if param tag is empty,
+        else return only tasks whose tags are in param tag.
         """
         self.log.debug("Entry.")
         subRes = {}
         for taskID, task in self._tasks.items():
             try:
-                subRes[taskID] = task.getDetails()
+                if not tag:
+                    subRes[taskID] = task.getDetails()
+                elif task.getTags() in tag:
+                    subRes[taskID] = task.getDetails()
             except se.UnknownTask:
                 # Return info for existing tasks only.
                 self.log.warn("Unknown task %s. Maybe task was already "
                                 "cleared.", taskID)
+
+        try:
+            if not tag:
+                subRes.update(self._getAllGlusterTasks())
+            elif 'gluster' in tag:
+                subRes.update(self._getAllGlusterTasks())
+        except ge.GlusterException:
+            self.log.error("gluster exception occurred", exc_info=True)
+
         self.log.debug("Return: %s", subRes)
         return subRes
 


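For reviewers, a hedged example of the entry _getAllGlusterTasks() adds to
the getAllTasks() result for a rebalance task (IDs and counters are
illustrative):

    {'12345473-9197-49d0-a877-831bc6e9dac2':
        {'id': '12345473-9197-49d0-a877-831bc6e9dac2',
         'verb': 'V1',
         'state': 'RUNNING',
         'code': 'rebalance',
         'message': 'Files [scanned: 9, moved: 5, failed: 0], Total size moved: 512',
         'result': '',
         'tag': 'gluster'}}
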
--
To view, visit http://gerrit.ovirt.org/7579
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Bala.FA <barum...@redhat.com>
Gerrit-Reviewer: Ayal Baron <aba...@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <dan...@redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimo...@redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizr...@redhat.com>
Gerrit-Reviewer: Timothy Asir <tjeya...@redhat.com>
_______________________________________________
vdsm-patches mailing list
vdsm-patches@lists.fedorahosted.org
https://lists.fedorahosted.org/mailman/listinfo/vdsm-patches
