Antoni Segura Puimedon has uploaded a new change for review.

Change subject: tc_filter: make use of _parser for getting the relevant items
......................................................................

tc_filter: make use of _parser for getting the relevant items

The previous patch introduced several parsing methods for the new
qdisc tc module. This patch updates the existing filter module to
use them.

Change-Id: I8fedf4b923288472aa4f45168d42601fe7c9c164
Signed-off-by: Antoni S. Puimedon <[email protected]>
---
M vdsm/network/tc/filter.py
1 file changed, 18 insertions(+), 16 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/66/30466/1

diff --git a/vdsm/network/tc/filter.py b/vdsm/network/tc/filter.py
index b646401..04e8ac9 100644
--- a/vdsm/network/tc/filter.py
+++ b/vdsm/network/tc/filter.py
@@ -34,9 +34,9 @@
     data = {}
     for token in tokens:
         if token == 'root':
-            data['root'] = True
+            data['root'] = _parser.parse_true(tokens)
         elif token in ('dev', 'parent', 'protocol', 'pref'):
-            data[token] = next(tokens)
+            data[token] = _parser.parse_str(tokens)
         elif token in _CLASSES:
             data['kind'] = token
             break
@@ -53,27 +53,27 @@
     data = {}
     for token in tokens:
         if token in ('fh', 'order', 'link'):
-            data[token] = next(tokens)
+            data[token] = _parser.parse_str(tokens)
         elif token in ('*flowid', 'flowid'):
-            data['flowid'] = next(tokens)
+            data['flowid'] = _parser.parse_str(tokens)
         elif token == 'terminal':
             data['terminal'] = True
             next(tokens)  # swallow 'flowid'
             next(tokens)  # swallow '???'
         elif token == 'ht':
             next(tokens)
-            data['ht_divisor'] = next(tokens)
+            data['ht_divisor'] = _parser.parse_str(tokens)
         elif token == 'key':
             next(tokens)   # swallow 'ht'
-            data['key_ht'] = next(tokens)
+            data['key_ht'] = _parser.parse_str(tokens)
             next(tokens)  # swallow 'bkt'
-            data['key_bkt'] = next(tokens)
+            data['key_bkt'] = _parser.parse_str(tokens)
         elif token == '???':
             continue
         elif token == 0:  # line break
             continue
         elif token == 'match':
-            match_first = next(tokens)
+            match_first = _parser.parse_str(tokens)
             if match_first.lower() == 'ip':
                 data['match'] = _parse_match_ip(tokens)  # To implement
             else:
@@ -98,7 +98,7 @@
     value = int(value, 16)
     mask = int(mask, 16)
     next(tokens)  # Swallow 'at'
-    offset = int(next(tokens))
+    offset = _parser.parse_int(tokens)
     return {'value': value, 'mask': mask, 'offset': offset}
 
 
@@ -110,8 +110,8 @@
         if token == 0:
             continue
         if token == 'order':
-            data[token] = next(tokens)
-            data['kind'] = next(tokens)
+            data[token] = _parser.parse_str(tokens)
+            data['kind'] = _parser.parse_str(tokens)
             action_opt_parse = _ACTIONS.get(data['kind'])
             if action_opt_parse is not None:
                 data.update(action_opt_parse(tokens))
@@ -121,19 +121,21 @@
 def _parse_mirred(tokens):
     """Parses the tokens of a mirred action into a data dictionary"""
     data = {}
-    action = next(tokens)[1:]  # Get the first token without the opening paren
+    # Get the first token without the opening paren
+    action = _parser.parse_str(tokens)[1:]
     if action == 'unkown':
         data['action'] = action
     else:
-        data['action'] = '%s_%s' % (action.lower(), next(tokens).lower())
+        data['action'] = '%s_%s' % (action.lower(),
+                                    _parser.parse_str(tokens).lower())
     next(tokens)  # swallow 'to'
     next(tokens)  # swallow 'device'
-    data['target'] = next(tokens)[:-1]
-    data['op'] = next(tokens)
+    data['target'] = _parser.parse_str(tokens)[:-1]
+    data['op'] = _parser.parse_str(tokens)
     next(tokens)  # pop the 0 that marks new line
     for token in tokens:
         if token in ('index', 'ref', 'bind'):
-            data[token] = next(tokens)
+            data[token] = _parser.parse_str(tokens)
         elif token == 0:
             break
         else:


-- 
To view, visit http://gerrit.ovirt.org/30466
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I8fedf4b923288472aa4f45168d42601fe7c9c164
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Antoni Segura Puimedon <[email protected]>
_______________________________________________
vdsm-patches mailing list
[email protected]
https://lists.fedorahosted.org/mailman/listinfo/vdsm-patches

Reply via email to