On 10/03/2017 09:42 AM, Joshua Lock wrote:
Hi Jose,

Where will the previous log come from in standard QA use?

I'm wondering if we should work towards (in the 2.5 cycle) another git repo in the yp-qa-* namespace to keep historical ptest results and build some tooling around that, in a similar vein to poky-buildhistory / buildhistory-diff and yp-qa-build-perf-data / oe-build-perf-report.

Thanks,

Joshua
Hi Joshua,

Currently, as defined in the pTest execution process for QA cycles [1], we download the last execution results directly from the previously created wiki page; an archive of past pTest executions is kept at [2]. The proposal to create a separate repo to handle the results is great. Could you raise a bug with the details of this proposal and assign it to me, so I can check with the team about who can manage the implementation?

1- https://wiki.yoctoproject.org/wiki/BSP_Test_Plan#pTest
2- https://wiki.yoctoproject.org/wiki/Ptest/archive

José

On 02/10/17 21:52, jose.perez.carra...@linux.intel.com wrote:
From: Jose Perez Carranza <jose.perez.carra...@linux.intel.com>

Add a ptest-parser.py script that creates a file reporting the results
of a pTest execution compared with previously provided results. The data
stored in the file is in wikimedia format so it can be copied and pasted
into the newly created wiki page for the QA cycle's pTest results.

Signed-off-by: Jose Perez Carranza <jose.perez.carra...@linux.intel.com>
---
  scripts/ptest-parser.py | 234 ++++++++++++++++++++++++++++++++++++++++++++++++
  1 file changed, 234 insertions(+)
  create mode 100755 scripts/ptest-parser.py

diff --git a/scripts/ptest-parser.py b/scripts/ptest-parser.py
new file mode 100755
index 0000000..a3f20fb
--- /dev/null
+++ b/scripts/ptest-parser.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python3
+
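+"""Parse pTest logs and report the results in wikimedia format.
+
+The current pTest log is compared against a previous one and the report
+is written to a <curr_release>-wiki.page file whose contents can be
+pasted into the QA cycle wiki.
+
+Example invocation (file names and values are illustrative):
+
+    ./ptest-parser.py -p prev.log --pre_commit <commit> --pre_release <release> \
+                      -c curr.log --curr_commit <commit> --curr_release <release>
+"""
+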
+import sys
+import os.path
+import codecs
+import logging
+import argparse
+
+
+prelog = {}
+currlog = {}
+
+
+def get_args():
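+    """Parse and return the command-line arguments (all are required)."""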
+    parser = argparse.ArgumentParser(description="Parse ptest results")
+    requiredNamed = parser.add_argument_group('required arguments')
+    requiredNamed.add_argument('-p', dest="pre_log", required=True, help='previous log of the pTest results.')
+    requiredNamed.add_argument('--pre_commit', dest="pre_commit", required=True, help='commit of the previous log.')
+    requiredNamed.add_argument('--pre_release', dest="pre_release", required=True, help='release of the previous log.')
+    requiredNamed.add_argument('-c', dest="curr_log", required=True, help='current log of pTest results.')
+    requiredNamed.add_argument('--curr_commit', dest="curr_commit", required=True, help='commit of the current log.')
+    requiredNamed.add_argument('--curr_release', dest="curr_release", required=True, help='release of the current log.')
+
+    return parser.parse_args()
+
+
+
+# Check that the log files exist
+def check_args():
+    if not os.path.isfile(args.pre_log):
+        logging.error("Cannot find log file %s" % args.pre_log)
+        sys.exit(1)
+    elif not os.path.isfile(args.curr_log):
+        logging.error("Cannot find log file %s" % args.curr_log)
+        sys.exit(1)
+
+
+def create_log_dict(argslog):
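+    """Parse a pTest log into a per-module results dictionary.
+
+    Log lines are expected in the form "RESULT: test_id", where RESULT is
+    one of BEGIN, PASS, FAIL or END.  Returns a dictionary mapping each
+    test module to [total, passed, failed, passrate]; the failures of
+    each module are also written to a file under the failures/ directory.
+    """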
+    test_id = ""
+    result = ""
+    failures = ""
+    failures_log = []
+    passed = 0
+    failed = 0
+    with codecs.open(argslog, 'r', 'utf-8', errors='ignore') as content_file:
+        content = content_file.read()
+    content = content.split("\n")
+    res_dict = {}
+    directory = 'failures'
+
+    #create dir to store failures
+    if not os.path.exists(directory):
+        os.makedirs(directory)
+
+    #Parse the logs
+    for i in range(len(content) - 1):
+        try:
+            result = content[i].split(":")[0]
+            test_id = content[i].split(":")[1].strip()
+        except IndexError:
+            result = None
+        if result:
+            if result in "BEGIN":
+                test_module = test_id
+                test_module = test_module.replace('/usr/lib/', '')
+                test_module = test_module.replace('/ptest', '')
+            elif result in "PASS":
+                passed = passed + 1
+            elif result in "FAIL":
+                failed = failed + 1
+                failures = "{0}:{1}" .format(result,test_id)
+                failures_log.append(failures)
+            elif result in "END":
+                total = passed + failed
+                if total == 0:
+                    passrate = 0
+                else:
+                    passrate = 100 * (passed / total)
+                res_dict[test_module] = [total, passed, failed, round(passrate,2)]
+
+                # Store the failures for this module
+                with open('failures/' + test_module + ".failures", 'w') as failures_file:
+                    for fails in failures_log:
+                        failures_file.write("{0}\n".format(fails))
+                failures_log = []
+
+                total = passed = failed = 0
+
+    return res_dict
+
+
+def create_compared_dict(currlog, prelog):
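+    """Append the previous passrate to each entry of the current results.
+
+    Modules present only in the previous log are carried over with their
+    previous counts and a current passrate of 0, so they still appear in
+    the report.
+    """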
+    diff = set(prelog.keys()) - set(currlog.keys())
+
+    for key in currlog:
+        if key in prelog:
+            lastpassrate = prelog[key][3]
+        else:
+            lastpassrate = 0
+
+        currlog[key].append(lastpassrate)
+
+    for item in diff:
+        if item in prelog.keys():
+            currlog[item] = prelog[item]
+            currlog[item].insert(3, 0)
+
+    return currlog
+
+
+def create_header(file_name):
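+    """Write the wiki page header linking the archive, the previous results and the full log."""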
+    header = '[https://wiki.yoctoproject.org/wiki/Ptest/archive < Archive]' + \
+             '\n\n<br />Ran on a NUC and compared with ' + \
+             '[[Ptest %s| %s]]' % (args.pre_commit, args.pre_release) + \
+             '\n\n[<put-here-link-of-the-log> < FULL_LOG]'
+
+    with open(file_name, 'a') as f:
+        f.write(header)
+
+def create_table(file_name, finalog):
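+    """Append the color nomenclature and the per-module results table in wikimedia format."""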
+
+    nomenclature = '\n\n{| style="border-spacing:0;"\n' + \
+        '| style="background-color:#ff8080;border:0.05pt solid #000000' + \
+        ';padding:0.0382in;"| Failures\n' + \
+        '|-\n' + \
+        '| style="background-color:#99ff66;border-top:none;border-bottom:0.05pt' + \ +        'solid #000000;border-left:0.05pt solid #000000;border-right:0.05pt' + \
+        'solid #000000;padding:0.0201in;"| % increased\n' + \
+        '|-\n' + \
+        '| style="background-color:#c5000b;border-top:none;border-bottom:0.05pt' + \ +        'solid #000000;border-left:0.05pt solid #000000;border-right:0.05pt' + \ +        'solid #000000;padding:0.0201in;color:#ffffff"| % decreased\n' + \
+        '|-\n' + \
+        '| style="background-color:gray;border-top:none;border-bottom:0.05pt' + \ +        'solid #000000;border-left:0.05pt solid #000000;border-right:0.05pt' + \ +        'solid #000000;padding:0.0201in;color:#000000"| Module not executed\n' + \
+        '|}'
+
+    titles = ['Package Name', 'Tests ran', 'Passed', 'Failed', 'Passrate', 'Last Passrate']
+
+    table = '\n\n{| class="wikitable"\n' + \
+        '|+Results\n' + \
+        '|-\n'
+
+    for t in titles:
+        table += '| align=center style="background-color:#ccffff;' + \
+            'border-top:0.05pt solid #000000;border-bottom:0.5pt solid ' + \
+            '#000000;border-left:0.05pt solid #000000;border-right:0.5pt ' + \
+            'solid #000000;padding:0.0201in;"' + \
+            "| '''%s'''\n"% t
+
+    table += '|-\n'
+
+
+    for key, value in sorted(finalog.items()):
+        bgcolor_rate = bgcolor_fails = 'transparent'
+        text_color = '#000000'
+
+        if all(v == 0 for v in value[0:2]):
+            bgcolor_rate = bgcolor_fails = 'gray'
+            rate_image = 'no_rate.png'
+
+        #Package Name
+        table += '| align=left style="background-color:transparent;' + \
+            'border-color:#000000"| %s\n' % key
+
+        #Tests ran and Passed
+        for v in range(0,2):
+            table += '| align=center style="background-color:%s;' % bgcolor_rate + \
+                'border-color:#000000"|%s\n' % value[v]
+
+        if value[2] > 0:
+            bgcolor_fails = '#ff8080'
+
+        #Failed
+        table += '| align=center style="background-color:%s;' % bgcolor_fails + \
+            'border-color:#000000"|%s\n' % value[2]
+
+        if value[3] > value[4]:
+            bgcolor_rate = '#99ff66'
+            rate_image = 'up_rate.png'
+        elif value[3] < value[4]:
+            bgcolor_rate = '#c5000b'
+            rate_image = 'down_rate.png'
+            text_color = '#ffffff'
+
+        #Passrate
+        table += '| align=center style="background-color:%s;' % bgcolor_rate + \
+            'border-color:#000000;color:%s"|%s\n' % (text_color, value[3])
+
+        #Last Passrate
+        table += '| align=center style="background-color:%s;' % bgcolor_rate + \
+            'border-color:#000000;color:%s"|%s\n' % (text_color, value[4])
+
+        if bgcolor_rate != 'transparent':
+            table += '| align=center style="background-color:transparent;' + \
+                'border-color:transparent" |[[File:%s|15px]]\n' % rate_image
+
+        table += '|-\n'
+
+
+
+    with open(file_name, 'a') as f:
+        f.write(nomenclature + table)
+
+
+def create_pTest_wiki(finalog):
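+    """Create the wiki page file (header plus results table) and return its name."""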
+
+    file_name = args.curr_release + '-wiki.page'
+
+    create_header(file_name)
+
+    create_table(file_name, finalog)
+
+    return file_name
+
+if __name__ == '__main__':
+    args = get_args()
+    check_args()
+
+    logging.basicConfig(stream=sys.stdout)
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+
+    # Parse the previous log
+    prelog = create_log_dict(args.pre_log)
+    # Parse the current log
+    currlog = create_log_dict(args.curr_log)
+    logger.info("Logs parsed correctly")
+    # Get the compared result
+    compared_log = create_compared_dict(currlog, prelog)
+
+    file_name = create_pTest_wiki(compared_log)
+    logger.info("Wiki correctly created as -  %s -"%file_name)


--
Regards,
José
