tfiala updated this revision to Diff 33375.

http://reviews.llvm.org/D12416

Files:
  test/reports/dotest_stats.py

Index: test/reports/dotest_stats.py
===================================================================
--- test/reports/dotest_stats.py
+++ test/reports/dotest_stats.py
@@ -0,0 +1,158 @@
+"""
+Report stats on the test output from dosep.py/dotest.py,
+breaking down reported reasons for skipped tests.
+
+Here is a flow to run this report:
+cd {your_lldb_source_dir}/test
+python dosep.py -s --options "-q --executable /path/to/lldb -A {your_arch} \
+    -C {your_compiler_path}" 2>&1 | tee /tmp/test_output.log
+python {path_to_this_script} -t /tmp/test_output.log
+"""
+
+import argparse
+import os.path
+import re
+
+
+def parse_options():
+    parser = argparse.ArgumentParser(
+        description='Collect stats on lldb test run trace output dir')
+    parser.add_argument(
+        '--trace-file', '-t', action='store', required=True,
+        help='trace file to parse')
+    parser.add_argument(
+        '--verbose', '-v', action='store_true',
+        help='produce verbose output during operation')
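+        # Note: --verbose is accepted but not currently consulted below.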
+    return parser.parse_args()
+
+
+def validate_options(options):
+    if not os.path.isfile(options.trace_file):
+        print 'trace file "{}" does not exist'.format(options.trace_file)
+        return False
+    return True
+
+
+def process_skipped_test(options, line, match, skip_reasons):
+    if len(match.groups()) > 0:
+        key = match.group(1)
+    else:
+        # The catch-all UNSUPPORTED regex has no capture group, so any
+        # skip line without a parenthesized reason lands here.
+        key = 'unspecified'
+
+    if key in skip_reasons:
+        skip_reasons[key] += 1
+    else:
+        skip_reasons[key] = 1
+
+
+def parse_trace_output(options):
+    skip_reasons = {}
+
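+    # Dispatch table: each entry pairs a counter key with a regex that
+    # classifies a trace line; the optional 'substats_func' and
+    # 'substats_dict_arg' entries collect per-reason counts for matches.
+    # Order matters: the reason-capturing UNSUPPORTED regex precedes the
+    # catch-all one, and the first matching entry wins.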
+    test_result_types = [
+        {'value_key': 'test suites', 'regex': re.compile(r'^RESULT:.+$')},
+        {'value_key': 'success', 'regex': re.compile(r'^PASS: LLDB.+$')},
+        {'value_key': 'failure', 'regex': re.compile(r'^FAIL: LLDB.+$')},
+        {'value_key': 'expected failure',
+         'regex': re.compile(r'^XFAIL:.+$')},
+        {'value_key': 'skipped',
+         'regex': re.compile(r'^UNSUPPORTED:.+\(([^\)]+)[\)\s]*$'),
+         'substats_func': process_skipped_test,
+         'substats_dict_arg': skip_reasons},
+        # Catch anything that didn't match the regex above but clearly
+        # is unsupported.
+        {'value_key': 'skipped',
+         'regex': re.compile(r'^UNSUPPORTED:.+$'),
+         'substats_func': process_skipped_test,
+         'substats_dict_arg': skip_reasons},
+        {'value_key': 'unexpected success',
+         'regex': re.compile(r'^XPASS:.+$')}
+    ]
+
+    early_termination_re = re.compile(r'^Ran \d+ test suites.*$')
+
+    # Initialize count values for each type.
+    counts = {}
+    for tr_type in test_result_types:
+        counts[tr_type['value_key']] = 0
+
+    with open(options.trace_file, 'r') as trace_file:
+        for line in trace_file:
+            # Early termination condition - stop after test suite
+            # counts are printed out so we don't double count fails
+            # and other reported test entries.
+            if early_termination_re.match(line):
+                break
+
+            for tr_type in test_result_types:
+                match = tr_type['regex'].match(line)
+                if match:
+                    counts[tr_type['value_key']] += 1
+                    if 'substats_func' in tr_type:
+                        tr_type['substats_func'](
+                            options, line, match, tr_type['substats_dict_arg'])
+                    break
+    return (counts, skip_reasons)
+
+
+def print_counts(options, counts, skip_reasons):
+    print 'Test Counts'
+    print '---- ------'
+    # Print the counts gathered from the trace file.
+    report_entries = [
+        {'name': 'started', 'unit': 'file'},
+        {'name': 'success', 'unit': 'method'},
+        {'name': 'unexpected success', 'unit': 'method'},
+        {'name': 'failure', 'unit': 'method'},
+        {'name': 'expected failure', 'unit': 'method'},
+        {'name': 'skipped', 'unit': 'method'},
+        {'name': 'skipped.no-reason', 'unit': 'method'}
+    ]
+    max_name_len = max(
+        len(report_entry['name']) for report_entry in report_entries)
+    format_str = '{:<' + str(max_name_len + 2) + '}{}'
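+    # format_str ends up like '{:<20}{}', padding names into a fixed column.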
+
+    for report_entry in report_entries:
+        if report_entry['name'] in counts:
+            print format_str.format(
+                report_entry['name'] + ':',
+                counts[report_entry['name']])
+
+    # Print computed entries, if any skip reasons were seen.
+    if not skip_reasons:
+        return
+
+    max_skip_reason_len = max(
+        len(reason) for reason in skip_reasons.keys())
+    reason_format_str = '{:<' + str(max_skip_reason_len + 2) + '}{}'
+
+    print
+    print 'Skip Reasons'
+    print '---- -------'
+    for reason_key in sorted(skip_reasons, key=skip_reasons.get, reverse=True):
+        print reason_format_str.format(
+            reason_key + ':', skip_reasons[reason_key])
+
+
+def main():
+    options = parse_options()
+    if not validate_options(options):
+        exit(1)
+
+    (counts, skip_reasons) = parse_trace_output(options)
+    print_counts(options, counts, skip_reasons)
+
+if __name__ == '__main__':
+    main()
_______________________________________________
lldb-commits mailing list
lldb-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
