Piglit test results are difficult to analyze with simpler JUnit visualizers like those provided by Jenkins. There are hundreds of failures, but the engineer is typically interested only in NEW failures. Jenkins JUnit rules typically expect a 100% pass rate, or some static threshold of failures.
Specifying test names in the [expected-failures] or [expected-crashes] sections of the config file enables JUnit reporting of just the test failures which represent new regressions. Test names are expected in the dotted JUnit format (eg: "piglit.spec.ARB_fragment_program.fp-formats") and are case insensitive. --- framework/results.py | 38 ++++++++++++++++++++++++++++++++++++-- piglit.conf.example | 9 +++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/framework/results.py b/framework/results.py index e73519a..0496124 100644 --- a/framework/results.py +++ b/framework/results.py @@ -29,6 +29,8 @@ import abc import threading import posixpath from cStringIO import StringIO +from framework.core import PIGLIT_CONFIG + try: import simplejson as json except ImportError: @@ -385,6 +387,18 @@ class JUnitBackend(FSyncMixin, Backend): self._file = open(os.path.join(dest, 'results.xml'), 'w') FSyncMixin.__init__(self, **options) + # make dictionaries of all test names expected to crash/fail + # for quick lookup when writing results. Use lower-case to + # provide case insensitive matches. + self._expected_failures = {} + if PIGLIT_CONFIG.has_section("expected-failures"): + for (fail, _) in PIGLIT_CONFIG.items("expected-failures"): + self._expected_failures[fail.lower()] = True + self._expected_crashes = {} + if PIGLIT_CONFIG.has_section("expected-crashes"): + for (fail, _) in PIGLIT_CONFIG.items("expected-crashes"): + self._expected_crashes[fail.lower()] = True + # Write initial headers and other data that etree cannot write for us self._file.write('<?xml version="1.0" encoding="UTF-8" ?>\n') self._file.write('<testsuites>\n') @@ -413,6 +427,13 @@ class JUnitBackend(FSyncMixin, Backend): # set different root names. classname = 'piglit.' + classname + expected_result = "pass" + lname = (classname + "." 
+ testname).lower() + if lname in self._expected_failures: + expected_result = "failure" + if lname in self._expected_crashes: + expected_result = "error" + # Create the root element element = etree.Element('testcase', name=testname, classname=classname, @@ -431,10 +452,23 @@ class JUnitBackend(FSyncMixin, Backend): # one of these statuses if data['result'] == 'skip': etree.SubElement(element, 'skipped') + elif data['result'] in ['warn', 'fail', 'dmesg-warn', 'dmesg-fail']: - etree.SubElement(element, 'failure') + if expected_result == "failure": + err.text += "\n\nWARN: passing test as an expected failure" + else: + etree.SubElement(element, 'failure') + elif data['result'] == 'crash': - etree.SubElement(element, 'error') + if expected_result == "error": + err.text += "\n\nWARN: passing test as an expected crash" + else: + etree.SubElement(element, 'error') + + elif expected_result != "pass": + err.text += "\n\nERROR: This test passed when it "\ + "expected {0}".format(expected_result) + etree.SubElement(element, 'failure') self._file.write(etree.tostring(element)) self._file.write('\n') diff --git a/piglit.conf.example b/piglit.conf.example index 9cb7dd1..95454ba 100644 --- a/piglit.conf.example +++ b/piglit.conf.example @@ -71,3 +71,12 @@ run_test=./%(test_name)s ; Options can be found running piglit run -h and reading the section for ; -b/--backend ;backend=json + +[expected-failures] +; Provide a list of test names that are expected to fail. These tests +; will be listed as passing in JUnit output when they fail. Any +; unexpected result (pass/crash) will be listed as a failure. + +[expected-crashes] +; Like expected-failures, but specifies that a test is expected to +; crash. \ No newline at end of file -- 2.1.0 _______________________________________________ Piglit mailing list Piglit@lists.freedesktop.org http://lists.freedesktop.org/mailman/listinfo/piglit