Revision: 106784
Author: ser...@webkit.org
Date: 2012-02-06 01:07:20 -0800 (Mon, 06 Feb 2012)
Log Message
Incorrect statistics shown when running run-webkit-tests with --repeat-each or --iterations
https://bugs.webkit.org/show_bug.cgi?id=77672
Reviewed by Dirk Pranke.
Test repetitions must be taken into account when working out
the statistics shown by run-webkit-tests.
* Scripts/webkitpy/layout_tests/controllers/manager.py:
(Manager.prepare_lists_and_print_output):
(Manager._print_result_summary):
* Scripts/webkitpy/layout_tests/models/result_summary.py:
(ResultSummary.__init__):
(ResultSummary.add):
* Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
(MainTest.test_repeat_each_iterations_num_tests):
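The flags compose multiplicatively: --repeat-each reruns each test back-to-back, --iterations reruns the whole test list, and every repetition should count as a run in the statistics. A minimal sketch of the intended accounting (the helper name is hypothetical, not part of the patch):

    def expected_total(num_tests, repeat_each=None, iterations=None):
        # An unset flag (None) contributes a factor of 1.
        return num_tests * (repeat_each or 1) * (iterations or 1)

    # Two tests under --repeat-each 4 --iterations 2 count as 16 runs.
    assert expected_total(2, repeat_each=4, iterations=2) == 16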
Modified Paths
trunk/Tools/ChangeLog
trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
Diff
Modified: trunk/Tools/ChangeLog (106783 => 106784)
--- trunk/Tools/ChangeLog 2012-02-06 08:56:02 UTC (rev 106783)
+++ trunk/Tools/ChangeLog 2012-02-06 09:07:20 UTC (rev 106784)
@@ -1,3 +1,22 @@
+2012-02-06  Sergio Villar Senin  <svil...@igalia.com>
+
+        Incorrect statistics shown when running run-webkit-tests with --repeat-each or --iterations
+        https://bugs.webkit.org/show_bug.cgi?id=77672
+
+        Reviewed by Dirk Pranke.
+
+        Test repetitions must be taken into account when working out
+        the statistics shown by run-webkit-tests.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        (Manager.prepare_lists_and_print_output):
+        (Manager._print_result_summary):
+        * Scripts/webkitpy/layout_tests/models/result_summary.py:
+        (ResultSummary.__init__):
+        (ResultSummary.add):
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+        (MainTest.test_repeat_each_iterations_num_tests):
+
 2012-02-05  Dan Bernstein  <m...@apple.com>

         <rdar://problem/10809525> WebKit2’s WebFrameLoaderClient::shouldUseCredentialStorage() always returns true
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (106783 => 106784)
--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 2012-02-06 08:56:02 UTC (rev 106783)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 2012-02-06 09:07:20 UTC (rev 106784)
@@ -503,7 +503,10 @@
         if self._options.iterations:
             self._test_files_list = self._test_files_list * self._options.iterations

-        result_summary = ResultSummary(self._expectations, self._test_files | skipped)
+        iterations = \
+            (self._options.repeat_each if self._options.repeat_each else 1) * \
+            (self._options.iterations if self._options.iterations else 1)
+        result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)
         self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
         self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")
         self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky")
@@ -518,7 +521,11 @@
         for test in skipped:
             result = test_results.TestResult(test)
             result.type = test_expectations.SKIP
-            result_summary.add(result, expected=True)
+            iterations = \
+                (self._options.repeat_each if self._options.repeat_each else 1) * \
+                (self._options.iterations if self._options.iterations else 1)
+            for iteration in range(iterations):
+                result_summary.add(result, expected=True)
         self._printer.print_expected('')

         # Check to make sure we didn't filter out all of the tests.
@@ -1320,9 +1327,8 @@
         Args:
             result_summary: information to log
         """
-        failed = len(result_summary.failures)
-        skipped = len(
-            result_summary.tests_by_expectation[test_expectations.SKIP])
+        failed = result_summary.total_failures
+        skipped = result_summary.total_tests_by_expectation[test_expectations.SKIP]
         total = result_summary.total
         passed = total - failed - skipped
         pct_passed = 0.0
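The same multiplier is computed in two places above; for skipped tests the effect is that each skipped result is added once per repetition, which keeps result_summary.remaining consistent with the inflated total. A condensed sketch of that behavior (standalone; the function name and argument shapes are illustrative, not from the patch):

    def record_skipped(result_summary, skipped_results, repeat_each=None, iterations=None):
        # Unset flags are falsy, so each contributes a factor of 1;
        # this matches the conditional expressions in the patch.
        runs = (repeat_each or 1) * (iterations or 1)
        for result in skipped_results:
            # Add once per repetition, mirroring how executed tests
            # are counted once per actual run.
            for _ in range(runs):
                result_summary.add(result, expected=True)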
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py (106783 => 106784)
--- trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py 2012-02-06 08:56:02 UTC (rev 106783)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py 2012-02-06 09:07:20 UTC (rev 106784)
@@ -31,8 +31,8 @@


 class ResultSummary(object):
-    def __init__(self, expectations, test_files):
-        self.total = len(test_files)
+    def __init__(self, expectations, test_files, iterations=1):
+        self.total = len(test_files) * iterations
         self.remaining = self.total
         self.expectations = expectations
         self.expected = 0
@@ -40,22 +40,28 @@
         self.unexpected_failures = 0
         self.unexpected_crashes = 0
         self.unexpected_timeouts = 0
+        self.total_tests_by_expectation = {}
         self.tests_by_expectation = {}
         self.tests_by_timeline = {}
         self.results = {}
         self.unexpected_results = {}
         self.failures = {}
+        self.total_failures = 0
+        self.total_tests_by_expectation[SKIP] = 0
         self.tests_by_expectation[SKIP] = set()
         for expectation in TestExpectations.EXPECTATIONS.values():
             self.tests_by_expectation[expectation] = set()
+            self.total_tests_by_expectation[expectation] = 0
         for timeline in TestExpectations.TIMELINES.values():
             self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)

     def add(self, test_result, expected):
+        self.total_tests_by_expectation[test_result.type] += 1
         self.tests_by_expectation[test_result.type].add(test_result.test_name)
         self.results[test_result.test_name] = test_result
         self.remaining -= 1
         if len(test_result.failures):
+            self.total_failures += 1
             self.failures[test_result.test_name] = test_result.failures
         if expected:
             self.expected += 1
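The root cause is visible in the old fields: tests_by_expectation stores test names in sets and failures is keyed by test name, so repeated runs of one test collapse into a single entry, while the new total_* integers count every run. An illustrative comparison (not code from the patch):

    runs = ['passes/text.html'] * 8   # one test executed 8 times

    names_seen = set()                # old style: set of test names
    run_count = 0                     # new style: per-run counter
    for name in runs:
        names_seen.add(name)          # deduplicates; stays at one entry
        run_count += 1                # counts every repetition

    assert len(names_seen) == 1      # what the old statistics reported
    assert run_count == 8            # what --repeat-each/--iterations need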
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (106783 => 106784)
--- trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py 2012-02-06 08:56:02 UTC (rev 106783)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py 2012-02-06 09:07:20 UTC (rev 106784)
@@ -390,6 +390,18 @@
         tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
         self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

+    def test_repeat_each_iterations_num_tests(self):
+        # The total number of tests should be: number_of_tests *
+        # repeat_each * iterations
+        host = MockHost()
+        res, out, err, _ = logging_run(['--iterations', '2',
+                                        '--repeat-each', '4',
+                                        '--print', 'everything',
+                                        'passes/text.html', 'failures/expected/text.html'],
+                                       tests_included=True, host=host, record_results=True)
+        self.assertTrue("=> Results: 8/16 tests passed (50.0%)\n" in out.get())
+        self.assertTrue(err.get()[-2] == "All 16 tests ran as expected.\n")
+
     def test_run_chunk(self):
         # Test that we actually select the right chunk
         all_tests_run = get_tests_run(flatten_batches=True)
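The assertions in the new test follow from the multiplier: two tests, of which only passes/text.html passes, each run 4 * 2 times (failures/expected/text.html fails, but as expected, hence "All 16 tests ran as expected."). Checking the arithmetic in isolation (the format string mirrors the expected output line, not harness code):

    num_tests, repeat_each, iterations = 2, 4, 2
    total = num_tests * repeat_each * iterations   # 16 runs overall
    passed = 1 * repeat_each * iterations          # 8 passing runs

    summary = '%d/%d tests passed (%.1f%%)' % (passed, total, 100.0 * passed / total)
    assert summary == '8/16 tests passed (50.0%)'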