Modified: trunk/Tools/ChangeLog (235755 => 235756)
--- trunk/Tools/ChangeLog 2018-09-06 20:47:55 UTC (rev 235755)
+++ trunk/Tools/ChangeLog 2018-09-06 21:09:14 UTC (rev 235756)
@@ -1,3 +1,18 @@
+2018-09-06 Commit Queue <commit-queue@webkit.org>
+
+ Unreviewed, rolling out r235755.
+ https://bugs.webkit.org/show_bug.cgi?id=189367
+
+ Didn't address the review comment (Requested by rniwa on
+ #webkit).
+
+ Reverted changeset:
+
+ "BenchmarkResults.format should support specifying depth of
+ tests to show."
+ https://bugs.webkit.org/show_bug.cgi?id=189135
+ https://trac.webkit.org/changeset/235755
+
2018-08-29 Dewei Zhu <dewei_zhu@apple.com>
BenchmarkResults.format should support specifying depth of tests to show.
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py (235755 => 235756)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py 2018-09-06 20:47:55 UTC (rev 235755)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py 2018-09-06 21:09:14 UTC (rev 235756)
@@ -48,13 +48,11 @@
self._lint_results(results)
self._results = self._aggregate_results(results)
- def format(self, scale_unit=True, show_iteration_values=False, max_depth=None):
- return self._format_tests(self._results, scale_unit, show_iteration_values, max_depth)
+ def format(self, scale_unit=True, show_iteration_values=False):
+ return self._format_tests(self._results, scale_unit, show_iteration_values)
@classmethod
- def _format_tests(cls, tests, scale_unit, show_iteration_values, max_depth, indent=''):
- if max_depth is not None and max_depth <= 0:
- return ''
+ def _format_tests(cls, tests, scale_unit, show_iteration_values, indent=''):
output = ''
config_name = 'current'
for test_name in sorted(tests.keys()):
@@ -75,7 +73,7 @@
output += aggregator_name + ':'
output += ' ' + cls._format_values(metric_name, metric[aggregator_name][config_name], scale_unit, show_iteration_values) + '\n'
if 'tests' in test:
- output += cls._format_tests(test['tests'], scale_unit, show_iteration_values, max_depth - 1 if max_depth else None, indent=(indent + ' ' * len(test_name)))
+ output += cls._format_tests(test['tests'], scale_unit, show_iteration_values, indent=(indent + ' ' * len(test_name)))
return output
@classmethod
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py (235755 => 235756)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py 2018-09-06 20:47:55 UTC (rev 235755)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py 2018-09-06 21:09:14 UTC (rev 235756)
@@ -55,18 +55,6 @@
SubTest2:Time: 5.0ms stdev=20.0%
'''[1:])
- def test_format_with_depth_limit(self):
- result = BenchmarkResults({'SomeTest': {
- 'metrics': {'Time': ['Total', 'Arithmetic']},
- 'tests': {
- 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
- 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6]}}}}}})
- self.assertEqual(result.format(max_depth=1), '''
-SomeTest:Time:Arithmetic: 3.0ms stdev=33.3%
- :Time:Total: 7.0ms stdev=28.6%
-'''[1:])
- self.assertEqual(result.format(max_depth=0), "")
-
def test_format_values_with_large_error(self):
self.assertEqual(BenchmarkResults._format_values('Runs', [1, 2, 3]), '2.0/s stdev=50.0%')
self.assertEqual(BenchmarkResults._format_values('Runs', [10, 20, 30]), '20/s stdev=50.0%')