Modified: trunk/Tools/ChangeLog (220291 => 220292)
--- trunk/Tools/ChangeLog 2017-08-04 20:50:51 UTC (rev 220291)
+++ trunk/Tools/ChangeLog 2017-08-04 21:18:17 UTC (rev 220292)
@@ -1,3 +1,21 @@
+2017-08-04 Carlos Alberto Lopez Perez <clo...@igalia.com>
+
+ REGRESSION(r219857): run-benchmark --allplans broken
+ https://bugs.webkit.org/show_bug.cgi?id=175186
+
+ Reviewed by Saam Barati.
+
+ r219857 forgot to also update the call to BenchmarkRunner() that
+ is done when the script is run with --allplans.
+
+ To fix this (and avoid future issues like this), let's factorize
+ the calls to the benchmark runner in a run_benchmark_plan()
+ function.
+
+ * Scripts/webkitpy/benchmark_runner/run_benchmark.py:
+ (run_benchmark_plan):
+ (start):
+
2017-08-04 Aakash Jain <aakash_j...@apple.com>
Dashboard bubbles sometimes show failure count instead of crash count
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py (220291 => 220292)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py 2017-08-04 20:50:51 UTC (rev 220291)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py 2017-08-04 21:18:17 UTC (rev 220292)
@@ -60,6 +60,12 @@
return args
+def run_benchmark_plan(args, plan):
+ benchmark_runner_class = benchmark_runner_subclasses[args.driver]
+ runner = benchmark_runner_class(plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
+ runner.execute()
+
+
def start(args):
if args.json_file:
results_json = json.load(open(args.json_file, 'r'))
@@ -83,8 +89,7 @@
continue
_log.info('Starting benchmark plan: %s' % plan)
try:
- runner = BenchmarkRunner(plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
- runner.execute()
+ run_benchmark_plan(args, plan)
_log.info('Finished benchmark plan: %s' % plan)
except KeyboardInterrupt:
raise
@@ -94,9 +99,7 @@
if failed:
_log.error('The following benchmark plans have failed: %s' % failed)
return len(failed)
- benchmark_runner_class = benchmark_runner_subclasses[args.driver]
- runner = benchmark_runner_class(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id)
- runner.execute()
+ run_benchmark_plan(args, args.plan)
def format_logger(logger):