Modified: trunk/PerformanceTests/ChangeLog (239292 => 239293)
--- trunk/PerformanceTests/ChangeLog 2018-12-17 23:01:44 UTC (rev 239292)
+++ trunk/PerformanceTests/ChangeLog 2018-12-17 23:06:47 UTC (rev 239293)
@@ -1,5 +1,54 @@
2018-12-17 Suresh Koppisetty <skoppise...@apple.com>
+ Add "-o/--output" option to startup.py and new_tab.py benchmark scripts to save the results in json format.
+ https://bugs.webkit.org/show_bug.cgi?id=192385
+
+ Reviewed by Ryosuke Niwa.
+
+ Sample json output for the new tab benchmark script after running 2 groups of 2 iterations each. Values are in milliseconds.
+ {
+     "NewTabBenchmark": {
+         "metrics": {
+             "Time": {
+                 "current": [
+                     [
+                         410.2939453125,
+                         307.81494140625
+                     ],
+                     [
+                         340.616943359375,
+                         265.94384765625
+                     ]
+                 ]
+             }
+         }
+     }
+ }
+
+ Sample json output for the startup time benchmark script after running 2 iterations. Values are in milliseconds.
+ {
+     "StartupBenchmark": {
+         "metrics": {
+             "Time": {
+                 "current": [
+                     [
+                         1415.2099609375,
+                         1439.552978515625
+                     ]
+                 ]
+             }
+         }
+     }
+ }
+
+ * LaunchTime/launch_time.py:
+ * LaunchTime/new_tab.py:
+ (NewTabBenchmark.get_test_name):
+ * LaunchTime/startup.py:
+ (StartupBenchmark.get_test_name):
+
+2018-12-17 Suresh Koppisetty <skoppise...@apple.com>
+
Import FeedbackServer only if "-f/--feedback-in-browser" option is enabled.
https://bugs.webkit.org/show_bug.cgi?id=192378
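
The samples in the new ChangeLog entry above come from running the scripts with the new option, e.g. `python new_tab.py -o results.json` (the path is arbitrary). A minimal sketch of loading such a file and summarizing it, assuming the layout shown in those samples:

    import json

    # Load a results file written via "-o results.json" (hypothetical path).
    with open('results.json') as f:
        results = json.load(f)

    # "current" holds one list per group; each list holds per-iteration times in milliseconds.
    for test_name, data in results.items():
        for index, group in enumerate(data['metrics']['Time']['current']):
            mean = sum(group) / len(group)
            print('{} group {}: mean {:.2f} ms over {} iterations'.format(
                test_name, index + 1, mean, len(group)))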
Modified: trunk/PerformanceTests/LaunchTime/launch_time.py (239292 => 239293)
--- trunk/PerformanceTests/LaunchTime/launch_time.py 2018-12-17 23:01:44 UTC (rev 239292)
+++ trunk/PerformanceTests/LaunchTime/launch_time.py 2018-12-17 23:06:47 UTC (rev 239293)
@@ -5,6 +5,7 @@
from math import sqrt
from operator import mul
import os
+import json
from subprocess import call, check_output
import sys
import threading
@@ -82,6 +83,8 @@
self._app_name = None
self._verbose = False
self._feedback_in_browser = False
+ self._save_results_to_json = False
+ self._json_results_path = None
self._do_not_ignore_first_result = False
self._iterations = 5
self._browser_bundle_path = '/Applications/Safari.app'
@@ -109,6 +112,8 @@
help="print each iteration's time")
self.argument_parser.add_argument('-f', '--feedback-in-browser', action='store_true',
help="show benchmark results in browser (default: {})".format(self._feedback_in_browser))
+ self.argument_parser.add_argument('-o', '--output', type=str,
+ help='saves benchmark results in json format (default: {})'.format(self._json_results_path))
self.will_parse_arguments()
args = self.argument_parser.parse_args()
@@ -120,6 +125,9 @@
self._verbose = args.verbose
if args.feedback_in_browser is not None:
self._feedback_in_browser = args.feedback_in_browser
+ if args.output:
+ self._save_results_to_json = True
+ self._json_results_path = args.output
path_len = len(self._browser_bundle_path)
start_index = self._browser_bundle_path.rfind('/', 0, path_len)
end_index = self._browser_bundle_path.rfind('.', 0, path_len)
@@ -248,6 +256,9 @@
try:
group_means = []
+ if self._save_results_to_json:
+ resultsDict = {self.get_test_name(): {"metrics": {"Time": {"current": []}}}}
+
results_by_iteration_number = [[] for _ in range(self._iterations)]
group = 1
@@ -273,6 +284,9 @@
if not self._verbose:
print ''
+ if self._save_results_to_json:
+ resultsDict[self.get_test_name()]["metrics"]["Time"]["current"].append(results)
+
mean, stdev = self._compute_results(results)
self.log_verbose('RESULTS:\n')
self.log_verbose('mean: {} ms\n'.format(mean))
@@ -289,6 +303,10 @@
if self._feedback_in_browser:
self.launch_browser()
+ if self._save_results_to_json and self._json_results_path:
+ with open(self._json_results_path, "w") as jsonFile:
+ json.dump(resultsDict, jsonFile, indent=4, separators=(',', ': '))
+
means_by_iteration_number = []
if len(results_by_iteration_number) > 1 and not self._do_not_ignore_first_result:
results_by_iteration_number = results_by_iteration_number[1:]
@@ -319,3 +337,6 @@
def did_parse_arguments(self, args):
pass
+
+ def get_test_name(self):
+ return "LaunchTimeBenchmark"
Modified: trunk/PerformanceTests/LaunchTime/new_tab.py (239292 => 239293)
--- trunk/PerformanceTests/LaunchTime/new_tab.py 2018-12-17 23:01:44 UTC (rev 239292)
+++ trunk/PerformanceTests/LaunchTime/new_tab.py 2018-12-17 23:06:47 UTC (rev 239293)
@@ -55,6 +55,9 @@
def group_init(self):
self.launch_browser()
+ def get_test_name(self):
+ return "NewTabBenchmark"
+
def will_parse_arguments(self):
self.argument_parser.add_argument('-g', '--groups', type=int,
help='number of groups of iterations to run (default: {})'.format(self.iteration_groups))
Modified: trunk/PerformanceTests/LaunchTime/startup.py (239292 => 239293)
--- trunk/PerformanceTests/LaunchTime/startup.py 2018-12-17 23:01:44 UTC (rev 239292)
+++ trunk/PerformanceTests/LaunchTime/startup.py 2018-12-17 23:06:47 UTC (rev 239293)
@@ -24,6 +24,9 @@
self.quit_browser()
return result
+ def get_test_name(self):
+ return "StartupBenchmark"
+
@staticmethod
def ResponseHandler(startup_benchmark):
class Handler(DefaultLaunchTimeHandler):