Modified: trunk/Tools/ChangeLog (239738 => 239739)
--- trunk/Tools/ChangeLog 2019-01-08 21:05:24 UTC (rev 239738)
+++ trunk/Tools/ChangeLog 2019-01-08 21:15:04 UTC (rev 239739)
@@ -1,3 +1,31 @@
+2019-01-08 Zhifei Fang <zhifei_f...@apple.com>
+
+ Layout tests will generate a perf metric file in the results dir.
+ https://bugs.webkit.org/show_bug.cgi?id=192030
+ <rdar://problem/32779516>
+
+ Reviewed by Aakash Jain.
+
+ Layout test running times will be collected into a perf metric file.
+ For now, instead of outputting the running time of every test (which is huge),
+ we aggregate them by test directories that are at most two levels deep.
+
+ * Scripts/webkitpy/layout_tests/controllers/manager.py:
+ (Manager._end_test_run):
+ (Manager._output_perf_metrics):
+ (Manager._print_expectation_line_for_test):
+ * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
+ (add_test_perf_metric):
+ (test_perf_metrics):
+ * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
+ (JSONGeneratorTest.test_test_timings_trie):
+ (JSONGeneratorTest):
+ (JSONGeneratorTest.test_test_perf_metrics):
+ * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+ (RebaselineTest.test_reset_results):
+ (RebaselineTest.test_missing_results):
+ (RebaselineTest.test_new_baseline):
+
2019-01-08  Patrick Griffis  <pgrif...@igalia.com>

        [WPE][GTK] Add php-json to install-dependencies
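
For illustration, the layout_test_perf_metrics.json file described above has the shape below. The directory names and timing values are hypothetical; the structure follows perf_metrics_for_test in the json_results_generator.py hunk further down:

    {
      "layout_tests": {
        "metrics": {"Time": ["Total", "Arithmetic"]},
        "tests": {
          "fast": {
            "metrics": {"Time": ["Total", "Arithmetic"]},
            "tests": {
              "css": {"metrics": {"Time": {"current": [2600]}}}
            }
          },
          "top-level-test.html": {"metrics": {"Time": {"current": [12]}}}
        }
      },
      "layout_tests_run_time": {
        "metrics": {"Time": {"current": [600]}}
      }
    }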
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py (239738 => 239739)
--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 2019-01-08 21:05:24 UTC (rev 239738)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 2019-01-08 21:15:04 UTC (rev 239739)
@@ -339,6 +339,7 @@
exit_code = -1
if not self._options.dry_run:
self._port.print_leaks_summary()
+ self._output_perf_metrics(end_time - start_time, initial_results)
self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)
results_path = self._filesystem.join(self._results_directory, "results.html")
@@ -431,6 +432,11 @@
(result.type != test_expectations.MISSING) and
(result.type != test_expectations.CRASH or include_crashes))]
+ def _output_perf_metrics(self, run_time, initial_results):
+ perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
+ perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
+ self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))
+
def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
"""Writes the results of the test run as JSON files into the results
dir and upload the files to the appengine server.
@@ -569,7 +575,7 @@
def _print_expectation_line_for_test(self, format_string, test):
line = self._expectations.model().get_expectation_line(test)
print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or ''))
-
+
def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
if tests_to_skip:
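
The manager-side hook is small: it delegates aggregation to json_results_generator.perf_metrics_for_test and writes the serialized result next to the other result files. A minimal standalone sketch of that write path, with a hypothetical in-memory stand-in for webkitpy's filesystem object:

    import json

    class InMemoryFileSystem(object):
        # Hypothetical stand-in for the webkitpy filesystem wrapper the
        # Manager holds; only the two methods the hook needs are sketched.
        def __init__(self):
            self.files = {}

        def join(self, *parts):
            return '/'.join(parts)

        def write_text_file(self, path, contents):
            self.files[path] = contents

    def output_perf_metrics(filesystem, results_directory, perf_metrics_json):
        # Mirrors Manager._output_perf_metrics: serialize the aggregated
        # metrics and drop them into the results directory.
        path = filesystem.join(results_directory, 'layout_test_perf_metrics.json')
        filesystem.write_text_file(path, json.dumps(perf_metrics_json))

    fs = InMemoryFileSystem()
    output_perf_metrics(fs, '/tmp/layout-test-results',
                        {'layout_tests_run_time': {'metrics': {'Time': {'current': [1200]}}}})
    assert list(fs.files) == ['/tmp/layout-test-results/layout_test_perf_metrics.json']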
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py (239738 => 239739)
--- trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py 2019-01-08 21:05:24 UTC (rev 239738)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py 2019-01-08 21:15:04 UTC (rev 239739)
@@ -114,12 +114,74 @@
trie = {}
for test_result in individual_test_timings:
test = test_result.test_name
-
add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
return trie
+def _add_perf_metric_for_test(path, time, tests, depth, depth_limit):
+ """
+ Aggregate a test's run time into the result tree, collapsing directories deeper than depth_limit.
+ """
+ if not "/" in path:
+ tests["tests"][path] = {
+ "metrics": {
+ "Time": {
+ "current": [time],
+ }}}
+ return
+
+ directory, slash, rest = path.partition("/")
+ if depth == depth_limit:
+ if directory not in tests["tests"]:
+ tests["tests"][directory] = {
+ "metrics": {
+ "Time": {
+ "current": [time],
+ }}}
+ else:
+ tests["tests"][directory]["metrics"]["Time"]["current"][0] += time
+ return
+ else:
+ if directory not in tests["tests"]:
+ tests["tests"][directory] = {
+ "metrics": {
+ "Time": ["Total", "Arithmetic"],
+ },
+ "tests": {}
+ }
+ _add_perf_metric_for_test(rest, time, tests["tests"][directory], depth + 1, depth_limit)
+
+
+def perf_metrics_for_test(run_time, individual_test_timings):
+ """
+ Output two performance metrics:
+ 1. run time: how much time the layout tests script consumed in total
+ 2. run time aggregated over first- and second-level test directories
+ """
+ total_run_time = 0
+
+ for test_result in individual_test_timings:
+ total_run_time += int(1000 * test_result.test_run_time)
+
+ perf_metric = {
+ "layout_tests": {
+ "metrics": {
+ "Time": ["Total", "Arithmetic"],
+ },
+ "tests": {}
+ },
+ "layout_tests_run_time": {
+ "metrics": {
+ "Time": {"current": [run_time]},
+ }}}
+ for test_result in individual_test_timings:
+ test = test_result.test_name
+ # For now, we only send two levels of directories.
+ _add_perf_metric_for_test(test, int(1000 * test_result.test_run_time), perf_metric["layout_tests"], 1, 2)
+ return perf_metric
+
+
# FIXME: We already have a TestResult class in test_results.py
class TestResult(object):
"""A simple class that represents a single test result."""
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py (239738 => 239739)
--- trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py 2019-01-08 21:05:24 UTC (rev 239738)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py 2019-01-08 21:15:04 UTC (rev 239739)
@@ -226,3 +226,42 @@
}
self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
+
+ def test_perf_metrics_for_test(self):
+ individual_test_timings = []
+ individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2))
+ individual_test_timings.append(json_results_generator.TestResult('foo/bar/ba.html', elapsed_time=1.4))
+ individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
+ metrics = json_results_generator.perf_metrics_for_test(1200, individual_test_timings)
+
+ expected_metrics = {
+ "layout_tests": {
+ "metrics": {
+ "Time": ["Total", "Arithmetic"],
+ },
+ "tests": {
+ "foo": {
+ "metrics": {
+ "Time": ["Total", "Arithmetic"],
+ },
+ "tests": {
+ "bar": {
+ "metrics": {
+ "Time": {"current": [2600]},
+ }
+ }
+ }
+ },
+ "bar.html": {
+ "metrics": {
+ "Time": {"current": [0]},
+ }
+ }
+ }
+ },
+ "layout_tests_run_time": {
+ "metrics": {
+ "Time": {"current": [1200]},
+ }
+ }}
+ self.assertEqual(json.dumps(metrics), json.dumps(expected_metrics))
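
The expected numbers fall out of the truncating millisecond conversion: int(1000 * 1.2) + int(1000 * 1.4) = 2600 under foo/bar, and int(1000 * 0.0001) = 0 for bar.html. One caveat worth noting: comparing json.dumps() strings only passes if both dicts happen to serialize their keys in the same order. A sketch of an order-insensitive alternative for the final assertion:

    # Compare the structures themselves, or canonicalize the key order.
    self.assertEqual(metrics, expected_metrics)
    # or:
    self.assertEqual(json.dumps(metrics, sort_keys=True),
                     json.dumps(expected_metrics, sort_keys=True))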
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (239738 => 239739)
--- trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py 2019-01-08 21:05:24 UTC (rev 239738)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py 2019-01-08 21:15:04 UTC (rev 239739)
@@ -879,7 +879,7 @@
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
- self.assertEqual(len(file_list), 8)
+ self.assertEqual(len(file_list), 9)
self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
@@ -895,7 +895,7 @@
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
- self.assertEqual(len(file_list), 10)
+ self.assertEqual(len(file_list), 11)
self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
@@ -909,7 +909,7 @@
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
- self.assertEqual(len(file_list), 8)
+ self.assertEqual(len(file_list), 9)
self.assertBaselines(file_list,
"platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list,