Title: [145152] trunk/Tools
Revision: 145152
Author: gl...@skynav.com
Date: 2013-03-07 16:38:45 -0800 (Thu, 07 Mar 2013)

Log Message

run-perf-tests should have a --repeat option
https://bugs.webkit.org/show_bug.cgi?id=100030

Reviewed by Ryosuke Niwa.

Add a --repeat option to run-perf-tests, with a default value of 1. When greater
than 1, the test set is run the specified number of times. Note that results
from multiple runs are not aggregated for statistical purposes.

Incidentally fixed typo: s/suceeds/succeeds/.

* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(PerfTestsRunner._parse_args): Add repeat option.
(PerfTestsRunner.run): Add outer repeat loop.
(PerfTestsRunner._generate_results): Split from original _generate_and_show_results.
(PerfTestsRunner._upload_and_show_results): Split from original _generate_and_show_results.
* Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py:
(MainTest._test_run_with_json_output): Add optional repeat argument for generating expected logs. Fix typo.
(MainTest._test_run_with_json_output.mock_upload_json): Fix typo.
(MainTest.test_run_with_json_output): Fix typo.
(MainTest.test_run_with_description): Fix typo.
(MainTest.test_run_respects_no_results): Fix typo.
(MainTest.test_run_with_slave_config_json): Fix typo.
(MainTest.test_run_with_multiple_repositories): Fix typo.
(MainTest.test_run_with_upload_json): Fix typo.
(MainTest.test_run_with_upload_json_should_generate_perf_webkit_json): Fix typo.
(MainTest.test_run_with_repeat): Add new test for repeat count semantics.
* Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
(MainTest.test_parse_args): Add test for repeat option parsing.
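
A minimal sketch of the semantics described above (run_test_set and
append_results are hypothetical stand-ins for the runner internals, not
webkitpy API):

    # With --repeat N the whole test set runs N times, producing one results
    # entry per pass; nothing is averaged across passes.
    def run_with_repeat(repeat, run_test_set, append_results):
        for run_count in xrange(1, repeat + 1):
            banner = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            print('Running tests%s' % banner)
            append_results(run_test_set())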

Modified Paths

trunk/Tools/ChangeLog
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

Diff

Modified: trunk/Tools/ChangeLog (145151 => 145152)


--- trunk/Tools/ChangeLog	2013-03-08 00:26:06 UTC (rev 145151)
+++ trunk/Tools/ChangeLog	2013-03-08 00:38:45 UTC (rev 145152)
@@ -1,3 +1,35 @@
+2013-03-07  Glenn Adams  <gl...@skynav.com>
+
+        run-perf-tests should have a --repeat option
+        https://bugs.webkit.org/show_bug.cgi?id=100030
+
+        Reviewed by Ryosuke Niwa.
+
+        Add a --repeat option to run-perf-tests, with a default value of 1. When greater
+        than 1, the test set is run the specified number of times. Note that results
+        from multiple runs are not aggregated for statistical purposes.
+
+        Incidentally fixed typo: s/suceeds/succeeds/.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner._parse_args): Add repeat option.
+        (PerfTestsRunner.run): Add outer repeat loop.
+        (PerfTestsRunner._generate_results): Split from original _generate_and_show_results.
+        (PerfTestsRunner._upload_and_show_results): Split from original _generate_and_show_results.
+        * Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py:
+        (MainTest._test_run_with_json_output): Add optional repeat argument for generating expected logs. Fix typo.
+        (MainTest._test_run_with_json_output.mock_upload_json): Fix typo.
+        (MainTest.test_run_with_json_output): Fix typo.
+        (MainTest.test_run_with_description): Fix typo.
+        (MainTest.test_run_respects_no_results): Fix typo.
+        (MainTest.test_run_with_slave_config_json): Fix typo.
+        (MainTest.test_run_with_multiple_repositories): Fix typo.
+        (MainTest.test_run_with_upload_json): Fix typo.
+        (MainTest.test_run_with_upload_json_should_generate_perf_webkit_json): Fix typo.
+        (MainTest.test_run_with_repeat): Add new test for repeat count semantics.
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (MainTest.test_parse_args): Add test for repeat option parsing.
+
 2013-03-07  Julie Parent  <jpar...@chromium.org>
 
         showAllRuns checkbox no longer works on the stats dashboard

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (145151 => 145152)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2013-03-08 00:26:06 UTC (rev 145151)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2013-03-08 00:38:45 UTC (rev 145152)
@@ -126,6 +126,8 @@
             optparse.make_option("--additional-drt-flag", action="append",
                 default=[], help="Additional command line flag to pass to DumpRenderTree "
                      "Specify multiple times to add multiple flags."),
+            optparse.make_option("--repeat", default=1, type="int",
+                help="Specify number of times to run test set (default: 1)."),
             ]
         return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
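
The new option can be exercised in isolation. A minimal sketch, assuming only
stock optparse (the one-option parser here is illustrative, not the runner's
real option list):

    import optparse

    parser = optparse.OptionParser(option_list=[
        optparse.make_option("--repeat", default=1, type="int",
            help="Specify number of times to run test set (default: 1)."),
    ])
    options, _ = parser.parse_args(["--repeat", "5"])
    assert options.repeat == 5   # explicit value is parsed as an int
    options, _ = parser.parse_args([])
    assert options.repeat == 1   # default applies when the flag is omitted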
 
@@ -177,24 +179,35 @@
             _log.error("Build not up to date for %s" % self._port._path_to_driver())
             return self.EXIT_CODE_BAD_BUILD
 
-        tests = self._collect_tests()
-        _log.info("Running %d tests" % len(tests))
+        run_count = 0
+        repeat = self._options.repeat
+        while (run_count < repeat):
+            run_count += 1
 
-        for test in tests:
-            if not test.prepare(self._options.time_out_ms):
-                return self.EXIT_CODE_BAD_PREPARATION
+            tests = self._collect_tests()
+            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
+            _log.info("Running %d tests%s" % (len(tests), runs))
 
-        try:
-            if needs_http:
-                self._start_http_servers()
-            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
+            for test in tests:
+                if not test.prepare(self._options.time_out_ms):
+                    return self.EXIT_CODE_BAD_PREPARATION
 
-        finally:
-            if needs_http:
-                self._stop_http_servers()
+            try:
+                if needs_http:
+                    self._start_http_servers()
+                unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
 
+            finally:
+                if needs_http:
+                    self._stop_http_servers()
+
+            if self._options.generate_results and not self._options.profile:
+                exit_code = self._generate_results()
+                if exit_code:
+                    return exit_code
+
         if self._options.generate_results and not self._options.profile:
-            exit_code = self._generate_and_show_results()
+            exit_code = self._upload_and_show_results()
             if exit_code:
                 return exit_code
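
Note the asymmetry the loop introduces: _generate_results runs once per pass,
while _upload_and_show_results runs once after all passes. A hedged skeleton
of that control flow (_StubRunner is an illustrative stand-in with bodies
elided, not a webkitpy class):

    class _StubRunner(object):
        def __init__(self, repeat, generate_results=True, profile=False):
            self._repeat, self._generate, self._profile = repeat, generate_results, profile

        def _generate_results(self):
            return 0   # per pass: merge and write JSON/HTML results

        def _upload_and_show_results(self):
            return 0   # after the loop: upload once, show the page once

        def run(self):
            for run_count in xrange(1, self._repeat + 1):
                # ... collect, prepare, and run the test set here ...
                if self._generate and not self._profile:
                    exit_code = self._generate_results()
                    if exit_code:
                        return exit_code
            if self._generate and not self._profile:
                exit_code = self._upload_and_show_results()
                if exit_code:
                    return exit_code
            return 0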
 
@@ -206,7 +219,10 @@
             return output_json_path
         return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
 
-    def _generate_and_show_results(self):
+    def _results_page_path(self):
+        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'
+
+    def _generate_results(self):
         options = self._options
         output_json_path = self._output_json_path()
         output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
@@ -220,18 +236,20 @@
         if not output:
             return self.EXIT_CODE_BAD_MERGE
 
-        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
-        self._generate_output_files(output_json_path, results_page_path, output)
+        self._generate_output_files(output_json_path, self._results_page_path(), output)
 
+    def _upload_and_show_results(self):
+        options = self._options
+
         if options.test_results_server:
             if options.test_results_server == 'webkit-perf.appspot.com':
                 options.test_results_server = 'perf.webkit.org'
 
-            if not self._upload_json(options.test_results_server, output_json_path):
+            if not self._upload_json(options.test_results_server, self._output_json_path()):
                 return self.EXIT_CODE_FAILED_UPLOADING
 
         if options.show_results:
-            self._port.show_results_html_file(results_page_path)
+            self._port.show_results_html_file(self._results_page_path())
 
     def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
         revisions = {}
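
The new _results_page_path helper simply derives the HTML page from the JSON
output path. The same derivation via os.path (the runner itself goes through
self._host.filesystem.splitext, a mockable equivalent):

    import os.path

    output_json_path = '/mock-checkout/output.json'   # the path the tests use
    results_page_path = os.path.splitext(output_json_path)[0] + '.html'
    assert results_page_path == '/mock-checkout/output.html'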

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py (145151 => 145152)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py	2013-03-08 00:26:06 UTC (rev 145151)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py	2013-03-08 00:38:45 UTC (rev 145152)
@@ -305,7 +305,7 @@
         self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
         self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
 
-    def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, results_shown=True, expected_exit_code=0):
+    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1):
         filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
         filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
 
@@ -316,8 +316,8 @@
             self.assertIn(hostname, ['some.host'])
             self.assertIn(json_path, ['/mock-checkout/output.json'])
             self.assertIn(host_path, [None, '/api/report'])
-            uploaded[0] = upload_suceeds
-            return upload_suceeds
+            uploaded[0] = upload_succeeds
+            return upload_succeeds
 
         runner._upload_json = mock_upload_json
         runner._timestamp = 123456789
@@ -330,12 +330,15 @@
             stdout, stderr, logs = output_capture.restore_output()
 
         if not expected_exit_code:
-            expected_logs = 'Running 2 tests\n' + EventTargetWrapperTestData.output + InspectorPassTestData.output
+            expected_logs = ''
+            for i in xrange(repeat):
+                runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
+                expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
             if results_shown:
                 expected_logs += 'MOCK: user.open_url: file://...\n'
             self.assertEqual(self._normalize_output(logs), expected_logs)
 
-        self.assertEqual(uploaded[0], upload_suceeds)
+        self.assertEqual(uploaded[0], upload_succeeds)
 
         return logs
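
A standalone illustration of the banner format the expected logs assert:
unnumbered for a single pass, numbered "(Run i of N)" when repeat > 1:

    for repeat in (1, 2):
        for i in xrange(repeat):
            runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
            print('Running 2 tests%s' % runs)
    # -> Running 2 tests
    #    Running 2 tests (Run 1 of 2)
    #    Running 2 tests (Run 2 of 2)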
 
@@ -347,7 +350,7 @@
     def test_run_with_json_output(self):
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host'])
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
         self.assertEqual(self._load_output_json(runner), [{
             "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
             "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
@@ -359,7 +362,7 @@
     def test_run_with_description(self):
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host', '--description', 'some description'])
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
         self.assertEqual(self._load_output_json(runner), [{
             "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
             "tests": self._event_target_wrapper_and_inspector_results,
@@ -377,7 +380,7 @@
     def test_run_respects_no_results(self):
         runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host', '--no-results'])
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, results_shown=False)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
         self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
 
     def test_run_generates_json_by_default(self):
@@ -479,7 +482,7 @@
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
         port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
         self.assertEqual(self._load_output_json(runner), [{
             "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
             "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
@@ -498,7 +501,7 @@
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host'])
         port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
         self.assertEqual(self._load_output_json(runner), [{
             "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
             "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
@@ -508,13 +511,13 @@
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
 
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
         generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
         self.assertEqual(generated_json[0]['platform'], 'platform1')
         self.assertEqual(generated_json[0]['builderName'], 'builder1')
         self.assertEqual(generated_json[0]['buildNumber'], 123)
 
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
 
     def test_run_with_upload_json_should_generate_perf_webkit_json(self):
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
@@ -522,7 +525,7 @@
             '--slave-config-json-path=/mock-checkout/slave-config.json'])
         port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
 
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
         generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
         self.assertTrue(isinstance(generated_json, list))
         self.assertEqual(len(generated_json), 1)
@@ -542,3 +545,24 @@
         self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
             'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
             'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
+
+    def test_run_with_repeat(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--test-results-server=some.host', '--repeat', '5'])
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
+        self.assertEqual(self._load_output_json(runner), [
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+            {"buildTime": "2013-02-08T15:19:37.460000",
+            "tests": self._event_target_wrapper_and_inspector_results,
+            "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
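
As the expectation shows, --repeat 5 leaves five separate, identical result
dictionaries in output.json rather than one aggregated entry, matching the
ChangeLog note. Equivalently, as a sketch (run_entry is a hypothetical
stand-in for the shared per-run dict):

    run_entry = {
        "buildTime": "2013-02-08T15:19:37.460000",
        "tests": {},  # stands in for the full per-test results
        "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}},
    }
    expected = [run_entry] * 5  # one entry per pass, none aggregated
    assert len(expected) == 5 and expected[0] is expected[4]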

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py (145151 => 145152)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py	2013-03-08 00:26:06 UTC (rev 145151)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py	2013-03-08 00:38:45 UTC (rev 145152)
@@ -137,6 +137,7 @@
                 '--test-results-server=somehost',
                 '--additional-drt-flag=--enable-threaded-parser',
                 '--additional-drt-flag=--awesomesauce',
+                '--repeat=5',
                 '--debug'])
         self.assertTrue(options.build)
         self.assertEqual(options.build_directory, 'folder42')
@@ -151,6 +152,7 @@
         self.assertEqual(options.slave_config_json_path, 'a/source.json')
         self.assertEqual(options.test_results_server, 'somehost')
         self.assertEqual(options.additional_drt_flag, ['--enable-threaded-parser', '--awesomesauce'])
+        self.assertEqual(options.repeat, 5)
 
     def test_upload_json(self):
         runner, port = self.create_runner()