Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py (144362 => 144363)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py 2013-02-28 21:51:25 UTC (rev 144362)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py 2013-02-28 21:52:22 UTC (rev 144363)
@@ -151,14 +151,42 @@
         return results
+    _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
+    _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
+    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
+    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
+
     def _run_with_driver(self, driver, time_out_ms):
         output = self.run_single(driver, self.test_path(), time_out_ms)
         self._filter_output(output)
         if self.run_failed(output):
             return None
-        return self.parse_output(output)
+        current_metric = None
+        results = []
+        for line in re.split('\n', output.text):
+            if not line:
+                continue
+            description_match = self._description_regex.match(line)
+            metric_match = self._metrics_regex.match(line)
+            score = self._score_regex.match(line)
+
+            if description_match:
+                self._description = description_match.group('description')
+            elif metric_match:
+                current_metric = metric_match.group('metric').replace(' ', '')
+            elif score:
+                key = score.group('key')
+                if key == 'values' and results != None:
+                    values = [float(number) for number in score.group('value').split(', ')]
+                    results.append(PerfTestMetric(current_metric, score.group('unit'), values))
+            else:
+                results = None
+                _log.error('ERROR: ' + line)
+
+        return results
+
     def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
         return driver.run_test(DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)
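
The hunk above folds the old parse_output() logic directly into _run_with_driver(). A rough standalone sketch of that parsing, run against a made-up block of driver output and collecting plain (metric, unit, values) tuples in place of PerfTestMetric objects; the sample lines and the tuple form are illustrative assumptions, not part of the change:

    import re

    # Regexes copied from the hunk above; the sample lines are invented for illustration.
    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
    _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')

    sample_lines = [
        'Time:',
        'values 1504, 1505, 1510, 1504, 1507 ms',
        'avg 1506 ms',
    ]

    current_metric = None
    results = []
    for line in sample_lines:
        metric_match = _metrics_regex.match(line)
        score = _score_regex.match(line)
        if metric_match:
            # 'JS Heap' would become 'JSHeap'; 'Time' stays 'Time'.
            current_metric = metric_match.group('metric').replace(' ', '')
        elif score:
            if score.group('key') == 'values':
                values = [float(number) for number in score.group('value').split(', ')]
                # _run_with_driver() wraps these in PerfTestMetric; a tuple stands in here.
                results.append((current_metric, score.group('unit'), values))
        else:
            print('ERROR: ' + line)  # unrecognized line

    print(results)  # [('Time', 'ms', [1504.0, 1505.0, 1510.0, 1504.0, 1507.0])]

The real method additionally captures a "Description:" line and resets results to None when it hits an unrecognized line, as shown in the diff.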
@@ -177,7 +205,8 @@
         return True
-    def _should_ignore_line(self, regexps, line):
+    @staticmethod
+    def _should_ignore_line(regexps, line):
         if not line:
             return True
         for regexp in regexps:
@@ -191,9 +220,6 @@
         re.compile(r'^\[INFO:'),
     ]
-    def _should_ignore_line_in_stderr(self, line):
-        return self._should_ignore_line(self._lines_to_ignore_in_stderr, line)
-
     _lines_to_ignore_in_parser_result = [
         re.compile(r'^Running \d+ times$'),
         re.compile(r'^Ignoring warm-up '),
@@ -210,47 +236,12 @@
         re.compile(r'(?P<name>.+): \[(?P<values>(\d+(.\d+)?,\s+)*\d+(.\d+)?)\]'),
     ]
-    def _should_ignore_line_in_parser_test_result(self, line):
-        return self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)
-
     def _filter_output(self, output):
         if output.error:
-            filtered_error = '\n'.join([line for line in re.split('\n', output.error) if not self._should_ignore_line_in_stderr(line)])
-            output.error = filtered_error if filtered_error else None
+            output.error = '\n'.join([line for line in re.split('\n', output.error) if not self._should_ignore_line(self._lines_to_ignore_in_stderr, line)])
         if output.text:
-            output.text = '\n'.join([line for line in re.split('\n', output.text) if not self._should_ignore_line_in_parser_test_result(line)])
+            output.text = '\n'.join([line for line in re.split('\n', output.text) if not self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)])
-    _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
-    _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
-    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
-    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
-
-    def parse_output(self, output):
-        current_metric = None
-        results = []
-        for line in re.split('\n', output.text):
-            if not line:
-                continue
-
-            description_match = self._description_regex.match(line)
-            metric_match = self._metrics_regex.match(line)
-            score = self._score_regex.match(line)
-
-            if description_match:
-                self._description = description_match.group('description')
-            elif metric_match:
-                current_metric = metric_match.group('metric').replace(' ', '')
-            elif score:
-                key = score.group('key')
-                if key == 'values' and results != None:
-                    values = [float(number) for number in score.group('value').split(', ')]
-                    results.append(PerfTestMetric(current_metric, score.group('unit'), values))
-            else:
-                results = None
-                _log.error('ERROR: ' + line)
-
-        return results
-
     def output_statistics(self, test_name, results):
         unit = results['unit']
         _log.info('RESULT %s= %s %s' % (test_name.replace(':', ': ').replace('/', ': '), results['avg'], unit))
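
_filter_output() now passes one of the two class-level pattern lists straight to the shared _should_ignore_line() helper. A minimal sketch of that filtering idea, assuming the helper tests each pattern with search(); the pattern list and sample stderr below are made up, apart from the ^\[INFO: pattern quoted from the diff:

    import re

    # Hypothetical stand-ins for _lines_to_ignore_in_stderr; only the idea is shown.
    lines_to_ignore = [
        re.compile(r'^Unknown option:'),
        re.compile(r'^\[INFO:'),
    ]

    def should_ignore_line(regexps, line):
        # Blank lines are dropped (as in the diff); each pattern is assumed to be tried with search().
        return not line or any(regexp.search(line) for regexp in regexps)

    stderr = 'Unknown option: --foo-bar\n[INFO:noise.cpp(1)] chatter\n[ERROR:main.cc] real problem'
    print('\n'.join(line for line in stderr.split('\n')
                    if not should_ignore_line(lines_to_ignore, line)))
    # [ERROR:main.cc] real problem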
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py (144362 => 144363)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py 2013-02-28 21:51:25 UTC (rev 144362)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py 2013-02-28 21:52:22 UTC (rev 144363)
@@ -106,8 +106,8 @@
 class TestPerfTest(unittest.TestCase):
     def _assert_results_are_correct(self, test, output):
-        test._filter_output(output)
-        parsed_results = test.parse_output(output)
+        test.run_single = lambda driver, path, time_out_ms: output
+        parsed_results = test._run_with_driver(None, None)
         self.assertEqual(len(parsed_results), 1)
         some_test_results = parsed_results[0].to_dict()
         self.assertItemsEqual(some_test_results.keys(), ['avg', 'max', 'median', 'min', 'stdev', 'unit', 'values'])
@@ -162,8 +162,8 @@
         output_capture.capture_output()
         try:
             test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
-            test._filter_output(output)
-            self.assertIsNone(test.parse_output(output))
+            test.run_single = lambda driver, path, time_out_ms: output
+            self.assertIsNone(test._run_with_driver(None, None))
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
         self.assertEqual(actual_stdout, '')
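
The updated tests no longer call parse_output() directly; they assign a lambda to run_single on the test instance and let _run_with_driver() run end to end against a canned DriverOutput. A generic illustration of that stubbing pattern (Runner and FakeOutput are placeholders, not webkitpy classes):

    class FakeOutput(object):
        def __init__(self, text, error=None):
            self.text = text
            self.error = error

    class Runner(object):
        def run_single(self, driver, path, time_out_ms):
            raise NotImplementedError('normally talks to a real driver')

        def run_with_driver(self, driver, time_out_ms):
            output = self.run_single(driver, '/some/test', time_out_ms)
            return output.text.splitlines()

    runner = Runner()
    # Replace the driver call on the instance so the parsing path runs without a driver.
    runner.run_single = lambda driver, path, time_out_ms: FakeOutput('Time:\navg 1506 ms')
    assert runner.run_with_driver(None, None) == ['Time:', 'avg 1506 ms']

Because the lambda is assigned on the instance it is never bound as a method, so it takes only (driver, path, time_out_ms) with no self, matching the stubs in the hunks above.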
@@ -190,22 +190,19 @@
     def test_ignored_stderr_lines(self):
         test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
-        ignored_lines = [
-            "Unknown option: --foo-bar",
-            "[WARNING:proxy_service.cc] bad moon a-rising",
-            "[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/",
-        ]
-        for line in ignored_lines:
-            self.assertTrue(test._should_ignore_line_in_stderr(line))
+        output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
+Unknown option: --foo-bar
+Should not be ignored
+[WARNING:proxy_service.cc] bad moon a-rising
+[WARNING:chrome.cc] Something went wrong
+[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
+[ERROR:main.cc] The sky has fallen""")
+        test._filter_output(output_with_lines_to_ignore)
+        self.assertEqual(output_with_lines_to_ignore.error,
+            "Should not be ignored\n"
+            "[WARNING:chrome.cc] Something went wrong\n"
+            "[ERROR:main.cc] The sky has fallen")
-        non_ignored_lines = [
-            "Should not be ignored",
-            "[WARNING:chrome.cc] Something went wrong",
-            "[ERROR:main.cc] The sky has fallen",
-        ]
-        for line in non_ignored_lines:
-            self.assertFalse(test._should_ignore_line_in_stderr(line))
-
     def test_parse_output_with_subtests(self):
         output = DriverOutput("""
 Running 20 times