Reviewers: Jakob,

Message:
PTAL

Description:
Allow benchmarks to provide the standard error.

Some benchmarks ship their own runner, which reports an
overall average and a standard error. This change enables
extracting that value in the same way as the other measurements.

Such benchmarks should only be run once. If a benchmark
specifies multiple runs but provides a standard error,
a warning is issued that makes the build fail on the
buildbot side.
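
For illustration, a suite could opt in roughly like this (a sketch
modeled on the V8_JSON fixture in the tests below; the exact set of
other keys is an assumption, and the %s placeholder is substituted
with each trace's name, just as for results_regexp):

  # Hypothetical suite configuration (illustration only).
  SUITE = {
    "path": ["."],
    "name": "test",
    "binary": "d7",
    "main": "run.js",
    "run_count": 1,  # Must stay 1 when stderr_regexp is given.
    "results_regexp": "^%s: (.+)$",
    "stderr_regexp": "^%s\-stderr: (.+)$",
    "benchmarks": [
      {"name": "Richards"},
      {"name": "DeltaBlue"},
    ],
  }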

TEST=python -m unittest run_benchmarks_test
BUG=393947
LOG=n

Please review this at https://codereview.chromium.org/395633012/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+76, -15 lines):
  M tools/run_benchmarks.py
  M tools/unittests/run_benchmarks_test.py


Index: tools/run_benchmarks.py
diff --git a/tools/run_benchmarks.py b/tools/run_benchmarks.py
index 1a07025f07d61a9890bfd146625e982fb8f340f0..5a78a664d754c1a45f2bc6ebb381ba015941c5f7 100755
--- a/tools/run_benchmarks.py
+++ b/tools/run_benchmarks.py
@@ -156,6 +156,7 @@ class DefaultSentinel(Node):
     self.flags = []
     self.resources = []
     self.results_regexp = None
+    self.stderr_regexp = None
     self.units = "score"


@@ -196,6 +197,13 @@ class Graph(Node):
       regexp_default = None
     self.results_regexp = suite.get("results_regexp", regexp_default)

+    # A similar regular expression for the standard error (optional).
+    if parent.stderr_regexp:
+      stderr_default = parent.stderr_regexp % suite["name"]
+    else:
+      stderr_default = None
+    self.stderr_regexp = suite.get("stderr_regexp", stderr_default)
+

 class Trace(Graph):
   """Represents a leaf in the benchmark suite tree structure.
@@ -207,6 +215,7 @@ class Trace(Graph):
     assert self.results_regexp
     self.results = []
     self.errors = []
+    self.stderr = ""

   def ConsumeOutput(self, stdout):
     try:
@@ -216,11 +225,22 @@ class Trace(Graph):
       self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
                          % (self.results_regexp, self.graphs[-1]))

+    try:
+      if self.stderr_regexp and self.stderr:
+        self.errors.append("Benchmark %s should only run once since a stderr "
+                           "is provided by the benchmark." % self.graphs[-1])
+      if self.stderr_regexp:
+        self.stderr = re.search(self.stderr_regexp, stdout, re.M).group(1)
+    except:
+      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
+                         % (self.stderr_regexp, self.graphs[-1]))
+
   def GetResults(self):
     return Results([{
       "graphs": self.graphs,
       "units": self.units,
       "results": self.results,
+      "stderr": self.stderr,
     }], self.errors)
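
(Note for reviewers: the extraction flow above, condensed into a
standalone sketch. This is illustrative only, not the patched code;
the Trace bookkeeping is reduced to plain arguments.)

  import re

  def extract_stderr(stderr_regexp, stdout, prev_stderr, errors, graph):
    # A second match for the same trace means the benchmark ran more
    # than once, which is flagged because the benchmark itself already
    # aggregates over its internal runs.
    if stderr_regexp and prev_stderr:
      errors.append("Benchmark %s should only run once since a stderr "
                    "is provided by the benchmark." % graph)
    if not stderr_regexp:
      return prev_stderr
    match = re.search(stderr_regexp, stdout, re.M)
    if not match:
      errors.append("Regexp \"%s\" didn't match for benchmark %s."
                    % (stderr_regexp, graph))
      return prev_stderr
    return match.group(1)

  # One run of a benchmark that reports its own aggregate and error:
  errors = []
  out = "Richards: 1.234\nRichards-stderr: 0.23\n"
  stderr = extract_stderr("^Richards\-stderr: (.+)$", out, "", errors,
                          "Richards")
  assert stderr == "0.23" and errors == []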


Index: tools/unittests/run_benchmarks_test.py
diff --git a/tools/unittests/run_benchmarks_test.py b/tools/unittests/run_benchmarks_test.py
index f627d434b4941d7cc5bd4930296766b8f6303bb6..293fc21ef6388c33c0ee0b72f2224fb0de7ad8d6 100644
--- a/tools/unittests/run_benchmarks_test.py
+++ b/tools/unittests/run_benchmarks_test.py
@@ -135,8 +135,9 @@ class BenchmarksTest(unittest.TestCase):
     self.assertEquals([
       {"units": units,
        "graphs": [suite, trace["name"]],
-       "results": trace["results"]} for trace in traces],
-        self._LoadResults()["traces"])
+       "results": trace["results"],
+       "stderr": trace["stderr"]} for trace in traces],
+      self._LoadResults()["traces"])

   def _VerifyErrors(self, errors):
     self.assertEquals(errors, self._LoadResults()["errors"])
@@ -159,8 +160,8 @@ class BenchmarksTest(unittest.TestCase):
     self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"]},
-      {"name": "DeltaBlue", "results": ["10657567"]},
+      {"name": "Richards", "results": ["1.234"], "stderr": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stderr": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -176,8 +177,8 @@ class BenchmarksTest(unittest.TestCase):
                        "Richards: 50\nDeltaBlue: 300\n"])
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("v8", "ms", [
-      {"name": "Richards", "results": ["50", "100"]},
-      {"name": "DeltaBlue", "results": ["300", "200"]},
+      {"name": "Richards", "results": ["50", "100"], "stderr": ""},
+      {"name": "DeltaBlue", "results": ["300", "200"], "stderr": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -194,8 +195,8 @@ class BenchmarksTest(unittest.TestCase):
                        "Richards: 50\nDeltaBlue: 300\n"])
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["50", "100"]},
-      {"name": "DeltaBlue", "results": ["300", "200"]},
+      {"name": "Richards", "results": ["50", "100"], "stderr": ""},
+      {"name": "DeltaBlue", "results": ["300", "200"], "stderr": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -213,13 +214,16 @@ class BenchmarksTest(unittest.TestCase):
     self.assertEquals([
       {"units": "score",
        "graphs": ["test", "Richards"],
-       "results": ["50", "100"]},
+       "results": ["50", "100"],
+       "stderr": ""},
       {"units": "ms",
        "graphs": ["test", "Sub", "Leaf"],
-       "results": ["3", "2", "1"]},
+       "results": ["3", "2", "1"],
+       "stderr": ""},
       {"units": "score",
        "graphs": ["test", "DeltaBlue"],
-       "results": ["200"]},
+       "results": ["200"],
+       "stderr": ""},
       ], self._LoadResults()["traces"])
     self._VerifyErrors([])
     self._VerifyMockMultiple(
@@ -232,13 +236,50 @@ class BenchmarksTest(unittest.TestCase):
         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
         (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))

+  def testOneRunStdErrRegExp(self):
+    test_input = dict(V8_JSON)
+    test_input["stderr_regexp"] = "^%s\-stderr: (.+)$"
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["Richards: 1.234\nRichards-stderr: 0.23\n"
+ "DeltaBlue: 10657567\nDeltaBlue-stderr: 106\n"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stderr": "0.23"},
+      {"name": "DeltaBlue", "results": ["10657567"], "stderr": "106"},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testTwoRunsStdErrRegExp(self):
+    test_input = dict(V8_JSON)
+    test_input["stderr_regexp"] = "^%s\-stderr: (.+)$"
+    test_input["run_count"] = 2
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["Richards: 3\nRichards-stderr: 0.7\n"
+                              "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
+                              "Richards: 2\nRichards-stderr: 0.5\n"
+                              "DeltaBlue: 5\nDeltaBlue-stderr: 0.8\n"])
+    self.assertEquals(1, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["2", "3"], "stderr": "0.7"},
+      {"name": "DeltaBlue", "results": ["5", "6"], "stderr": "0.8"},
+    ])
+    self._VerifyErrors(
+ ["Benchmark Richards should only run once since a stderr is provided "
+         "by the benchmark.",
+ "Benchmark DeltaBlue should only run once since a stderr is provided "
+         "by the benchmark.",
+         "Regexp \"^DeltaBlue\-stderr: (.+)$\" didn't match for benchmark "
+         "DeltaBlue."])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
   def testBuildbot(self):
     self._WriteTestInput(V8_JSON)
     self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
     self.assertEquals(0, self._CallMain("--buildbot"))
     self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"]},
-      {"name": "DeltaBlue", "results": ["10657567"]},
+      {"name": "Richards", "results": ["1.234"], "stderr": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stderr": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
@@ -248,8 +289,8 @@ class BenchmarksTest(unittest.TestCase):
     self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
     self.assertEquals(1, self._CallMain())
     self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": []},
-      {"name": "DeltaBlue", "results": ["10657567"]},
+      {"name": "Richards", "results": [], "stderr": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stderr": ""},
     ])
     self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])

