Author: Maciej Fijalkowski <[email protected]>
Branch: single-run
Changeset: r224:e466ac6b92c3
Date: 2013-08-01 21:21 +0200
http://bitbucket.org/pypy/benchmarks/changeset/e466ac6b92c3/

Log:    merge default

diff --git a/lib/pypy/rpython/flowspace/flowcontext.py b/lib/pypy/rpython/flowspace/flowcontext.py
--- a/lib/pypy/rpython/flowspace/flowcontext.py
+++ b/lib/pypy/rpython/flowspace/flowcontext.py
@@ -800,6 +800,9 @@
         self.popvalue()
         return next_instr
 
+    def JUMP_IF_NOT_DEBUG(self, target, next_instr):
+        return next_instr
+
     def GET_ITER(self, oparg, next_instr):
         w_iterable = self.popvalue()
         w_iterator = self.space.iter(w_iterable)
diff --git a/runner.py b/runner.py
--- a/runner.py
+++ b/runner.py
@@ -66,7 +66,6 @@
          'json file.'))
     benchmark_group.add_option(
         "-b", "--benchmarks", metavar="BM_LIST",
-        default=','.join(BENCHMARK_SET),
         help=("Comma-separated list of benchmarks to run"
               " Valid benchmarks are: %s"
               ". (default: Run all listed benchmarks)"
@@ -76,6 +75,10 @@
         help=('Interpreter. (default: the python used to '
               'run this script)'))
     benchmark_group.add_option(
+        "-f", "--benchmarks-file", metavar="BM_FILE",
+        help=("Read the list of benchmarks to run from this file (one "
+              "benchmark name per line).  Do not specify both this and -b."))
+    benchmark_group.add_option(
         '-o', '--output-filename', default="result.json",
         action="store",
         help=('Specify the output filename to store resulting json. '
@@ -129,6 +132,27 @@
     options, args = parser.parse_args(argv)
 
-    benchmarks = options.benchmarks.split(',')
+    if options.benchmarks is not None:
+        if options.benchmarks_file is not None:
+            parser.error(
+                '--benchmarks and --benchmarks-file are mutually exclusive')
+        else:
+            benchmarks = [benchmark.strip()
+                          for benchmark in options.benchmarks.split(',')]
+    else:
+        if options.benchmarks_file is not None:
+            benchmarks = []
+            try:
+                bm_file = open(options.benchmarks_file, 'rt')
+            except IOError as e:
+                parser.error('error opening benchmarks file: %s' % e)
+            with bm_file:
+                for line in bm_file:
+                    line = line.strip()
+                    if line:
+                        benchmarks.append(line)
+        else:
+            benchmarks = list(BENCHMARK_SET)
+
     for benchmark in benchmarks:
         if benchmark not in BENCHMARK_SET:
             raise WrongBenchmark(benchmark)
diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -167,7 +167,10 @@
     """
     assert len(sample1) == len(sample2)
     error = PooledSampleVariance(sample1, sample2) / len(sample1)
-    return (avg(sample1) - avg(sample2)) / math.sqrt(error * 2)
+    try:
+        return (avg(sample1) - avg(sample2)) / math.sqrt(error * 2)
+    except ZeroDivisionError:
+        return 0.0
 
 
 def IsSignificant(sample1, sample2):
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to