Author: Edd Barrett <vex...@gmail.com>
Branch: min_5_secs
Changeset: r285:507b827a733d
Date: 2014-12-09 16:31 +0000
http://bitbucket.org/pypy/benchmarks/changeset/507b827a733d/

Log:    Ensure that all benchmarks run for at least 5 secs on a fast
        machine.

        (I actually aimed for 5<=t<=6, but some are more)

        Note that some benchmarks (spitfire, twisted) do not report how long
        an iteration took, but rather how many operations completed in a
        fixed time window. For those, the reported output may not
        necessarily be >=5 secs.

diff --git a/lib/pypy/rpython/translator/c/src/signals.o 
b/lib/pypy/rpython/translator/c/src/signals.o
index 
166016f96874014a34535131bd7cfed3aabe09eb..978377c7573fccb667802c8cb75d56c1a1ce01b9
GIT binary patch

[cut]

diff --git a/lib/pypy/rpython/translator/c/src/stacklet/stacklet.o 
b/lib/pypy/rpython/translator/c/src/stacklet/stacklet.o
index 
e42497399f1f975b42e62fbf236498014c21f654..f9610bf7b82769ca1eeeac7f2eca92de8d120802
GIT binary patch

[cut]

diff --git a/lib/pypy/rpython/translator/c/src/thread.o 
b/lib/pypy/rpython/translator/c/src/thread.o
index 
9b7dcc7cb6e9c6eb94216acf121fbcd5b32297e5..a0f49075af33414be1dca504e743a537a7f175f6
GIT binary patch

[cut]

diff --git a/own/bm_chameleon.py b/own/bm_chameleon.py
--- a/own/bm_chameleon.py
+++ b/own/bm_chameleon.py
@@ -13,15 +13,20 @@
 </tr>
 </table>"""
 
+INNER_ITERS = 500
+
 def main(n):
     tmpl = PageTemplate(BIGTABLE_ZPT)
     options = {'table': [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, 
j=10)
                          for x in range(1000)]}
     import time
     l = []
-    for k in range(n):
+    for k in xrange(n):
         t0 = time.time()
-        tmpl(options=options)
+
+        for i in xrange(INNER_ITERS):
+            tmpl(options=options)
+
         l.append(time.time() - t0)
     return l
 
diff --git a/own/bm_dulwich_log.py b/own/bm_dulwich_log.py
--- a/own/bm_dulwich_log.py
+++ b/own/bm_dulwich_log.py
@@ -4,13 +4,18 @@
 
 import dulwich.repo
 
+INNER_ITERS = 50
+
 def test_dulwich(n):
     l = []
     r = dulwich.repo.Repo(os.path.join(os.path.dirname(__file__), 'git-demo'))
     import time
-    for i in range(20):
+    for i in xrange(n):
         t0 = time.time()
-        r.revision_history(r.head())
+
+        for j in xrange(INNER_ITERS):
+            r.revision_history(r.head())
+
         l.append(time.time() - t0)
     return l
 
diff --git a/own/bm_genshi.py b/own/bm_genshi.py
--- a/own/bm_genshi.py
+++ b/own/bm_genshi.py
@@ -22,21 +22,31 @@
 </table>
 """
 
-
 def main(n, bench):
     tmpl_cls, tmpl_str = {
         'xml': (MarkupTemplate, BIGTABLE_XML),
         'text': (NewTextTemplate, BIGTABLE_TEXT),
         }[bench]
+
+    if bench == "text":
+        inner_iters = 620
+    elif bench == "xml":
+        inner_iters = 220
+    else:
+        assert(False)
+
     tmpl = tmpl_cls(tmpl_str)
     context = {'table':
                [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10)
                 for x in range(1000)]}
     l = []
-    for k in range(n):
+    for k in xrange(n):
         t0 = time.time()
-        stream = tmpl.generate(**context)
-        stream.render()
+
+        for i in xrange(inner_iters):
+            stream = tmpl.generate(**context)
+            stream.render()
+
         l.append(time.time() - t0)
     return l
 
diff --git a/own/bm_mako.py b/own/bm_mako.py
--- a/own/bm_mako.py
+++ b/own/bm_mako.py
@@ -120,6 +120,7 @@
 ${fun6()}
 """
 
+INNER_ITERS = 550
 
 def test_mako(count):
 
@@ -128,7 +129,7 @@
     lookup.put_string('page.mako', PAGE_TEMPLATE)
 
     template = Template(CONTENT_TEMPLATE, lookup=lookup)
-    
+
     table = [xrange(150) for i in xrange(150)]
     paragraphs = xrange(50)
     title = 'Hello world!'
@@ -136,9 +137,11 @@
     times = []
     for i in range(count):
         t0 = time.time()
-        data = template.render(table=table, paragraphs=paragraphs,
-                               lorem=LOREM_IPSUM, title=title,
-                               img_count=50)
+
+        for j in xrange(INNER_ITERS):
+            data = template.render(table=table, paragraphs=paragraphs,
+                                   lorem=LOREM_IPSUM, title=title,
+                                   img_count=50)
         t1 = time.time()
         times.append(t1-t0)
     return times
diff --git a/own/bm_sympy.py b/own/bm_sympy.py
--- a/own/bm_sympy.py
+++ b/own/bm_sympy.py
@@ -20,13 +20,25 @@
     x, y, z = symbols('x y z')
     str(expand((x+2*y+3*z)**30))
 
+INNER_ITERS_D = {
+    'expand' : 650000,
+    'integrate' : 10,
+    'str' : 500,
+    'sum' : 500,
+}
+
 def main(n, bench):
+
+    inner_iters = INNER_ITERS_D.get(bench, 1)
     func = globals()['bench_' + bench]
     l = []
     for i in range(n):
         clear_cache()
         t0 = time.time()
-        func()
+
+        for j in xrange(inner_iters):
+            func()
+
         l.append(time.time() - t0)
     return l
 
diff --git a/own/chaos.py b/own/chaos.py
--- a/own/chaos.py
+++ b/own/chaos.py
@@ -13,6 +13,8 @@
 import sys
 import time
 
+INNER_ITERS = 5000 * 1300
+
 class GVector(object):
     def __init__(self, x = 0, y = 0, z = 0):
         self.x = x
@@ -219,9 +221,9 @@
                         (self.maxy + self.miny) / 2, 0)
         colored = 0
         times = []
-        for _ in range(n):
+        for _ in xrange(n):
             t1 = time.time()
-            for i in xrange(5000):
+            for i in xrange(INNER_ITERS):
                 point = self.transform_point(point)
                 x = (point.x - self.minx) / self.width * w
                 y = (point.y - self.miny) / self.height * h
diff --git a/own/crypto_pyaes.py b/own/crypto_pyaes.py
--- a/own/crypto_pyaes.py
+++ b/own/crypto_pyaes.py
@@ -22,13 +22,18 @@
 
     assert plaintext == cleartext
 
+INNER_ITERS = 197
+
 def main(arg):
     # XXX warmup
 
     times = []
     for i in xrange(arg):
         t0 = time.time()
-        o = benchmark()
+
+        for j in xrange(INNER_ITERS):
+            o = benchmark()
+
         tk = time.time()
         times.append(tk - t0)
     return times
diff --git a/own/deltablue.py b/own/deltablue.py
--- a/own/deltablue.py
+++ b/own/deltablue.py
@@ -614,15 +614,19 @@
     chain_test(100)
     projection_test(100)
 
+INNER_ITERS = 17000
 
 # Specific to the PyPy implementation, to run within the main harnass.
 def main(n):
     import time
     times = []
 
-    for i in range(n):
+    for i in xrange(n):
         t1 = time.time()
-        delta_blue()
+
+        for j in xrange(INNER_ITERS):
+            delta_blue()
+
         t2 = time.time()
         times.append(t2 - t1)
 
diff --git a/own/eparse.py b/own/eparse.py
--- a/own/eparse.py
+++ b/own/eparse.py
@@ -4,13 +4,18 @@
 ometa.FAST = True
 from monte.eparser import EParser
 
+INNER_ITERS = 24
+
 def main(n):
     l = []
     data = open(os.path.join(os.path.dirname(__file__), 'test.e')).read()
     for _ in range(n):
         t0 = time.time()
-        p = EParser(data)
-        v, e = p.apply('start')
+
+        for i in xrange(INNER_ITERS):
+            p = EParser(data)
+            v, e = p.apply('start')
+
         l.append(time.time() - t0)
     return l
 
diff --git a/own/fannkuch.py b/own/fannkuch.py
--- a/own/fannkuch.py
+++ b/own/fannkuch.py
@@ -52,15 +52,20 @@
 
 DEFAULT_ARG = 9
 
+INNER_ITERS = 60
+
 def main(n):
     times = []
-    for i in range(n):
+    for i in xrange(n):
         t0 = time.time()
-        fannkuch(DEFAULT_ARG)
+
+        for j in xrange(INNER_ITERS):
+            fannkuch(DEFAULT_ARG)
+
         tk = time.time()
         times.append(tk - t0)
     return times
-    
+
 if __name__ == "__main__":
     parser = optparse.OptionParser(
         usage="%prog [options]",
diff --git a/own/float.py b/own/float.py
--- a/own/float.py
+++ b/own/float.py
@@ -47,17 +47,22 @@
 
 POINTS = 100000
 
+INNER_ITERS = 240
+
 def main(arg):
     # XXX warmup
-    
+
     times = []
     for i in xrange(arg):
         t0 = time.time()
-        o = benchmark(POINTS)
+
+        for j in xrange(INNER_ITERS):
+            o = benchmark(POINTS)
+
         tk = time.time()
         times.append(tk - t0)
     return times
-    
+
 if __name__ == "__main__":
     parser = optparse.OptionParser(
         usage="%prog [options]",
diff --git a/own/go.py b/own/go.py
--- a/own/go.py
+++ b/own/go.py
@@ -423,13 +423,18 @@
     board = Board()
     pos = computer_move(board)
 
+INNER_ITERS = 89
+
 def main(n):
     times = []
-    for i in range(5):
+    for i in xrange(5):
         versus_cpu() # warmup
-    for i in range(n):
+    for i in xrange(n):
         t1 = time.time()
-        versus_cpu()
+
+        for j in xrange(INNER_ITERS):
+            versus_cpu()
+
         t2 = time.time()
         times.append(t2 - t1)
     return times
diff --git a/own/json_bench.py b/own/json_bench.py
--- a/own/json_bench.py
+++ b/own/json_bench.py
@@ -11,14 +11,20 @@
 
 cases = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE']
 
+INNER_ITERS = 13
+
 def main(n):
     l = []
-    for i in range(n):
+    for i in xrange(n):
         t0 = time.time()
-        for case in cases:
-            data, count = globals()[case]
-            for i in range(count):
-                json.dumps(data)
+
+        for m in xrange(INNER_ITERS):
+
+            for case in cases:
+                data, count = globals()[case]
+                for i in range(count):
+                    json.dumps(data)
+
         l.append(time.time() - t0)
     return l
 
diff --git a/own/meteor-contest.py b/own/meteor-contest.py
--- a/own/meteor-contest.py
+++ b/own/meteor-contest.py
@@ -135,21 +135,25 @@
 
 SOLVE_ARG = 60
 
+INNER_ITERS = 92
+
 def main(n):
     times = []
-    for i in range(n):
+    for i in xrange(n):
+
         t0 = time.time()
-        free = frozenset(xrange(len(board)))
-        curr_board = [-1] * len(board)
-        pieces_left = range(len(pieces))
-        solutions = []
-        solve(SOLVE_ARG, 0, free, curr_board, pieces_left, solutions)
-        #print len(solutions),  'solutions found\n'
-        #for i in (0, -1): print_board(solutions[i])
-        tk = time.time()
+        for j in xrange(INNER_ITERS):
+            free = frozenset(xrange(len(board)))
+            curr_board = [-1] * len(board)
+            pieces_left = range(len(pieces))
+            solutions = []
+            solve(SOLVE_ARG, 0, free, curr_board, pieces_left, solutions)
+            #print len(solutions),  'solutions found\n'
+            #for i in (0, -1): print_board(solutions[i])
+            tk = time.time()
         times.append(tk - t0)
     return times
-    
+
 if __name__ == "__main__":
     parser = optparse.OptionParser(
         usage="%prog [options]",
diff --git a/own/nbody_modified.py b/own/nbody_modified.py
--- a/own/nbody_modified.py
+++ b/own/nbody_modified.py
@@ -109,17 +109,22 @@
 
 NUMBER_OF_ITERATIONS = 20000
 
+INNER_ITERATIONS = 290
+
 def main(n, ref='sun'):
     # XXX warmup
-    
+
     times = []
-    for i in range(n):
+    for i in xrange(n):
         t0 = time.time()
-        offset_momentum(BODIES[ref])
-        report_energy()
-        advance(0.01, NUMBER_OF_ITERATIONS)
-        report_energy()
-        tk = time.time()
+
+        for j in xrange(INNER_ITERATIONS):
+            offset_momentum(BODIES[ref])
+            report_energy()
+            advance(0.01, NUMBER_OF_ITERATIONS)
+            report_energy()
+            tk = time.time()
+
         times.append(tk - t0)
     return times
 
diff --git a/own/pyflate-fast.py b/own/pyflate-fast.py
--- a/own/pyflate-fast.py
+++ b/own/pyflate-fast.py
@@ -660,6 +660,8 @@
     assert md5.md5(out).hexdigest() == "afa004a630fe072901b1d9628b960974"
     input.close()
 
+INNER_ITERS = 26
+
 def main(n):
     import time
     times = []
@@ -667,7 +669,10 @@
         _main() # warmup
     for i in range(n):
         t1 = time.time()
-        _main()
+
+        for j in xrange(INNER_ITERS):
+            _main()
+
         t2 = time.time()
         times.append(t2 - t1)
     return times
diff --git a/own/raytrace-simple.py b/own/raytrace-simple.py
--- a/own/raytrace-simple.py
+++ b/own/raytrace-simple.py
@@ -353,6 +353,8 @@
     s.addObject(Halfspace(Point(0,0,0), Vector.UP), CheckerboardSurface())
     s.render(c)
 
+INNER_ITERS = 300
+
 def main(n):
     import time
     times = []
@@ -360,7 +362,10 @@
         _main() # warmup
     for i in range(n):
         t1 = time.time()
-        _main()
+
+        for j in xrange(INNER_ITERS):
+            _main()
+
         t2 = time.time()
         times.append(t2 - t1)
     return times
diff --git a/own/spectral-norm.py b/own/spectral-norm.py
--- a/own/spectral-norm.py
+++ b/own/spectral-norm.py
@@ -42,25 +42,30 @@
 
 DEFAULT_N = 130
 
+INNER_ITERS = 600
+
 def main(n):
     times = []
-    for i in range(n):
+    for i in xrange(n):
         t0 = time.time()
-        u = [1] * DEFAULT_N
 
-        for dummy in xrange (10):
-            v = eval_AtA_times_u (u)
-            u = eval_AtA_times_u (v)
+        for j in xrange(INNER_ITERS):
+            u = [1] * DEFAULT_N
 
-        vBv = vv = 0
+            for dummy in xrange (10):
+                v = eval_AtA_times_u (u)
+                u = eval_AtA_times_u (v)
 
-        for ue, ve in izip (u, v):
-            vBv += ue * ve
-            vv  += ve * ve
+            vBv = vv = 0
+
+            for ue, ve in izip (u, v):
+                vBv += ue * ve
+                vv  += ve * ve
+
         tk = time.time()
         times.append(tk - t0)
     return times
-    
+
 if __name__ == "__main__":
     parser = optparse.OptionParser(
         usage="%prog [options]",
diff --git a/own/spitfire.py b/own/spitfire.py
--- a/own/spitfire.py
+++ b/own/spitfire.py
@@ -21,11 +21,17 @@
 # bummer, timeit module is stupid
 from bigtable import test_python_cstringio, test_spitfire_o4, test_spitfire
 
+INNER_ITERS_D = {
+    "python_cstringio" :  12000,
+    "spitfire_o4" : 4000,
+}
+
 def runtest(n, benchmark):
     times = []
+    inner_iters = INNER_ITERS_D.get(benchmark, 100)
     for i in range(n):
         sys.stdout = StringIO()
-        bigtable.run([benchmark], 100)
+        bigtable.run([benchmark], inner_iters)
         times.append(float(sys.stdout.getvalue().split(" ")[-2]))
         sys.stdout = sys.__stdout__
     return times
diff --git a/own/telco.py b/own/telco.py
--- a/own/telco.py
+++ b/own/telco.py
@@ -75,11 +75,16 @@
     end = time()
     return end - start
 
+INNER_ITERS = 450
+
 def main(n):
     run() # warmup
     times = []
     for i in range(n):
-        times.append(run())
+        all_inner_runs = 0.0
+        for j in xrange(INNER_ITERS):
+            all_inner_runs += run()
+        times.append(all_inner_runs)
     return times
 
 
diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -809,7 +809,7 @@
     if options.rigorous:
         trials = 100
     elif options.fast:
-        trials = 5
+        trials = 1 # XXX
     trials = max(1, int(trials * iteration_scaling))
 
     RemovePycs()
diff --git a/unladen_swallow/performance/bm_ai.py 
b/unladen_swallow/performance/bm_ai.py
--- a/unladen_swallow/performance/bm_ai.py
+++ b/unladen_swallow/performance/bm_ai.py
@@ -69,6 +69,7 @@
             yield vec
 
 
+INNER_ITERS = 230
 def test_n_queens(iterations):
     # Warm-up runs.
     list(n_queens(8))
@@ -77,7 +78,10 @@
     times = []
     for _ in xrange(iterations):
         t0 = time.time()
-        list(n_queens(8))
+
+        for i in xrange(INNER_ITERS):
+            list(n_queens(8))
+
         t1 = time.time()
         times.append(t1 - t0)
     return times
diff --git a/unladen_swallow/performance/bm_django.py 
b/unladen_swallow/performance/bm_django.py
--- a/unladen_swallow/performance/bm_django.py
+++ b/unladen_swallow/performance/bm_django.py
@@ -32,6 +32,7 @@
 </table>
 """)
 
+INNER_ITERS = 300
 def test_django(count):
     table = [xrange(150) for _ in xrange(150)]
     context = Context({"table": table})
@@ -43,7 +44,10 @@
     times = []
     for _ in xrange(count):
         t0 = time.time()
-        data = DJANGO_TMPL.render(context)
+
+        for i in xrange(INNER_ITERS):
+            data = DJANGO_TMPL.render(context)
+
         t1 = time.time()
         times.append(t1 - t0)
     return times
diff --git a/unladen_swallow/performance/bm_html5lib.py 
b/unladen_swallow/performance/bm_html5lib.py
--- a/unladen_swallow/performance/bm_html5lib.py
+++ b/unladen_swallow/performance/bm_html5lib.py
@@ -23,6 +23,8 @@
 import html5lib
 
 
+INNER_ITERS = 4
+
 def test_html5lib(count, spec_data):
     # No warm-up runs for this benchmark; in real life, the parser doesn't get
     # to warm up (this isn't a daemon process).
@@ -31,7 +33,10 @@
     for _ in xrange(count):
         spec_data.seek(0)
         t0 = time.time()
-        html5lib.parse(spec_data)
+
+        for i in xrange(INNER_ITERS):
+            html5lib.parse(spec_data)
+
         t1 = time.time()
         times.append(t1 - t0)
     return times
diff --git a/unladen_swallow/performance/bm_richards.py 
b/unladen_swallow/performance/bm_richards.py
--- a/unladen_swallow/performance/bm_richards.py
+++ b/unladen_swallow/performance/bm_richards.py
@@ -18,6 +18,8 @@
 import util
 
 
+INNER_ITERS = 2750
+
 def test_richards(iterations):
     # Warm-up
     r = richards.Richards()
@@ -26,7 +28,7 @@
     times = []
     for _ in xrange(iterations):
         t0 = time.time()
-        r.run(iterations=1)
+        r.run(iterations=INNER_ITERS)
         t1 = time.time()
         times.append(t1 - t0)
     return times
diff --git a/unladen_swallow/performance/bm_rietveld.py 
b/unladen_swallow/performance/bm_rietveld.py
--- a/unladen_swallow/performance/bm_rietveld.py
+++ b/unladen_swallow/performance/bm_rietveld.py
@@ -86,6 +86,7 @@
     tmpl = loader.get_template(templ_name)
     return tmpl, context
 
+INNER_ITERS = 46
 
 def test_rietveld(count, tmpl, context):
     # Warm up Django.
@@ -95,37 +96,40 @@
     times = []
     for _ in xrange(count):
         t0 = time.time()
-        # 30 calls to render, so that we don't measure loop overhead.
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
-        tmpl.render(context)
+
+        for i in xrange(INNER_ITERS):
+            # 30 calls to render, so that we don't measure loop overhead.
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+            tmpl.render(context)
+
         t1 = time.time()
         times.append(t1 - t0)
     return times
diff --git a/unladen_swallow/performance/bm_spambayes.py 
b/unladen_swallow/performance/bm_spambayes.py
--- a/unladen_swallow/performance/bm_spambayes.py
+++ b/unladen_swallow/performance/bm_spambayes.py
@@ -20,6 +20,7 @@
 # Local imports
 import util
 
+INNER_ITERS = 150
 
 def test_spambayes(iterations, messages, ham_classifier):
     # Prime the pump. This still leaves some hot functions uncompiled; these
@@ -30,8 +31,9 @@
     times = []
     for _ in xrange(iterations):
         t0 = time.time()
-        for msg in messages:
-            ham_classifier.score(msg)
+        for i in xrange(INNER_ITERS):
+            for msg in messages:
+                ham_classifier.score(msg)
         t1 = time.time()
         times.append(t1 - t0)
     return times
diff --git a/unladen_swallow/performance/bm_spitfire.py 
b/unladen_swallow/performance/bm_spitfire.py
--- a/unladen_swallow/performance/bm_spitfire.py
+++ b/unladen_swallow/performance/bm_spitfire.py
@@ -40,6 +40,8 @@
 </table>
 """
 
+INNER_ITERS = 31
+
 def test_spitfire(count):
     # Activate the most aggressive Spitfire optimizations. While it might
     # conceivably be interesting to stress Spitfire's lower optimization
@@ -60,7 +62,10 @@
     times = []
     for _ in xrange(count):
         t0 = time.time()
-        data = spitfire_tmpl_o4(search_list=[{"table": table}]).main()
+
+        for i in xrange(INNER_ITERS):
+            data = spitfire_tmpl_o4(search_list=[{"table": table}]).main()
+
         t1 = time.time()
         times.append(t1 - t0)
     return times
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to