Author: Hakan Ardo <ha...@debian.org>
Branch: extradoc
Changeset: r5464:0fbd61901330
Date: 2014-11-21 09:50 +0100
http://bitbucket.org/pypy/extradoc/changeset/0fbd61901330/
Log:    numpy versions of these benchmarks

diff --git a/talk/dls2012/benchmarks/benchmark.sh b/talk/dls2012/benchmarks/benchmark.sh
--- a/talk/dls2012/benchmarks/benchmark.sh
+++ b/talk/dls2012/benchmarks/benchmark.sh
@@ -59,13 +59,21 @@
     #$* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3 1
     #$* ./runner.py $EXTRA_OPTS convolution/convolution.py conv5 1
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3 100
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3_numpy 100
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv5 100
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv5_numpy 100
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3 1000
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3_numpy 1000
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv5 1000
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv5_numpy 1000
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3x3 1000000 3
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3x3_numpy 1000000 3
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3x3 1000 1000
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py conv3x3_numpy 1000 1000
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py dilate3x3 1000 1000
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py dilate3x3_numpy 1000 1000
     $* ./runner.py $EXTRA_OPTS convolution/convolution.py sobel_magnitude 1000 1000
+    $* ./runner.py $EXTRA_OPTS convolution/convolution.py sobel_magnitude_numpy 1000 1000
     #$* ./runner.py $EXTRA_OPTS image/noborder.py main NoBorderImagePadded
     #$* ./runner.py $EXTRA_OPTS image/noborder.py main NoBorderImagePadded iter
     #$* ./runner.py $EXTRA_OPTS image/noborder.py main NoBorderImagePadded range
diff --git a/talk/dls2012/benchmarks/convolution/convolution.py b/talk/dls2012/benchmarks/convolution/convolution.py
--- a/talk/dls2012/benchmarks/convolution/convolution.py
+++ b/talk/dls2012/benchmarks/convolution/convolution.py
@@ -1,5 +1,13 @@
 from array import array
 from math import log10, sqrt
+try:
+    import numpy as np
+except ImportError:
+    try:
+        import numpypy as np
+    except ImportError:
+        print "Can't find numpy"
+
 
 def _conv3(a, k, n=1):
     assert len(k)==3
@@ -14,7 +22,22 @@
     n = int(args[0])
     _conv3(array('d', [1]) * (100000000/n),
            array('d', [-1, 0, 1]), n)
-    return 'conv3(array(1e%d))' % log10(100000000/n)
+    return 'conv3(array.array(1e%d))' % log10(100000000/n)
+
+def _conv3_numpy(a, k, n=1):
+    assert len(k)==3
+    b = np.zeros(len(a) - 2, a.dtype)
+    while n:
+        n -= 1
+        for i in xrange(len(b)):
+            b[i] = k[2]*a[i] + k[1]*a[i+1] + k[0]*a[i+2]
+    return b
+
+def conv3_numpy(args):
+    n = int(args[0])
+    _conv3_numpy(np.ones(100000000/n, 'd'),
+                 np.array([-1, 0, 1], 'd'), n)
+    return 'conv3(numpy.array(1e%d))' % log10(100000000/n)
 
 def _conv5(a, k, n=1):
     assert len(k)==5
@@ -29,7 +52,22 @@
     n = int(args[0])
     _conv5(array('d', [1]) * (100000000/n),
            array('d', [1, 4, 6, 4, 1]), n)
-    return 'conv5(array(1e%d))' % log10(100000000/n)
+    return 'conv5(array.array(1e%d))' % log10(100000000/n)
+
+def _conv5_numpy(a, k, n=1):
+    assert len(k)==5
+    b = np.zeros(len(a) - 4, a.dtype)
+    while n:
+        n -= 1
+        for i in xrange(len(b)):
+            b[i] = k[4]*a[i] + k[3]*a[i+1] + k[2]*a[i+2] + k[1]*a[i+3] + k[0]*a[i+4]
+    return b
+
+def conv5_numpy(args):
+    n = int(args[0])
+    _conv5_numpy(np.ones(100000000/n, 'd'),
+                 np.array([1, 4, 6, 4, 1], 'd'), n)
+    return 'conv5(numpy.array(1e%d))' % log10(100000000/n)
 
 class Array2D(object):
     def __init__(self, w, h, data=None):
@@ -71,13 +109,16 @@
     def __init__(self, w, h):
         self.width = w
         self.height = h
-        import numpypy
-        self.data = numpypy.zeros([h, w], 'd')
+        self.data = np.zeros([h, w], 'd')
 
     def __getitem__(self, (x, y)):
+        if x < 0 or y < 0:
+            raise IndexError
         return self.data[y, x]
 
     def __setitem__(self, (x, y), val):
+        if x < 0 or y < 0:
+            raise IndexError
         self.data[y, x] = val
 
 def _conv3x3(a, b, k):
@@ -125,6 +166,13 @@
         _dilate3x3(a, b, Array2D(3,3))
     return 'dilate3x3(Array2D(%sx%s))' % tuple(args)
 
+def dilate3x3_numpy(args):
+    a = NumpyArray(int(args[0]), int(args[1]))
+    b = NumpyArray(a.width, a.height)
+    for i in range(10):
+        _dilate3x3(a, b, NumpyArray(3,3))
+    return 'dilate3x3(NumpyArray(%sx%s))' % tuple(args)
+
 def _sobel_magnitude(a):
     b = Array2D(a.width, a.height)
     for y in xrange(1, a.height-1):
@@ -141,3 +189,8 @@
     for i in range(10):
         _sobel_magnitude(Array2D(int(args[0]), int(args[1])))
     return 'sobel(Array2D(%sx%s))' % tuple(args)
+
+def sobel_magnitude_numpy(args):
+    for i in range(10):
+        _sobel_magnitude(NumpyArray(int(args[0]), int(args[1])))
+    return 'sobel(NumpyArray(%sx%s))' % tuple(args)
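For reference, the 3-tap filter in _conv3_numpy above could also be written with numpy slicing
instead of an element-by-element Python loop; the benchmarks presumably keep the explicit loop
because the point is to measure the interpreter/JIT rather than numpy's C kernels. A minimal
sketch (the name conv3_vectorized is only illustrative, not part of the changeset):

    import numpy as np

    def conv3_vectorized(a, k):
        # Same result as one pass of _conv3_numpy (n=1), but computed with
        # whole-array slicing: b[i] = k[2]*a[i] + k[1]*a[i+1] + k[0]*a[i+2]
        return k[2] * a[:-2] + k[1] * a[1:-1] + k[0] * a[2:]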
diff --git a/talk/dls2012/benchmarks/iter/ndindex.py b/talk/dls2012/benchmarks/iter/ndindex.py
new file mode 100644
--- /dev/null
+++ b/talk/dls2012/benchmarks/iter/ndindex.py
@@ -0,0 +1,145 @@
+from numpy import ndindex, array, ones, tile
+
+range1 = range2 = ndindex
+
+def _sum1d(a):
+    sa = 0
+    for i, in range1(len(a)):
+        sa += a[i]
+
+def _xsum1d(a):
+    sa = 0
+    for i, in range1(len(a)):
+        sa += a[i] + i
+
+def _wsum1d(a):
+    sa = 0
+    for i, in range1(len(a)):
+        sa += a[i] + len(a)
+
+def _sum2d(a, w, h):
+    sa = 0
+    for x, y in range2(w, h):
+        sa += a[y, x]
+
+def _wsum2d(a, w, h):
+    sa = 0
+    for x, y in range2(w, h):
+        sa += a[y, x] + w
+
+def _xsum2d(a, w, h):
+    sa = 0
+    for x, y in range2(w, h):
+        sa += a[y, x] + x
+
+def _whsum2d(a, w, h):
+    sa = 0
+    for x, y in range2(w, h):
+        sa += a[y, x] + w + h
+
+def _xysum2d(a, w, h):
+    sa = 0
+    for x, y in range2(w, h):
+        sa += a[y, x] + x + y
+
+def _mean1d(a):
+    sa = 0
+    for i, in range1(len(a)):
+        sa = (i*sa + a[i])/(i + 1.0);
+
+def _median1d(a):
+    sa = 0
+    for i, in range1(len(a)):
+        if sa > a[i]:
+            sa -= 1.0/(i + 1.0)
+        elif sa < a[i]:
+            sa += 1.0/(i + 1.0)
+
+def _ripple1d(a):
+    sa = 0
+    for i, in range1(len(a)):
+        if sa > a[i]:
+            sa -= 0.1
+        elif sa < a[i]:
+            sa += 0.1
+
+def _ripple2d(a, w, h):
+    sa = 0
+    for x, y in range2(w, h):
+        if sa > a[y, x]:
+            sa -= 0.1
+        elif sa < a[y, x]:
+            sa += 0.1
+
+def sum1d(args):
+    run1d(args, _sum1d)
+    return "sum1d"
+
+def xsum1d(args):
+    run1d(args, _xsum1d)
+    return "xsum1d"
+
+def wsum1d(args):
+    run1d(args, _wsum1d)
+    return "wsum1d"
+
+def sum2d(args):
+    run2d(args, _sum2d)
+    return "sum2d"
+
+def wsum2d(args):
+    run2d(args, _wsum2d)
+    return "wsum2d"
+
+def xsum2d(args):
+    run2d(args, _xsum2d)
+    return "xsum2d"
+
+def whsum2d(args):
+    run2d(args, _whsum2d)
+    return "whsum2d"
+
+def xysum2d(args):
+    run2d(args, _xysum2d)
+    return "xysum2d"
+
+def mean1d(args):
+    run1d(args, _mean1d, [1, -1])
+    return "mean1d"
+
+def median1d(args):
+    run1d(args, _median1d, [1, -1])
+    return "median1d"
+
+def ripple1d(args):
+    run1d(args, _ripple1d, [1, -1])
+    return "ripple1d"
+
+def ripple2d(args):
+    run2d(args, _ripple2d, [1, -1])
+    return "ripple2d"
+
+def run1d(args, f, data=None):
+    if data:
+        a = tile(array(data), 100000000/len(data))
+    else:
+        a = ones(100000000)
+    n = int(args[0])
+    for i in xrange(n):
+        f(a)
+    return "sum1d"
+
+def run2d(args, f, data=None):
+    if data:
+        a = tile(array(data), 100000000/len(data)).reshape((10000, 10000))
+    else:
+        a = ones(100000000).reshape((10000, 10000))
+    n = int(args[0])
+    for i in xrange(n):
+        f(a, 10000, 10000)
+    return "sum1d"
+
+if __name__ == '__main__':
+    import sys
+    eval(sys.argv[1])(sys.argv[2:])
+
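A note on the iteration style used in ndindex.py above: numpy.ndindex yields index tuples over a
shape, which is why the 1-D loops unpack a one-element tuple ("for i, in range1(len(a))"). A
minimal illustration, not part of the changeset:

    from numpy import ndindex

    # ndindex iterates over every index tuple of the given shape, in C order.
    assert list(ndindex(3)) == [(0,), (1,), (2,)]
    assert list(ndindex(2, 2)) == [(0, 0), (0, 1), (1, 0), (1, 1)]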
diff --git a/talk/dls2012/benchmarks/iter/nditer.py b/talk/dls2012/benchmarks/iter/nditer.py
new file mode 100644
--- /dev/null
+++ b/talk/dls2012/benchmarks/iter/nditer.py
@@ -0,0 +1,166 @@
+from numpy import nditer, array, ones, tile
+
+def _sum1d(a):
+    sa = 0
+    it = nditer(a, flags=['f_index'])
+    for v in it:
+        i = it.index
+        sa += a[i]
+
+def _xsum1d(a):
+    sa = 0
+    it = nditer(a, flags=['f_index'])
+    for v in it:
+        i = it.index
+        sa += a[i] + i
+
+def _wsum1d(a):
+    sa = 0
+    it = nditer(a, flags=['f_index'])
+    for v in it:
+        i = it.index
+        sa += a[i] + len(a)
+
+def _sum2d(a, w, h):
+    sa = 0
+    it = nditer(a, flags=['multi_index'])
+    for v in it:
+        y, x = it.multi_index
+        sa += a[y, x]
+
+def _wsum2d(a, w, h):
+    sa = 0
+    it = nditer(a, flags=['multi_index'])
+    for v in it:
+        y, x = it.multi_index
+        sa += a[y, x] + w
+
+def _xsum2d(a, w, h):
+    sa = 0
+    it = nditer(a, flags=['multi_index'])
+    for v in it:
+        y, x = it.multi_index
+        sa += a[y, x] + x
+
+def _whsum2d(a, w, h):
+    sa = 0
+    it = nditer(a, flags=['multi_index'])
+    for v in it:
+        y, x = it.multi_index
+        sa += a[y, x] + w + h
+
+def _xysum2d(a, w, h):
+    sa = 0
+    it = nditer(a, flags=['multi_index'])
+    for v in it:
+        y, x = it.multi_index
+        sa += a[y, x] + x + y
+
+def _mean1d(a):
+    sa = 0
+    it = nditer(a, flags=['f_index'])
+    for v in it:
+        i = it.index
+        sa = (i*sa + a[i])/(i + 1.0);
+
+def _median1d(a):
+    sa = 0
+    it = nditer(a, flags=['f_index'])
+    for v in it:
+        i = it.index
+        if sa > a[i]:
+            sa -= 1.0/(i + 1.0)
+        elif sa < a[i]:
+            sa += 1.0/(i + 1.0)
+
+def _ripple1d(a):
+    sa = 0
+    it = nditer(a, flags=['f_index'])
+    for v in it:
+        i = it.index
+        if sa > a[i]:
+            sa -= 0.1
+        elif sa < a[i]:
+            sa += 0.1
+
+def _ripple2d(a, w, h):
+    sa = 0
+    it = nditer(a, flags=['multi_index'])
+    for v in it:
+        y, x = it.multi_index
+        if sa > a[y, x]:
+            sa -= 0.1
+        elif sa < a[y, x]:
+            sa += 0.1
+
+def sum1d(args):
+    run1d(args, _sum1d)
+    return "sum1d"
+
+def xsum1d(args):
+    run1d(args, _xsum1d)
+    return "xsum1d"
+
+def wsum1d(args):
+    run1d(args, _wsum1d)
+    return "wsum1d"
+
+def sum2d(args):
+    run2d(args, _sum2d)
+    return "sum2d"
+
+def wsum2d(args):
+    run2d(args, _wsum2d)
+    return "wsum2d"
+
+def xsum2d(args):
+    run2d(args, _xsum2d)
+    return "xsum2d"
+
+def whsum2d(args):
+    run2d(args, _whsum2d)
+    return "whsum2d"
+
+def xysum2d(args):
+    run2d(args, _xysum2d)
+    return "xysum2d"
+
+def mean1d(args):
+    run1d(args, _mean1d, [1, -1])
+    return "mean1d"
+
+def median1d(args):
+    run1d(args, _median1d, [1, -1])
+    return "median1d"
+
+def ripple1d(args):
+    run1d(args, _ripple1d, [1, -1])
+    return "ripple1d"
+
+def ripple2d(args):
+    run2d(args, _ripple2d, [1, -1])
+    return "ripple2d"
+
+def run1d(args, f, data=None):
+    if data:
+        a = tile(array(data), 100000000/len(data))
+    else:
+        a = ones(100000000)
+    n = int(args[0])
+    for i in xrange(n):
+        f(a)
+    return "sum1d"
+
+def run2d(args, f, data=None):
+    if data:
+        a = tile(array(data), 100000000/len(data)).reshape((10000, 10000))
+    else:
+        a = ones(100000000).reshape((10000, 10000))
+    n = int(args[0])
+    for i in xrange(n):
+        f(a, 10000, 10000)
+    return "sum1d"
+
+if __name__ == '__main__':
+    import sys
+    eval(sys.argv[1])(sys.argv[2:])
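Likewise for nditer.py above: with the 'f_index' flag the iterator exposes a flat index (for the
1-D arrays used here, simply the element position), and with 'multi_index' it exposes the current
index tuple, which is what the loops read back from the iterator object. A minimal sketch, not
part of the changeset:

    from numpy import arange, nditer

    a = arange(6).reshape(2, 3)
    it = nditer(a, flags=['multi_index'])
    for v in it:
        y, x = it.multi_index      # current (row, column) of the value v
        assert a[y, x] == v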
diff --git a/talk/dls2012/benchmarks/iter/result-2.4.0.txt b/talk/dls2012/benchmarks/iter/result-2.4.0.txt
new file mode 100644
--- /dev/null
+++ b/talk/dls2012/benchmarks/iter/result-2.4.0.txt
@@ -0,0 +1,90 @@
+gcc -O3
+sum1d: 0.83 +- 1.24126707662e-16
+sum2d: 0.83 +- 1.24126707662e-16
+whsum2d: 0.842 +- 0.004472135955
+wsum1d: 0.836 +- 0.00894427191
+wsum2d: 0.85 +- 0.0308220700148
+xsum1d: 0.842 +- 0.004472135955
+xsum2d: 0.842 +- 0.004472135955
+xysum2d: 1.12 +- 0.0346410161514
+mean1d: 7.428 +- 0.0294957624075
+median1d: 3.818 +- 0.004472135955
+ripple1d: 1.342 +- 0.0109544511501
+ripple2d: 1.336 +- 0.00894427191
+
+pypy iter/generator.py
+sum1d: 5.53084101677 +- 0.00651376226379
+sum2d: 5.7555460453 +- 0.00951369332241
+whsum2d: 5.8612534523 +- 0.0505271222339
+wsum1d: 5.13269457817 +- 0.0823542879822
+wsum2d: 5.99619159698 +- 0.0487867098222
+xsum1d: 5.04685320854 +- 0.0555180883435
+xsum2d: 6.07883496284 +- 0.0389639282491
+xysum2d: 5.83931522369 +- 0.0576320488093
+mean1d: 8.94375824928 +- 0.0108197222492
+median1d: 10.4045877457 +- 0.0285781258496
+ripple1d: 10.2467153549 +- 0.00567696790862
+ripple2d: 11.1841029644 +- 0.139083424095
+
+pypy iter/generator2.py
+sum1d: 5.9797270298 +- 0.0755165051781
+sum2d: 5.80511965752 +- 0.380443555753
+whsum2d: 6.19872779846 +- 0.0262446391517
+wsum1d: 5.0686296463 +- 0.03220581952
+wsum2d: 6.22603621483 +- 0.0765020459155
+xsum1d: 5.03696541786 +- 0.0313417312818
+xsum2d: 6.64942345619 +- 0.0634006175674
+xysum2d: 5.85069346428 +- 0.024646797031
+mean1d: 9.20232362747 +- 0.27107580199
+median1d: 10.5072529793 +- 0.0780365503085
+ripple1d: 10.3291498184 +- 0.0457349492366
+ripple2d: 12.1278275967 +- 0.0184532784891
+
+pypy iter/iterator.py
+sum1d: 2.89783701897 +- 0.0338818402654
+sum2d: 6.21735162735 +- 0.0305362100956
+whsum2d: 5.78359918594 +- 0.0418847806897
+wsum1d: 2.90417222977 +- 0.00550225146282
+wsum2d: 6.1562063694 +- 0.0248465945318
+xsum1d: 3.15220880508 +- 0.0238542345497
+xsum2d: 6.17962999344 +- 0.00522105603458
+xysum2d: 6.54959263802 +- 0.0204275708962
+mean1d: 8.9222530365 +- 0.0569358104413
+median1d: 9.32725701332 +- 0.0118218937952
+ripple1d: 7.84541239738 +- 0.0128802667437
+ripple2d: 11.0337635994 +- 0.0381211395066
+
+pypy iter/range.py
+sum1d: 1.5154399395 +- 0.00218717543831
+sum2d: 1.78169260025 +- 0.0031213150465
+whsum2d: 1.78300223351 +- 0.00343094840578
+wsum1d: 1.51814541817 +- 0.00407496519924
+wsum2d: 1.78870997429 +- 0.0102636422345
+xsum1d: 1.78627576828 +- 0.0141348129086
+xsum2d: 2.04741225243 +- 0.00739649010433
+xysum2d: 2.04407844543 +- 0.00160511612186
+mean1d: 8.38623380661 +- 0.00678559642762
+median1d: 4.52713260651 +- 0.0215238656358
+ripple1d: 3.50191493034 +- 0.0344265982872
+ripple2d: 4.24880318642 +- 0.00425382282478
+
+pypy iter/while.py
+sum1d: 0.987529802322 +- 0.00129671178469
+sum2d: 1.75787782669 +- 0.00201957281403
+whsum2d: 1.75991163254 +- 0.000927917277915
+wsum1d: 0.989894151688 +- 0.00303482111217
+wsum2d: 1.76429200172 +- 0.00496186597436
+xsum1d: 1.23868374825 +- 0.00168004472652
+xsum2d: 2.0280834198 +- 0.00263820277497
+xysum2d: 2.02464160919 +- 0.00174334572372
+mean1d: 8.34133095741 +- 0.0121082272197
+median1d: 4.27610282898 +- 0.0313133386645
+ripple1d: 2.98665003777 +- 0.0134154060239
+ripple2d: 4.04827785492 +- 0.055327379039
+
+pypy iter/ndindex.py
+NotImplementedError: unable to create dtype from objects, "DummyArray" instance not supported
+
+pypy iter/nditer.py
+sum1d: 61.2064362049 +- 0.578041254203
+sum2d: 71.1426748753 +- 1.09482960038
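The result file above pairs each benchmark with a run time and a spread in the form
"name: mean +- deviation". A small sketch of how such lines could be read back for comparison
(parse_results is a hypothetical helper, not part of the benchmark harness):

    def parse_results(lines):
        # Turn "name: mean +- deviation" lines into {name: (mean, deviation)}.
        out = {}
        for line in lines:
            if ':' in line and '+-' in line:
                name, rest = line.split(':', 1)
                mean, dev = rest.split('+-')
                out[name.strip()] = (float(mean), float(dev))
        return out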
diff --git a/talk/dls2012/benchmarks/runall.sh b/talk/dls2012/benchmarks/runall.sh
--- a/talk/dls2012/benchmarks/runall.sh
+++ b/talk/dls2012/benchmarks/runall.sh
@@ -1,6 +1,8 @@
 #!/bin/bash
 
 #./benchmark.sh pypy
+./benchmark.sh pypy-2.4.0
+./benchmark.sh pypy-2.0
 #./benchmark.sh pypy --jit enable_opts=intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi
 #./benchmark.sh pypy-1.5
 #./benchmark.sh pypy-1.5 --jit enable_opts=intbounds:rewrite:virtualize:heap:unroll
@@ -8,10 +10,11 @@
 #./benchmark.sh gcc
 #./benchmark.sh gcc -O2
 #./benchmark.sh gcc -O3 -march=native -fno-tree-vectorize
+./benchmark.sh gcc -O3 -march=native
 #./benchmark.sh python2.7
 #./benchmark.sh python2.6 psyco-wrapper.py
 #./benchmark.sh luajit-2.0.0-beta10
 #./benchmark.sh luajit-2.0.0-beta10 -O-loop
 ./benchmark.sh luajit
-./benchmark.sh luajit -O-loop
+#./benchmark.sh luajit -O-loop
 #./benchmark.sh luajit
diff --git a/talk/dls2012/benchmarks/runiter.sh b/talk/dls2012/benchmarks/runiter.sh
--- a/talk/dls2012/benchmarks/runiter.sh
+++ b/talk/dls2012/benchmarks/runiter.sh
@@ -10,7 +10,7 @@
 for p in iter/*.py; do
     echo pypy $p
     for b in $BENCHMARKS; do
-        /tmp/pypy-trunk ./runner.py -n 5 $p $b 10
+        pypy ./runner.py -n 5 $p $b 10
     done
     echo
 done
\ No newline at end of file