Author: Armin Rigo <[email protected]>
Branch: py3.5
Changeset: r88795:eb1bdeb6f204
Date: 2016-12-01 17:22 +0100
http://bitbucket.org/pypy/pypy/changeset/eb1bdeb6f204/
Log: hg merge default
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -14,7 +14,19 @@
cache.clear()
cache = space.fromcache(MapAttrCache)
cache.clear()
+
rgc.collect()
+
+ # if we are running in gc.disable() mode but gc.collect() is called,
+ # we should still call the finalizers now. We do this as an attempt
+ # to get closer to CPython's behavior: in Py3.5 some tests
+ # specifically rely on that. This is similar to how, in CPython, an
+ # explicit gc.collect() will invoke finalizers from cycles and fully
+ # ignore the gc.disable() mode.
+ if not space.user_del_action.enabled_at_app_level:
+ enable_finalizers(space)
+ disable_finalizers(space)
+
return space.wrap(0)
def enable(space):
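
(For reference, a minimal sketch in plain CPython 3.5, not the PyPy module above, of the behavior the new comment describes: even with automatic collection disabled, an explicit gc.collect() still collects cycles and runs their finalizers. The Cycle class is only an illustration.)

    import gc

    class Cycle(object):
        def __init__(self):
            self.ref = self        # make the instance part of a reference cycle
        def __del__(self):
            print("finalizer ran")

    gc.disable()                   # automatic collection is off
    Cycle()                        # the cycle is unreachable but not yet collected
    gc.collect()                   # explicit collect still runs the finalizer
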
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -518,12 +518,10 @@
def test_prod(self):
result = self.run("prod")
assert int(result) == 576
- self.check_vectorized(1, 1)
def test_prod_zero(self):
result = self.run("prod_zero")
assert int(result) == 0
- self.check_vectorized(1, 1)
def define_max():
diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
--- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
@@ -77,7 +77,6 @@
arith_comb = [
('sum','int', 1742, 1742, 1),
('sum','float', 2581, 2581, 1),
- ('prod','float', 1, 3178, 1),
('prod','int', 1, 3178, 1),
('any','int', 1, 2239, 1),
('any','int', 0, 4912, 0),
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -1128,7 +1128,7 @@
value = sum(value)
elif info.accum_operation == '*':
def prod(acc, x): return acc * x
- value = reduce(prod, value, 1)
+ value = reduce(prod, value, 1.0)
else:
raise NotImplementedError("accum operator in fail guard")
values[i] = value
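
(A standalone sketch, not the backend code itself, of what the '*' accumulation branch above does when a guard fails: the per-lane partial products are folded back into a single scalar, and the 1.0 identity keeps the reduction in float. The lane values below are made up.)

    from functools import reduce

    lanes = [2.0, 3.0, 4.0]              # hypothetical partial products, one per vector lane

    def prod(acc, x):
        return acc * x

    scalar = reduce(prod, lanes, 1.0)    # float identity, result stays a float
    print(scalar)                        # 24.0
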
diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py
--- a/rpython/jit/metainterp/optimizeopt/schedule.py
+++ b/rpython/jit/metainterp/optimizeopt/schedule.py
@@ -980,7 +980,6 @@
class AccumPack(Pack):
SUPPORTED = { rop.FLOAT_ADD: '+',
rop.INT_ADD: '+',
- rop.FLOAT_MUL: '*',
}
def __init__(self, nodes, operator, position):
diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
--- a/rpython/jit/metainterp/optimizeopt/vector.py
+++ b/rpython/jit/metainterp/optimizeopt/vector.py
@@ -847,6 +847,10 @@
vecop, count)
oplist.append(vecop)
elif pack.reduce_init() == 1:
+ # PRECISION loss, because the numbers are accumulated (associative and
+ # commutative properties must hold): a small number can end up multiplied
+ # with a huge one, giving an imprecise result, thus this is disabled for now
+ raise NotImplementedError
# multiply is only supported by floats
vecop = OpHelpers.create_vec_expand(ConstFloat(1.0), bytesize,
signed, count)
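
(A small illustration in plain Python, not the JIT code path, of the precision problem that the raise above guards against: a vectorized product accumulates lanes in a different order than a left-to-right reduce, so intermediate lane products can underflow or overflow even when the true product is perfectly representable. The values below are made up.)

    vals = [1e-200, 1e200, 1e-200, 1e200]

    sequential = 1.0
    for v in vals:
        sequential *= v              # 1e-200 -> 1.0 -> 1e-200 -> 1.0

    lane0 = vals[0] * vals[2]        # 1e-400 underflows to 0.0
    lane1 = vals[1] * vals[3]        # 1e+400 overflows to inf
    paired = lane0 * lane1           # 0.0 * inf gives nan

    print(sequential, paired)        # 1.0 nan
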
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py
--- a/rpython/jit/metainterp/test/test_vector.py
+++ b/rpython/jit/metainterp/test/test_vector.py
@@ -414,7 +414,9 @@
lambda a,b: lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed)
small_floats = st.floats(min_value=-100, max_value=100, allow_nan=False,
allow_infinity=False)
test_vec_float_sum = vec_reduce(small_floats, lambda a,b: a+b, rffi.DOUBLE)
- test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE)
+ # PRECISION loss, because the numbers are accumulated (associative and commutative properties must hold):
+ # a small number can end up being multiplied with a huge one, losing precision
+ # test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE)
def test_constant_expand(self):
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit