Author: Maciej Fijalkowski <[email protected]>
Branch: result-in-resops
Changeset: r58416:856e6f99d11b
Date: 2012-10-25 12:55 +0200
http://bitbucket.org/pypy/pypy/changeset/856e6f99d11b/
Log: slow progress on changing all APIs yet again
diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py
--- a/pypy/jit/metainterp/optimizeopt/intbounds.py
+++ b/pypy/jit/metainterp/optimizeopt/intbounds.py
@@ -27,19 +27,19 @@
# FIXME: This takes care of the instruction where box is the result
# but the bounds produced by all instructions where box is
# an argument might also be tightened
+ xxx
v = self.getvalue(box)
- b = v.intbound
+ b = v.getintbound()
if b.has_lower and b.has_upper and b.lower == b.upper:
v.make_constant(ConstInt(b.lower))
if isinstance(box, AbstractResOp):
dispatch_bounds_ops(self, box)
- def optimize_GUARD_TRUE(self, op):
- self.emit_operation(op)
+ def postprocess_GUARD_TRUE(self, op):
self.propagate_bounds_backward(op.getarg(0))
- optimize_GUARD_FALSE = optimize_GUARD_TRUE
- optimize_GUARD_VALUE = optimize_GUARD_TRUE
+ postprocess_GUARD_FALSE = postprocess_GUARD_TRUE
+ postprocess_GUARD_VALUE = postprocess_GUARD_TRUE
def optimize_INT_XOR(self, op):
v1 = self.getvalue(op.getarg(0))
@@ -48,10 +48,10 @@
self.make_constant_int(op, 0)
return
self.emit_operation(op)
- if v1.intbound.known_ge(IntBound(0, 0)) and \
- v2.intbound.known_ge(IntBound(0, 0)):
+ if v1.getintbound().known_ge(IntBound(0, 0)) and \
+ v2.getintbound().known_ge(IntBound(0, 0)):
r = self.getvalue(op)
- r.intbound.make_ge(IntLowerBound(0))
+ r.getintbound().make_ge(IntLowerBound(0))
def optimize_INT_AND(self, op):
v1 = self.getvalue(op.getarg(0))
@@ -62,51 +62,50 @@
if v2.is_constant():
val = v2.op.getint()
if val >= 0:
- r.intbound.intersect(IntBound(0,val))
+ r.getintbound().intersect(IntBound(0,val))
elif v1.is_constant():
val = v1.op.getint()
if val >= 0:
- r.intbound.intersect(IntBound(0,val))
+ r.getintbound().intersect(IntBound(0,val))
- def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.getarg(0))
- v2 = self.getvalue(op.getarg(1))
- self.emit_operation(op)
- r = self.getvalue(op)
- b = v1.intbound.sub_bound(v2.intbound)
+ def postprocess_INT_SUB(self, op):
+ v1 = self.getforwarded(op.getarg(0))
+ v2 = self.getforwarded(op.getarg(1))
+ r = self.getforwarded(op)
+ b = v1.getintbound().sub_bound(v2.getintbound())
if b.bounded():
- r.intbound.intersect(b)
+ r.getintbound().intersect(b)
def optimize_INT_ADD(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op)
- b = v1.intbound.add_bound(v2.intbound)
+ b = v1.getintbound().add_bound(v2.getintbound())
if b.bounded():
- r.intbound.intersect(b)
+ r.getintbound().intersect(b)
def optimize_INT_MUL(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op)
- b = v1.intbound.mul_bound(v2.intbound)
+ b = v1.getintbound().mul_bound(v2.getintbound())
if b.bounded():
- r.intbound.intersect(b)
+ r.getintbound().intersect(b)
def optimize_INT_FLOORDIV(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op)
- r.intbound.intersect(v1.intbound.div_bound(v2.intbound))
+ r.getintbound().intersect(v1.getintbound().div_bound(v2.getintbound()))
def optimize_INT_MOD(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- known_nonneg = (v1.intbound.known_ge(IntBound(0, 0)) and
- v2.intbound.known_ge(IntBound(0, 0)))
+ known_nonneg = (v1.getintbound().known_ge(IntBound(0, 0)) and
+ v2.getintbound().known_ge(IntBound(0, 0)))
if known_nonneg and v2.is_constant():
val = v2.op.getint()
if (val & (val-1)) == 0:
@@ -124,18 +123,18 @@
return # give up
val = -val
if known_nonneg:
- r.intbound.make_ge(IntBound(0, 0))
+ r.getintbound().make_ge(IntBound(0, 0))
else:
- r.intbound.make_gt(IntBound(-val, -val))
- r.intbound.make_lt(IntBound(val, val))
+ r.getintbound().make_gt(IntBound(-val, -val))
+ r.getintbound().make_lt(IntBound(val, val))
def optimize_INT_LSHIFT(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op)
- b = v1.intbound.lshift_bound(v2.intbound)
- r.intbound.intersect(b)
+ b = v1.getintbound().lshift_bound(v2.getintbound())
+ r.getintbound().intersect(b)
# intbound.lshift_bound checks for an overflow and if the
# lshift can be proven not to overflow sets b.has_upper and
# b.has_lower
@@ -147,14 +146,14 @@
def optimize_INT_RSHIFT(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- b = v1.intbound.rshift_bound(v2.intbound)
+ b = v1.getintbound().rshift_bound(v2.getintbound())
if b.has_lower and b.has_upper and b.lower == b.upper:
# constant result (likely 0, for rshifts that kill all bits)
self.make_constant_int(op, b.lower)
else:
self.emit_operation(op)
r = self.getvalue(op)
- r.intbound.intersect(b)
+ r.getintbound().intersect(b)
def optimize_GUARD_NO_OVERFLOW(self, op):
lastop = self.last_emitted_operation
@@ -199,7 +198,7 @@
def optimize_INT_ADD_OVF(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- resbound = v1.intbound.add_bound(v2.intbound)
+ resbound = v1.getintbound().add_bound(v2.getintbound())
if resbound.bounded():
# Transform into INT_ADD. The following guard will be killed
# by optimize_GUARD_NO_OVERFLOW; if we see instead an
@@ -207,34 +206,34 @@
op = self.optimizer.copy_and_change(op, rop.INT_ADD)
self.emit_operation(op) # emit the op
r = self.getvalue(op)
- r.intbound.intersect(resbound)
+ r.getintbound().intersect(resbound)
def optimize_INT_SUB_OVF(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- resbound = v1.intbound.sub_bound(v2.intbound)
+ resbound = v1.getintbound().sub_bound(v2.getintbound())
if resbound.bounded():
op = self.optimizer.copy_and_change(op, rop.INT_SUB)
self.emit_operation(op) # emit the op
r = self.getvalue(op)
- r.intbound.intersect(resbound)
+ r.getintbound().intersect(resbound)
def optimize_INT_MUL_OVF(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- resbound = v1.intbound.mul_bound(v2.intbound)
+ resbound = v1.getintbound().mul_bound(v2.getintbound())
if resbound.bounded():
op = self.optimizer.copy_and_change(op, rop.INT_MUL)
self.emit_operation(op)
r = self.getvalue(op)
- r.intbound.intersect(resbound)
+ r.getintbound().intersect(resbound)
def optimize_INT_LT(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.known_lt(v2.intbound):
+ if v1.getintbound().known_lt(v2.getintbound()):
self.make_constant_int(op, 1)
- elif v1.intbound.known_ge(v2.intbound) or v1 is v2:
+ elif v1.getintbound().known_ge(v2.getintbound()) or v1 is v2:
self.make_constant_int(op, 0)
else:
self.emit_operation(op)
@@ -242,9 +241,9 @@
def optimize_INT_GT(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.known_gt(v2.intbound):
+ if v1.getintbound().known_gt(v2.getintbound()):
self.make_constant_int(op, 1)
- elif v1.intbound.known_le(v2.intbound) or v1 is v2:
+ elif v1.getintbound().known_le(v2.getintbound()) or v1 is v2:
self.make_constant_int(op, 0)
else:
self.emit_operation(op)
@@ -252,9 +251,9 @@
def optimize_INT_LE(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.known_le(v2.intbound) or v1 is v2:
+ if v1.getintbound().known_le(v2.getintbound()) or v1 is v2:
self.make_constant_int(op, 1)
- elif v1.intbound.known_gt(v2.intbound):
+ elif v1.getintbound().known_gt(v2.getintbound()):
self.make_constant_int(op, 0)
else:
self.emit_operation(op)
@@ -262,9 +261,9 @@
def optimize_INT_GE(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.known_ge(v2.intbound) or v1 is v2:
+ if v1.getintbound().known_ge(v2.getintbound()) or v1 is v2:
self.make_constant_int(op, 1)
- elif v1.intbound.known_lt(v2.intbound):
+ elif v1.getintbound().known_lt(v2.getintbound()):
self.make_constant_int(op, 0)
else:
self.emit_operation(op)
@@ -272,9 +271,9 @@
def optimize_INT_EQ(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.known_gt(v2.intbound):
+ if v1.getintbound().known_gt(v2.getintbound()):
self.make_constant_int(op, 0)
- elif v1.intbound.known_lt(v2.intbound):
+ elif v1.getintbound().known_lt(v2.getintbound()):
self.make_constant_int(op, 0)
elif v1 is v2:
self.make_constant_int(op, 1)
@@ -284,9 +283,9 @@
def optimize_INT_NE(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.known_gt(v2.intbound):
+ if v1.getintbound().known_gt(v2.getintbound()):
self.make_constant_int(op, 1)
- elif v1.intbound.known_lt(v2.intbound):
+ elif v1.getintbound().known_lt(v2.getintbound()):
self.make_constant_int(op, 1)
elif v1 is v2:
self.make_constant_int(op, 0)
@@ -298,50 +297,50 @@
array = self.getvalue(op.getarg(0))
result = self.getvalue(op)
array.make_len_gt(MODE_ARRAY, op.getdescr(), -1)
- array.lenbound.bound.intersect(result.intbound)
- result.intbound = array.lenbound.bound
+ array.lenbound.bound.intersect(result.getintbound())
+ result.setintbound(array.lenbound.bound)
def optimize_STRLEN(self, op):
self.emit_operation(op)
array = self.getvalue(op.getarg(0))
result = self.getvalue(op)
array.make_len_gt(MODE_STR, op.getdescr(), -1)
- array.lenbound.bound.intersect(result.intbound)
- result.intbound = array.lenbound.bound
+ array.lenbound.bound.intersect(result.getintbound())
+ result.setintbound(array.lenbound.bound)
def optimize_UNICODELEN(self, op):
self.emit_operation(op)
array = self.getvalue(op.getarg(0))
result = self.getvalue(op)
array.make_len_gt(MODE_UNICODE, op.getdescr(), -1)
- array.lenbound.bound.intersect(result.intbound)
- result.intbound = array.lenbound.bound
+ array.lenbound.bound.intersect(result.getintbound())
+ result.setintbound(array.lenbound.bound)
def optimize_STRGETITEM(self, op):
self.emit_operation(op)
v1 = self.getvalue(op)
- v1.intbound.make_ge(IntLowerBound(0))
- v1.intbound.make_lt(IntUpperBound(256))
+ v1.getintbound().make_ge(IntLowerBound(0))
+ v1.getintbound().make_lt(IntUpperBound(256))
def optimize_UNICODEGETITEM(self, op):
self.emit_operation(op)
v1 = self.getvalue(op)
- v1.intbound.make_ge(IntLowerBound(0))
+ v1.getintbound().make_ge(IntLowerBound(0))
def make_int_lt(self, box1, box2):
v1 = self.getvalue(box1)
v2 = self.getvalue(box2)
- if v1.intbound.make_lt(v2.intbound):
+ if v1.getintbound().make_lt(v2.getintbound()):
self.propagate_bounds_backward(box1)
- if v2.intbound.make_gt(v1.intbound):
+ if v2.getintbound().make_gt(v1.getintbound()):
self.propagate_bounds_backward(box2)
def make_int_le(self, box1, box2):
v1 = self.getvalue(box1)
v2 = self.getvalue(box2)
- if v1.intbound.make_le(v2.intbound):
+ if v1.getintbound().make_le(v2.getintbound()):
self.propagate_bounds_backward(box1)
- if v2.intbound.make_ge(v1.intbound):
+ if v2.getintbound().make_ge(v1.getintbound()):
self.propagate_bounds_backward(box2)
def make_int_gt(self, box1, box2):
@@ -388,9 +387,9 @@
if r.op.same_constant(CONST_1):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.intersect(v2.intbound):
+ if v1.getintbound().intersect(v2.getintbound()):
self.propagate_bounds_backward(op.getarg(0))
- if v2.intbound.intersect(v1.intbound):
+ if v2.getintbound().intersect(v1.getintbound()):
self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_NE(self, op):
@@ -399,9 +398,9 @@
if r.op.same_constant(CONST_0):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
- if v1.intbound.intersect(v2.intbound):
+ if v1.getintbound().intersect(v2.getintbound()):
self.propagate_bounds_backward(op.getarg(0))
- if v2.intbound.intersect(v1.intbound):
+ if v2.getintbound().intersect(v1.getintbound()):
self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_IS_TRUE(self, op):
@@ -409,8 +408,8 @@
if r.is_constant():
if r.op.same_constant(CONST_1):
v1 = self.getvalue(op.getarg(0))
- if v1.intbound.known_ge(IntBound(0, 0)):
- v1.intbound.make_gt(IntBound(0, 0))
+ if v1.getintbound().known_ge(IntBound(0, 0)):
+ v1.getintbound().make_gt(IntBound(0, 0))
self.propagate_bounds_backward(op.getarg(0))
def propagate_bounds_INT_IS_ZERO(self, op):
@@ -421,49 +420,49 @@
# Clever hack, we can't use self.make_constant_int yet because
# the args aren't in the values dictionary yet so it runs into
# an assert, this is a clever way of expressing the same thing.
- v1.intbound.make_ge(IntBound(0, 0))
- v1.intbound.make_lt(IntBound(1, 1))
+ v1.getintbound().make_ge(IntBound(0, 0))
+ v1.getintbound().make_lt(IntBound(1, 1))
self.propagate_bounds_backward(op.getarg(0))
def propagate_bounds_INT_ADD(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op)
- b = r.intbound.sub_bound(v2.intbound)
- if v1.intbound.intersect(b):
+ b = r.getintbound().sub_bound(v2.getintbound())
+ if v1.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(0))
- b = r.intbound.sub_bound(v1.intbound)
- if v2.intbound.intersect(b):
+ b = r.getintbound().sub_bound(v1.getintbound())
+ if v2.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_SUB(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op)
- b = r.intbound.add_bound(v2.intbound)
- if v1.intbound.intersect(b):
+ b = r.getintbound().add_bound(v2.getintbound())
+ if v1.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(0))
- b = r.intbound.sub_bound(v1.intbound).mul(-1)
- if v2.intbound.intersect(b):
+ b = r.getintbound().sub_bound(v1.getintbound()).mul(-1)
+ if v2.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_MUL(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op)
- b = r.intbound.div_bound(v2.intbound)
- if v1.intbound.intersect(b):
+ b = r.getintbound().div_bound(v2.getintbound())
+ if v1.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(0))
- b = r.intbound.div_bound(v1.intbound)
- if v2.intbound.intersect(b):
+ b = r.getintbound().div_bound(v1.getintbound())
+ if v2.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_LSHIFT(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op)
- b = r.intbound.rshift_bound(v2.intbound)
- if v1.intbound.intersect(b):
+ b = r.getintbound().rshift_bound(v2.getintbound())
+ if v1.getintbound().intersect(b):
self.propagate_bounds_backward(op.getarg(0))
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
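
The intbounds changes above all follow the same pattern: the bound of a value is now reached through getintbound() instead of the bare intbound attribute, and the result's bound is narrowed by intersecting it with a bound computed from the arguments. The following is a minimal sketch of that narrowing idea only; SimpleBound is a made-up stand-in, not the real IntBound API from intutils.py.

    class SimpleBound(object):
        def __init__(self, lower, upper):
            self.lower = lower
            self.upper = upper

        def add_bound(self, other):
            # [a, b] + [c, d] = [a + c, b + d]
            return SimpleBound(self.lower + other.lower,
                               self.upper + other.upper)

        def intersect(self, other):
            # keep the tightest interval; report whether anything changed
            changed = False
            if other.lower > self.lower:
                self.lower = other.lower
                changed = True
            if other.upper < self.upper:
                self.upper = other.upper
                changed = True
            return changed

    v1 = SimpleBound(0, 10)        # bound of the first argument
    v2 = SimpleBound(5, 5)         # bound of the second argument (a constant)
    r = SimpleBound(-100, 100)     # what was known about the result so far
    r.intersect(v1.add_bound(v2))  # same shape as postprocess_INT_ADD above
    assert (r.lower, r.upper) == (5, 15)
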
diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py
--- a/pypy/jit/metainterp/optimizeopt/intutils.py
+++ b/pypy/jit/metainterp/optimizeopt/intutils.py
@@ -248,7 +248,14 @@
guards.append(op)
op = ResOperation(rop.GUARD_TRUE, [res], None)
guards.append(op)
-
+
+class ConstantIntBound(IntBound):
+ has_upper = True
+ has_lower = True
+
+ def __init__(self, v):
+ self.upper = v
+ self.lower = v
class IntUpperBound(IntBound):
def __init__(self, upper):
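
A hypothetical usage sketch of the new ConstantIntBound, assuming the import path on this branch: a constant's bound has lower == upper, so the generic IntBound queries used in intbounds.py work on it without a special case for constants.

    from pypy.jit.metainterp.optimizeopt.intutils import IntBound, ConstantIntBound

    b = ConstantIntBound(42)
    assert b.has_lower and b.has_upper and b.lower == b.upper == 42
    assert b.known_ge(IntBound(0, 0))      # 42 is known to be >= [0, 0]
    assert IntBound(0, 10).known_lt(b)     # [0, 10] is known to be < 42
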
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -297,6 +297,15 @@
self.last_emitted_operation = op
return op
+ def postprocess_op(self, op):
+ name = 'postprocess_' + opname[op.getopnum()]
+ next_func = getattr(self, name, self.postprocess_default)
+ if next_func is not None:
+ next_func(op)
+
+ def postprocess_default(self, op):
+ pass
+
# FIXME: Move some of these here?
def getforwarded(self, op):
return self.optimizer.getforwarded(op)
@@ -434,7 +443,11 @@
return op
value = op._forwarded
if value is None:
- value = op.make_forwarded_copy()
+ # we only need to make a new copy if the old one is immutable
+ if op.is_mutable:
+ value = op
+ else:
+ value = op.make_forwarded_copy()
else:
if value._forwarded:
while value._forwarded:
@@ -531,12 +544,15 @@
i = 0
while i < len(self.loop.operations):
op = self.loop.operations[i]
+ orig_op = op
for opt in self.optimizations:
op = opt.optimize_operation(op)
if op is None:
break
else:
self.emit_operation(op)
+ for opt in self.optimizations:
+ opt.postprocess_op(orig_op)
i += 1
self.loop.operations = self.get_newoperations()
self.loop.quasi_immutable_deps = self.quasi_immutable_deps
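
A stand-alone sketch of the new two-phase flow introduced above (class and attribute names here are illustrative, not the real optimizer classes): each optimization may transform or drop an operation in optimize_operation, the driver emits whatever survives, and then every optimization gets a postprocess_<OPNAME> hook dispatched by name, as postprocess_op does above.

    class SketchOptimization(object):
        def optimize_operation(self, op):
            return op                          # default: pass the op through

        def postprocess_default(self, op):
            pass

        def postprocess_op(self, op):
            func = getattr(self, 'postprocess_' + op.opname,
                           self.postprocess_default)
            func(op)

    class SketchIntBounds(SketchOptimization):
        def __init__(self):
            self.seen = []

        def postprocess_INT_SUB(self, op):
            self.seen.append(op.opname)        # e.g. narrow the result bound here

    class SketchOp(object):
        def __init__(self, opname):
            self.opname = opname

    def optimize_loop(operations, optimizations):
        emitted = []
        for op in operations:
            orig_op = op
            for opt in optimizations:
                op = opt.optimize_operation(op)
                if op is None:
                    break                      # the op was optimized away
            else:
                emitted.append(op)
            for opt in optimizations:          # postprocess runs regardless
                opt.postprocess_op(orig_op)
        return emitted

    opt = SketchIntBounds()
    optimize_loop([SketchOp('INT_SUB'), SketchOp('INT_ADD')], [opt])
    assert opt.seen == ['INT_SUB']             # only the specific hook fired
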
diff --git a/pypy/jit/metainterp/optimizeopt/pure.py b/pypy/jit/metainterp/optimizeopt/pure.py
--- a/pypy/jit/metainterp/optimizeopt/pure.py
+++ b/pypy/jit/metainterp/optimizeopt/pure.py
@@ -47,7 +47,7 @@
self.remember_emitting_pure(op)
# otherwise, the operation remains
- if op.returns_bool_result():
+ if newop.returns_bool_result():
newop.is_bool_box = True
if nextop:
self.emit_operation(nextop)
diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py
--- a/pypy/jit/metainterp/optimizeopt/rewrite.py
+++ b/pypy/jit/metainterp/optimizeopt/rewrite.py
@@ -89,15 +89,15 @@
self.emit_operation(op)
def optimize_INT_SUB(self, op):
- v2 = self.getvalue(op.getarg(1))
- if v2.is_constant() and v2.op.getint() == 0:
+ v2 = self.getforwarded(op.getarg(1))
+ if v2.is_constant() and v2.getint() == 0:
self.replace(op, op.getarg(0))
else:
- self.emit_operation(op)
# Synthesize the reverse ops for optimize_default to reuse
self.pure(op.getarg(0), rop.INT_ADD, op.getarg(1), op)
self.pure(op.getarg(0), rop.INT_ADD, op, op.getarg(1))
self.pure(op.getarg(1), rop.INT_SUB, op.getarg(0), op)
+ return op
def optimize_INT_ADD(self, op):
arg1 = op.getarg(0)
@@ -193,8 +193,9 @@
self.pure(op.getarg(0), rop.FLOAT_NEG, op)
def optimize_guard(self, op, constbox, emit_operation=True):
- value = self.getvalue(op.getarg(0))
+ value = self.getforwarded(op.getarg(0))
if value.is_constant():
+ xxx
box = value.op
assert isinstance(box, Const)
if not box.same_constant(constbox):
@@ -202,11 +203,17 @@
'always fail')
return
if emit_operation:
- self.emit_operation(op)
- value = self.getvalue(op.getarg(0)) # might have been forwarded
+ return self.getforwarded(op)
+
+ def postprocess_guard(self, op):
+ value = self.getforwarded(op.getarg(0))
value.make_constant(constbox)
self.optimizer.turned_constant(value)
+ def postprocess_op(self, op):
+ if op.is_guard():
+ self.postprocess_guard(op)
+
def optimize_GUARD_ISNULL(self, op):
value = self.getvalue(op.getarg(0))
if value.is_null():
@@ -227,12 +234,12 @@
value.make_nonnull(op, pos)
def optimize_GUARD_VALUE(self, op):
- value = self.getvalue(op.getarg(0))
- if value.last_guard:
+ value = self.getforwarded(op.getarg(0))
+ if value.getlastguard():
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value, which is rather silly.
# replace the original guard with a guard_value
- old_guard_op = value.last_guard
+ old_guard_op = value.getlastguard()
if old_guard_op.getopnum() != rop.GUARD_NONNULL:
# This is only safe if the class of the guard_value matches the
# class of the guard_*_class, otherwise the intermediate ops might
diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py
--- a/pypy/jit/metainterp/optimizeopt/simplify.py
+++ b/pypy/jit/metainterp/optimizeopt/simplify.py
@@ -43,7 +43,7 @@
xxx
return self.optimize_JUMP(op.copy_and_change(rop.JUMP))
self.last_label_descr = op.getdescr()
- self.emit_operation(op)
+ return op
def optimize_JUMP(self, op):
if not self.unroll:
diff --git a/pypy/jit/metainterp/optmodel.py b/pypy/jit/metainterp/optmodel.py
--- a/pypy/jit/metainterp/optmodel.py
+++ b/pypy/jit/metainterp/optmodel.py
@@ -3,10 +3,18 @@
"""
from pypy.tool.sourcetools import func_with_new_name
-from pypy.jit.metainterp.resoperation import opclasses, opclasses_mutable, rop
+from pypy.jit.metainterp.resoperation import opclasses, opclasses_mutable, rop,\
+ INT, ConstInt
+from pypy.jit.metainterp.optimizeopt.intutils import ImmutableIntUnbounded,\
+ ConstantIntBound
+
+class __extend__(ConstInt):
+ def getintbound(self):
+ return ConstantIntBound(self.getint())
def create_mutable_subclasses():
def addattr(cls, attr, default_value=None):
+ cls.attributes_to_copy.append('_' + attr)
def getter(self):
return getattr(self, '_' + attr)
def setter(self, value):
@@ -15,17 +23,35 @@
setattr(cls, 'get' + attr, func_with_new_name(getter, 'get' + attr))
setattr(cls, 'set' + attr, func_with_new_name(setter, 'set' + attr))
+ def make_new_copy_function(cls, paren_cls):
+ def _copy_extra_attrs(self, new):
+ paren_cls._copy_extra_attrs(self, new)
+ for attr in cls.attributes_to_copy:
+ setattr(new, getattr(self, attr))
+ cls._copy_extra_attrs = _copy_extra_attrs
+
+ imm_int_unbound = ImmutableIntUnbounded()
for i, cls in enumerate(opclasses):
if cls is None:
Mutable = None
else:
class Mutable(cls):
is_mutable = True
+ attributes_to_copy = []
if cls.is_guard() or cls.getopnum() == rop.FINISH:
addattr(Mutable, 'failargs')
if cls.is_guard():
addattr(Mutable, 'descr') # mutable guards have descrs
+ if cls.type == INT:
+ # all the integers have bounds
+ addattr(Mutable, 'intbound', imm_int_unbound)
+ # for tracking last guard and merging GUARD_VALUE with
+ # GUARD_NONNULL etc
+ addattr(Mutable, 'lastguard', None)
+ addattr(Mutable, 'lastguardpos', -1)
Mutable.__name__ = cls.__name__ + '_mutable'
+ if Mutable.attributes_to_copy:
+ make_new_copy_function(Mutable, cls)
assert len(opclasses_mutable) == i
opclasses_mutable.append(Mutable)
assert len(opclasses) == len(opclasses_mutable)
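
A self-contained sketch of the addattr() pattern used by create_mutable_subclasses above: a 'foo' slot is stored as '_foo' and exposed through generated getfoo()/setfoo() methods (the real code additionally renames the generated functions with func_with_new_name and records them in attributes_to_copy). MutableOpSketch below is illustrative only.

    def addattr(cls, attr, default_value=None):
        setattr(cls, '_' + attr, default_value)       # class-level default
        def getter(self):
            return getattr(self, '_' + attr)
        def setter(self, value):
            setattr(self, '_' + attr, value)
        setattr(cls, 'get' + attr, getter)
        setattr(cls, 'set' + attr, setter)

    class MutableOpSketch(object):
        pass

    addattr(MutableOpSketch, 'intbound')
    op = MutableOpSketch()
    assert op.getintbound() is None                   # falls back to the default
    op.setintbound('a-bound-object')
    assert op.getintbound() == 'a-bound-object'
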
diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py
--- a/pypy/jit/metainterp/resoperation.py
+++ b/pypy/jit/metainterp/resoperation.py
@@ -12,14 +12,14 @@
"""
-from pypy.rlib.objectmodel import we_are_translated, specialize
+from pypy.jit.codewriter import longlong
+from pypy.jit.codewriter import heaptracker
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rpython.ootypesystem import ootype
-from pypy.jit.codewriter import longlong
+from pypy.rlib.rarithmetic import is_valid_int, intmask
from pypy.rlib.objectmodel import compute_identity_hash, newlist_hint,\
- compute_unique_id, Symbolic
-from pypy.jit.codewriter import heaptracker
-from pypy.rlib.rarithmetic import is_valid_int, intmask
+ compute_unique_id, Symbolic, we_are_translated, specialize
+from pypy.tool.pairtype import extendabletype
INT = 'i'
REF = 'r'
@@ -156,6 +156,8 @@
class AbstractValue(object):
__slots__ = ()
+ __metaclass__ = extendabletype
+
def getint(self):
""" Get an integer value, if the box supports it, otherwise crash
"""
@@ -456,8 +458,8 @@
"""The central ResOperation class, representing one operation."""
# debug
- name = ""
- pc = 0
+ _name = ""
+ _pc = 0
_counter = 0
_hash = 0
@@ -566,8 +568,8 @@
sres = '%s = ' % (str(self),)
else:
sres = ''
- if self.name:
- prefix = "%s:%s " % (self.name, self.pc)
+ if self._name:
+ prefix = "%s:%s " % (self._name, self._pc)
if graytext:
prefix = "\f%s\f" % prefix
else:
@@ -650,6 +652,9 @@
return False # for tests
return opboolresult[opnum]
+ def _copy_extra_attrs(self, new):
+ pass
+
# some debugging help
def __setattr__(self, attr, val):
@@ -657,6 +662,17 @@
assert self._forwarded is None
object.__setattr__(self, attr, val)
+ def __getattribute__(self, attr):
+ if not attr.startswith('_') and attr != 'type':
+ # methods are fine
+ if not callable(getattr(self.__class__, attr, None)):
+ try:
+ assert self._forwarded is None
+ except AssertionError:
+ import pdb
+ pdb.set_trace()
+ return object.__getattribute__(self, attr)
+
# ===========
# type mixins
# ===========
@@ -690,27 +706,27 @@
intval = int(intval)
else:
assert isinstance(intval, Symbolic)
- self.intval = intval
+ self._intval = intval
def getint(self):
- return self.intval
+ return self._intval
getresult = getint
def getresultrepr(self):
- return str(self.intval)
+ return str(self._intval)
@staticmethod
def wrap_constant(intval):
return ConstInt(intval)
def constbox(self):
- return ConstInt(self.intval)
+ return ConstInt(self._intval)
def get_result_hash(self):
- return make_hashable_int(self.intval)
+ return make_hashable_int(self._intval)
def eq_value(self, other):
- return self.intval == other.getint()
+ return self._intval == other.getint()
class ResOpFloat(object):
_mixin_ = True
@@ -719,13 +735,13 @@
def __init__(self, floatval):
#assert isinstance(floatval, float)
# XXX not sure between float or float storage
- self.floatval = floatval
+ self._floatval = floatval
def getresultrepr(self):
- return str(self.floatval)
+ return str(self._floatval)
def getfloatstorage(self):
- return self.floatval
+ return self._floatval
getresult = getfloatstorage
@staticmethod
@@ -733,13 +749,13 @@
return ConstFloat(floatval)
def constbox(self):
- return ConstFloat(self.floatval)
+ return ConstFloat(self._floatval)
def get_result_hash(self):
- return longlong.gethash(self.floatval)
+ return longlong.gethash(self._floatval)
def eq_value(self, other):
- return self.floatval == other.getfloatstorage()
+ return self._floatval == other.getfloatstorage()
class ResOpPointer(object):
_mixin_ = True
@@ -747,10 +763,10 @@
def __init__(self, pval):
assert lltype.typeOf(pval) == llmemory.GCREF
- self.pval = pval
+ self._pval = pval
def getref_base(self):
- return self.pval
+ return self._pval
getresult = getref_base
def getref(self, TYPE):
@@ -758,11 +774,11 @@
def getresultrepr(self):
# XXX what do we want to put in here?
- return str(self.pval)
+ return str(self._pval)
def get_result_hash(self):
- if self.pval:
- return lltype.identityhash(self.pval)
+ if self._pval:
+ return lltype.identityhash(self._pval)
else:
return 0
@@ -771,10 +787,10 @@
return ConstPtr(pval)
def constbox(self):
- return ConstPtr(self.pval)
+ return ConstPtr(self._pval)
def eq_value(self, other):
- return self.pval == other.getref_base()
+ return self._pval == other.getref_base()
# ===================
# Top of the hierachy
@@ -852,6 +868,10 @@
descr.rd_frame_info_list = self._rd_frame_info_list
return descr
+ def _copy_extra_attrs(self, res):
+ res.set_rd_frame_info_list(self.get_rd_frame_info_list())
+ res.set_rd_snapshot(self.get_rd_snapshot())
+
# ============
# arity mixins
# ============
@@ -879,12 +899,9 @@
newopnum = self.getopnum()
res = create_resop_0(newopnum, self.getresult(),
descr or self.getdescr(), mutable=True)
- if self.is_guard():
- res.set_rd_frame_info_list(self.get_rd_frame_info_list())
- res.set_rd_snapshot(self.get_rd_snapshot())
- assert not self.is_mutable
assert not self._forwarded
self._forwarded = res
+ self._copy_extra_attrs(res)
return res
def get_key_op(self, opt):
@@ -932,12 +949,9 @@
newopnum = self.getopnum()
res = create_resop_1(newopnum, self.getresult(), arg0 or self._arg0,
descr or self.getdescr(), mutable=True)
- if self.is_guard():
- res.set_rd_frame_info_list(self.get_rd_frame_info_list())
- res.set_rd_snapshot(self.get_rd_snapshot())
- assert not self.is_mutable
assert not self._forwarded
self._forwarded = res
+ self._copy_extra_attrs(res)
return res
def get_arg_hash(self):
@@ -994,8 +1008,8 @@
res.set_rd_frame_info_list(self.get_rd_frame_info_list())
res.set_rd_snapshot(self.get_rd_snapshot())
assert not self._forwarded
- assert not self.is_mutable
self._forwarded = res
+ self._copy_extra_attrs(res)
return res
def get_arg_hash(self):
@@ -1055,10 +1069,9 @@
r = create_resop_3(newopnum, self.getresult(), arg0 or self._arg0,
arg1 or self._arg1, arg2 or self._arg2,
descr or self.getdescr(), mutable=True)
- assert not r.is_guard()
assert not self._forwarded
- assert not self.is_mutable
self._forwarded = r
+ self._copy_extra_attrs(r)
return r
def get_arg_hash(self):
@@ -1116,9 +1129,8 @@
r = create_resop(newopnum, self.getresult(),
newargs or self.getarglist(),
descr or self.getdescr(), mutable=True)
- assert not r.is_guard()
assert not self._forwarded
- assert not self.is_mutable
+ self._copy_extra_attrs(r)
self._forwarded = r
return r
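
Setting __metaclass__ = extendabletype on AbstractValue is what allows optmodel.py to graft getintbound() onto ConstInt with a "class __extend__(ConstInt)" statement. Below is a simplified re-implementation sketch of that trick, not the real metaclass from pypy.tool.pairtype; the names are made up for illustration.

    class extendabletype_sketch(type):
        def __new__(cls, name, bases, body):
            if name == '__extend__':
                # patch the body onto the base class instead of subclassing
                base = bases[0]
                for key, value in body.items():
                    if not key.startswith('__'):
                        setattr(base, key, value)
                return base
            return type.__new__(cls, name, bases, body)

    class ConstIntSketch(object):
        __metaclass__ = extendabletype_sketch         # Python 2 syntax
        def __init__(self, value):
            self._intval = value
        def getint(self):
            return self._intval

    class __extend__(ConstIntSketch):
        def getintbound(self):                        # grafted onto ConstIntSketch
            return (self.getint(), self.getint())

    assert ConstIntSketch(7).getintbound() == (7, 7)
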
diff --git a/pypy/jit/metainterp/test/test_optmodel.py b/pypy/jit/metainterp/test/test_optmodel.py
--- a/pypy/jit/metainterp/test/test_optmodel.py
+++ b/pypy/jit/metainterp/test/test_optmodel.py
@@ -42,7 +42,7 @@
def test_failargs():
op = rop.create_resop_0(rop.rop.GUARD_NO_OVERFLOW, None)
- assert not hasattr(op, 'set_failargs')
+ assert not hasattr(op, 'setfailargs')
op2 = op.make_forwarded_copy()
assert op._forwarded is op2
op2.setfailargs([1, 2, 3])