Author: Maciej Fijalkowski <[email protected]>
Branch: optresult-unroll
Changeset: r79286:9e0692ce2a85
Date: 2015-08-28 18:32 +0200
http://bitbucket.org/pypy/pypy/changeset/9e0692ce2a85/
Log: merge
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -16,6 +16,7 @@
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong
+from rpython.rlib.objectmodel import Symbolic
class LLTrace(object):
has_been_freed = False
@@ -88,6 +89,10 @@
def get_result_type(self):
return getkind(self.RESULT)[0]
+class TypeIDSymbolic(Symbolic):
+ def __init__(self, STRUCT_OR_ARRAY):
+ self.STRUCT_OR_ARRAY = STRUCT_OR_ARRAY
+
class SizeDescr(AbstractDescr):
def __init__(self, S, vtable, runner):
assert not isinstance(vtable, bool)
@@ -114,6 +119,10 @@
def is_immutable(self):
return heaptracker.is_immutable_struct(self.S)
+ def get_type_id(self):
+ assert isinstance(self.S, lltype.GcStruct)
+ return TypeIDSymbolic(self.S) # integer-like symbolic
+
def __repr__(self):
return 'SizeDescr(%r)' % (self.S,)
@@ -222,6 +231,10 @@
return intbounds.get_integer_max(
not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF))
+ def get_type_id(self):
+ assert isinstance(self.A, lltype.GcArray)
+ return TypeIDSymbolic(self.A) # integer-like symbolic
+
class InteriorFieldDescr(AbstractDescr):
def __init__(self, A, fieldname, runner):
@@ -281,6 +294,7 @@
supports_floats = True
supports_longlong = r_uint is not r_ulonglong
supports_singlefloats = True
+ supports_guard_gc_type = True
translate_support_code = False
is_llgraph = True
@@ -893,6 +907,16 @@
self.execute_guard_nonnull(descr, arg)
self.execute_guard_class(descr, arg, klass)
+ def execute_guard_gc_type(self, descr, arg, typeid):
+ assert isinstance(typeid, TypeIDSymbolic)
+ TYPE = arg._obj.container._TYPE
+ if TYPE != typeid.STRUCT_OR_ARRAY:
+ self.fail_guard(descr)
+
+ def execute_guard_nonnull_gc_type(self, descr, arg, typeid):
+ self.execute_guard_nonnull(descr, arg)
+ self.execute_guard_gc_type(descr, arg, typeid)
+
def execute_guard_no_exception(self, descr):
if self.last_exception is not None:
self.fail_guard(descr)
diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
--- a/rpython/jit/backend/llsupport/llmodel.py
+++ b/rpython/jit/backend/llsupport/llmodel.py
@@ -40,6 +40,7 @@
else:
translator = None
self.gc_ll_descr = get_ll_description(gcdescr, translator, rtyper)
+ self.supports_guard_gc_type = bool(translate_support_code)
if translator and translator.config.translation.gcremovetypeptr:
self.vtable_offset = None
else:
diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py
--- a/rpython/jit/backend/model.py
+++ b/rpython/jit/backend/model.py
@@ -15,6 +15,7 @@
# longlongs are supported by the JIT, but stored as doubles.
# Boxes and Consts are BoxFloats and ConstFloats.
supports_singlefloats = False
+ supports_guard_gc_type = False
propagate_exception_descr = None
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py
--- a/rpython/jit/backend/test/runner_test.py
+++ b/rpython/jit/backend/test/runner_test.py
@@ -4832,3 +4832,51 @@
assert a[i].a == a[i].b == val
else:
assert a[i] == rffi.cast(OF, val)
+
+ def test_passing_guard_gc_type_struct(self):
+ if not self.cpu.supports_guard_gc_type:
+ py.test.skip("guard_gc_type not available")
+ t_box, _, descr = self.alloc_instance(self.T)
+ c_typeid = ConstInt(descr.get_type_id())
+ self.execute_operation(rop.GUARD_GC_TYPE, [t_box, c_typeid], 'void')
+ assert not self.guard_failed
+ self.execute_operation(rop.GUARD_NONNULL_GC_TYPE, [t_box, c_typeid],
+ 'void')
+ assert not self.guard_failed
+
+ def test_passing_guard_gc_type_array(self):
+ if not self.cpu.supports_guard_gc_type:
+ py.test.skip("guard_gc_type not available")
+ a_box, A = self.alloc_array_of(rffi.SHORT, 342)
+ arraydescr = self.cpu.arraydescrof(A)
+ c_typeid = ConstInt(arraydescr.get_type_id())
+ self.execute_operation(rop.GUARD_GC_TYPE, [a_box, c_typeid], 'void')
+ assert not self.guard_failed
+ self.execute_operation(rop.GUARD_NONNULL_GC_TYPE, [a_box, c_typeid],
+ 'void')
+ assert not self.guard_failed
+
+ def test_failing_guard_gc_type(self):
+ if not self.cpu.supports_guard_gc_type:
+ py.test.skip("guard_gc_type not available")
+ t_box, _, tdescr = self.alloc_instance(self.T)
+ u_box, _, udescr = self.alloc_instance(self.U)
+ a_box, A = self.alloc_array_of(rffi.SHORT, 342)
+ adescr = self.cpu.arraydescrof(A)
+ c_ttypeid = ConstInt(tdescr.get_type_id())
+ c_utypeid = ConstInt(udescr.get_type_id())
+ c_atypeid = ConstInt(adescr.get_type_id())
+ null_box = self.null_instance()
+ for opname, args in [(rop.GUARD_GC_TYPE, [t_box, c_utypeid]),
+ (rop.GUARD_GC_TYPE, [u_box, c_ttypeid]),
+ (rop.GUARD_GC_TYPE, [a_box, c_utypeid]),
+ (rop.GUARD_GC_TYPE, [t_box, c_atypeid]),
+ (rop.GUARD_NONNULL_GC_TYPE, [t_box, c_utypeid]),
+ (rop.GUARD_NONNULL_GC_TYPE, [u_box, c_ttypeid]),
+ (rop.GUARD_NONNULL_GC_TYPE, [a_box, c_ttypeid]),
+ (rop.GUARD_NONNULL_GC_TYPE, [u_box, c_atypeid]),
+ (rop.GUARD_NONNULL_GC_TYPE, [null_box, c_ttypeid]),
+ (rop.GUARD_NONNULL_GC_TYPE, [null_box, c_atypeid]),
+ ]:
+ assert self.execute_operation(opname, args, 'void') == None
+ assert self.guard_failed
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1734,10 +1734,10 @@
self.mc.CMP(locs[0], locs[1])
self.implement_guard(guard_token, 'NE')
- def _cmp_guard_class(self, locs):
+ def _cmp_guard_class(self, loc_ptr, loc_classptr):
offset = self.cpu.vtable_offset
if offset is not None:
- self.mc.CMP(mem(locs[0], offset), locs[1])
+ self.mc.CMP(mem(loc_ptr, offset), loc_classptr)
else:
# XXX hard-coded assumption: to go from an object to its class
# we use the following algorithm:
@@ -1749,26 +1749,39 @@
# - multiply by 4 (on 32-bits only) and use it as an
# offset in type_info_group
# - add 16/32 bytes, to go past the TYPE_INFO structure
- loc = locs[1]
- assert isinstance(loc, ImmedLoc)
- classptr = loc.value
+ assert isinstance(loc_classptr, ImmedLoc)
+ classptr = loc_classptr.value
# here, we have to go back from 'classptr' to the value expected
- # from reading the half-word in the object header. Note that
- # this half-word is at offset 0 on a little-endian machine;
- # it would be at offset 2 or 4 on a big-endian machine.
+ # from reading the half-word in the object header.
from rpython.memory.gctypelayout import GCData
sizeof_ti = rffi.sizeof(GCData.TYPE_INFO)
type_info_group = llop.gc_get_type_info_group(llmemory.Address)
type_info_group = rffi.cast(lltype.Signed, type_info_group)
expected_typeid = classptr - sizeof_ti - type_info_group
- if IS_X86_32:
- expected_typeid >>= 2
- self.mc.CMP16(mem(locs[0], 0), ImmedLoc(expected_typeid))
- elif IS_X86_64:
- self.mc.CMP32_mi((locs[0].value, 0), expected_typeid)
+ self._cmp_guard_gc_type(loc_ptr, ImmedLoc(expected_typeid))
+
+ def _cmp_guard_gc_type(self, loc_ptr, loc_expected_typeid):
+ # Note that the typeid half-word is at offset 0 on a little-endian
+ # machine; it would be at offset 2 or 4 on a big-endian machine.
+ assert self.cpu.supports_guard_gc_type
+ if IS_X86_32:
+ self.mc.CMP16(mem(loc_ptr, 0), loc_expected_typeid)
+ else:
+ assert isinstance(loc_expected_typeid, ImmedLoc)
+ self.mc.CMP32_mi((loc_ptr.value, 0), loc_expected_typeid.value)
+
+ def _cmp_guard_class_or_gc_type(self, guard_op, locs):
+ if ( guard_op.getopnum() == rop.GUARD_CLASS or
+ guard_op.getopnum() == rop.GUARD_NONNULL_CLASS):
+ self._cmp_guard_class(locs[0], locs[1])
+ elif (guard_op.getopnum() == rop.GUARD_GC_TYPE or
+ guard_op.getopnum() == rop.GUARD_NONNULL_GC_TYPE):
+ self._cmp_guard_gc_type(locs[0], locs[1])
+ else:
+ assert 0
def genop_guard_guard_class(self, ign_1, guard_op, guard_token, locs,
ign_2):
- self._cmp_guard_class(locs)
+ self._cmp_guard_class_or_gc_type(guard_op, locs)
self.implement_guard(guard_token, 'NE')
def genop_guard_guard_nonnull_class(self, ign_1, guard_op,
@@ -1777,7 +1790,7 @@
# Patched below
self.mc.J_il8(rx86.Conditions['B'], 0)
jb_location = self.mc.get_relative_pos()
- self._cmp_guard_class(locs)
+ self._cmp_guard_class_or_gc_type(guard_op, locs)
# patch the JB above
offset = self.mc.get_relative_pos() - jb_location
assert 0 < offset <= 127
@@ -1785,6 +1798,9 @@
#
self.implement_guard(guard_token, 'NE')
+ genop_guard_guard_gc_type = genop_guard_guard_class
+ genop_guard_guard_nonnull_gc_type = genop_guard_guard_nonnull_class
+
def implement_guard_recovery(self, guard_opnum, faildescr, failargs,
fail_locs, frame_depth):
exc = (guard_opnum == rop.GUARD_EXCEPTION or
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -428,6 +428,8 @@
self.perform_guard(op, [x, y], None)
consider_guard_nonnull_class = consider_guard_class
+ consider_guard_gc_type = consider_guard_class
+ consider_guard_nonnull_gc_type = consider_guard_class
def _consider_binop_part(self, op, symm=False):
x = op.getarg(0)
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -656,6 +656,8 @@
'GUARD_NONNULL/1d/n',
'GUARD_ISNULL/1d/n',
'GUARD_NONNULL_CLASS/2d/n',
+ 'GUARD_GC_TYPE/2d/n',
+ 'GUARD_NONNULL_GC_TYPE/2d/n',
'_GUARD_FOLDABLE_LAST',
'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set
'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set