Author: Armin Rigo <ar...@tunes.org>
Branch: ppc-updated-backend
Changeset: r79928:0766a869fc86
Date: 2015-10-02 10:49 +0200
http://bitbucket.org/pypy/pypy/changeset/0766a869fc86/

Log:    PPC Backend #6: most tests pass

        Various remaining fixes until most tests pass. The relevant tests
        were copied and adapted from the x86 backend; this also includes
        test_zll_stress_*.py from rpython/jit/backend/test.

        Update to the default branch for the "optresult" changes.
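
For readers not following the "optresult" work on default: a ResOperation no longer carries a separate op.result box; the operation object itself stands for the value it produces, and value-producing opcodes are split per result type (call_i/call_r/call_f/call_n, same_as_i, getfield_gc_i, ...). That is why most of this diff replaces op.result with op and adds per-type emit_*/prepare_* aliases. The sketch below is plain Python with made-up names (Op, ToyRegalloc), only to show the shape of the convention, not the real RPython classes.

    # Minimal illustration (not RPython, names are invented) of the
    # "optresult" convention: the op *is* its result, and opcodes that
    # produce a value come in _i/_r/_f/_n flavours sharing one helper.
    class Op(object):
        def __init__(self, opname, args, type):
            self.opname = opname          # e.g. "call_i"
            self.args = args
            self.type = type              # 'i', 'r', 'f', or 'v' for void

    class ToyRegalloc(object):
        def __init__(self):
            self.assigned = {}            # op -> register name
            self._next = 0

        def force_allocate_reg(self, op):
            # the op itself is the key; there is no separate op.result box
            reg = "r%d" % self._next
            self._next += 1
            self.assigned[op] = reg
            return reg

        def _prepare_call(self, op):
            # shared by all result types, then aliased per suffix
            if op.type != 'v':
                return [self.force_allocate_reg(op)]
            return []

        prepare_call_i = _prepare_call
        prepare_call_r = _prepare_call
        prepare_call_f = _prepare_call
        prepare_call_n = _prepare_call

    ra = ToyRegalloc()
    print(ra.prepare_call_i(Op("call_i", [], 'i')))   # ['r0']
    print(ra.prepare_call_n(Op("call_n", [], 'v')))   # []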

diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py
--- a/rpython/jit/backend/arm/regalloc.py
+++ b/rpython/jit/backend/arm/regalloc.py
@@ -41,11 +41,7 @@
 from rpython.jit.backend.llsupport.descr import CallDescr
 
 
-# xxx hack: set a default value for TargetToken._ll_loop_code.  If 0, we know
-# that it is a LABEL that was not compiled yet.
-TargetToken._ll_loop_code = 0
-
-class TempInt(TempVar):
+class TempInt(TempBox):
     type = INT
 
     def __repr__(self):
diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py
--- a/rpython/jit/backend/llsupport/test/test_gc_integration.py
+++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py
@@ -3,7 +3,7 @@
 """
 
 import py
-import re
+import re, sys, struct
 from rpython.jit.metainterp.history import TargetToken, BasicFinalDescr,\
      JitCellToken, BasicFailDescr, AbstractDescr
 from rpython.jit.backend.llsupport.gc import GcLLDescription, GcLLDescr_boehm,\
@@ -613,7 +613,10 @@
         cpu = CPU(None, None)
         cpu.gc_ll_descr = GCDescrShadowstackDirect()
         wbd = cpu.gc_ll_descr.write_barrier_descr
-        wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field
+        if sys.byteorder == 'little':
+            wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field
+        else:
+            wbd.jit_wb_if_flag_byteofs = struct.calcsize("l") - 1
         S = lltype.GcForwardReference()
         S.become(lltype.GcStruct('S',
                                  ('hdr', lltype.Signed),
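
The byteofs change in this test exists because the write barrier tests a single byte inside the Signed 'hdr' word: the flag sits in the low-order byte, which is at offset 0 on a little-endian machine but at offset sizeof(long) - 1 on a big-endian one. A small self-contained sketch of that arithmetic (plain Python, independent of the test above):

    # Why jit_wb_if_flag_byteofs depends on byte order: the flag lives in
    # the low-order byte of a machine word, and that byte sits at offset 0
    # on little-endian but at offset sizeof(long) - 1 on big-endian.
    import struct
    import sys

    WORD = struct.calcsize("l")

    def flag_byteofs():
        if sys.byteorder == 'little':
            return 0
        else:
            return WORD - 1

    # demonstration: pack a small flag value into a word and locate its byte
    word = bytearray(struct.pack("l", 0x2a))
    assert word[flag_byteofs()] == 0x2a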
diff --git a/rpython/jit/backend/ppc/helper/regalloc.py b/rpython/jit/backend/ppc/helper/regalloc.py
--- a/rpython/jit/backend/ppc/helper/regalloc.py
+++ b/rpython/jit/backend/ppc/helper/regalloc.py
@@ -1,4 +1,4 @@
-from rpython.jit.metainterp.history import ConstInt, Box, FLOAT
+from rpython.jit.metainterp.history import ConstInt, FLOAT
 from rpython.jit.backend.ppc.locations import imm
 
 def check_imm_box(arg, lower_bound=-2**15, upper_bound=2**15-1):
@@ -21,7 +21,7 @@
         else:
             l1 = self.ensure_reg(a1)
         self.free_op_vars()
-        res = self.force_allocate_reg_or_cc(op.result)
+        res = self.force_allocate_reg_or_cc(op)
         return [l0, l1, res]
     return f
 prepare_cmp_op          = _prepare_cmp_op(signed=True)
@@ -31,27 +31,27 @@
     l0 = self.ensure_reg(op.getarg(0))
     l1 = imm(0)
     self.free_op_vars()
-    res = self.force_allocate_reg_or_cc(op.result)
+    res = self.force_allocate_reg_or_cc(op)
     return [l0, l1, res]
 
 def prepare_float_cmp(self, op):
     l0 = self.ensure_reg(op.getarg(0))
     l1 = self.ensure_reg(op.getarg(1))
     self.free_op_vars()
-    res = self.force_allocate_reg_or_cc(op.result)
+    res = self.force_allocate_reg_or_cc(op)
     return [l0, l1, res]
 
 def prepare_unary_op(self, op):
     l0 = self.ensure_reg(op.getarg(0))
     self.free_op_vars()
-    res = self.force_allocate_reg(op.result)
+    res = self.force_allocate_reg(op)
     return [l0, res]
 
 def prepare_binary_op(self, op):
     reg1 = self.ensure_reg(op.getarg(0))
     reg2 = self.ensure_reg(op.getarg(1))
     self.free_op_vars()
-    res = self.force_allocate_reg(op.result)
+    res = self.force_allocate_reg(op)
     return [reg1, reg2, res]
 
 def prepare_int_add_or_mul(self, op):
@@ -65,7 +65,7 @@
     else:
         l1 = self.ensure_reg(a1)
     self.free_op_vars()
-    res = self.force_allocate_reg(op.result)
+    res = self.force_allocate_reg(op)
     return [l0, l1, res]
 
 def prepare_int_sub(self, op):
@@ -76,5 +76,5 @@
     else:
         l1 = self.ensure_reg(a1)
     self.free_op_vars()
-    res = self.force_allocate_reg(op.result)
+    res = self.force_allocate_reg(op)
     return [l0, l1, res]
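
check_imm_box (unchanged above apart from its callers) encodes the usual PPC constraint that arithmetic immediates are signed 16-bit fields: constants outside [-2**15, 2**15 - 1] cannot be encoded in the instruction and must be loaded into a register first. A standalone restatement of that check, for reference:

    # Signed 16-bit immediate check, as used by check_imm_box above:
    # values in [-2**15, 2**15 - 1] can be encoded directly in instructions
    # such as addi; anything else goes through ensure_reg.
    def fits_signed_16bit(value, lower_bound=-2**15, upper_bound=2**15 - 1):
        return lower_bound <= value <= upper_bound

    assert fits_signed_16bit(2**15 - 1)
    assert not fits_signed_16bit(2**15)
    assert fits_signed_16bit(-2**15)
    assert not fits_signed_16bit(-2**15 - 1)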
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -10,9 +10,9 @@
                                           THREADLOCAL_ADDR_OFFSET,
                                           IS_BIG_ENDIAN)
 
-from rpython.jit.metainterp.history import (JitCellToken, TargetToken, Box,
+from rpython.jit.metainterp.history import (JitCellToken, TargetToken,
                                             AbstractFailDescr, FLOAT, INT, REF,
-                                            ConstInt)
+                                            ConstInt, VOID)
 from rpython.rlib.objectmodel import we_are_translated
 from rpython.jit.backend.ppc.helper.assembler import (Saved_Volatiles)
 from rpython.jit.backend.ppc.jump import remap_frame_layout
@@ -24,6 +24,7 @@
 from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
 from rpython.rtyper.lltypesystem import rstr, rffi, lltype
 from rpython.rtyper.annlowlevel import cast_instance_to_gcref
+from rpython.rtyper import rclass
 from rpython.jit.metainterp.resoperation import rop
 from rpython.jit.codewriter.effectinfo import EffectInfo
 from rpython.jit.backend.ppc import callbuilder
@@ -41,6 +42,8 @@
         else:
             self.mc.add(res.value, l0.value, l1.value)
 
+    emit_nursery_ptr_increment = emit_int_add
+
     def emit_int_sub(self, op, arglocs, regalloc):
         l0, l1, res = arglocs
         assert not l0.is_imm()
@@ -317,7 +320,7 @@
     def emit_guard_class(self, op, arglocs, regalloc):
         self._cmp_guard_class(op, arglocs, regalloc)
         self.guard_success_cc = c.EQ
-        self._emit_guard(op, arglocs[3:])
+        self._emit_guard(op, arglocs[2:])
 
     def emit_guard_nonnull_class(self, op, arglocs, regalloc):
         self.mc.cmp_op(0, arglocs[0].value, 1, imm=True, signed=False)
@@ -328,26 +331,102 @@
         pmc.blt(self.mc.currpos() - patch_pos)
         pmc.overwrite()
         self.guard_success_cc = c.EQ
-        self._emit_guard(op, arglocs[3:])
+        self._emit_guard(op, arglocs[2:])
 
     def _cmp_guard_class(self, op, locs, regalloc):
-        offset = locs[2]
+        offset = self.cpu.vtable_offset
         if offset is not None:
-            with scratch_reg(self.mc):
-                self.mc.load(r.SCRATCH.value, locs[0].value, offset.value)
-                self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value)
+            # could be one instruction shorter, but don't care because
+            # it's not this case that is commonly translated
+            self.mc.load(r.SCRATCH.value, locs[0].value, offset)
+            self.mc.load_imm(r.SCRATCH2, locs[1].value)
+            self.mc.cmp_op(0, r.SCRATCH.value, r.SCRATCH2.value)
         else:
-            typeid = locs[1]
-            # here, we have to go back from 'classptr' to the value expected
-            # from reading the half-word in the object header.  Note that
-            # this half-word is at offset 0 on a little-endian machine;
-            # but it is at offset 2 (32 bit) or 4 (64 bit) on a
-            # big-endian machine.
-            if IS_PPC_32:
-                self.mc.lhz(r.SCRATCH.value, locs[0].value, 2 * IS_BIG_ENDIAN)
-            else:
-                self.mc.lwz(r.SCRATCH.value, locs[0].value, 4 * IS_BIG_ENDIAN)
-            self.mc.cmp_op(0, r.SCRATCH.value, typeid.value, imm=typeid.is_imm())
+            expected_typeid = (self.cpu.gc_ll_descr
+                    .get_typeid_from_classptr_if_gcremovetypeptr(locs[1].value))
+            self._cmp_guard_gc_type(locs[0], expected_typeid)
+
+    def _read_typeid(self, targetreg, loc_ptr):
+        # Note that the typeid half-word is at offset 0 on a little-endian
+        # machine; it is at offset 2 or 4 on a big-endian machine.
+        assert self.cpu.supports_guard_gc_type
+        if IS_PPC_32:
+            self.mc.lhz(targetreg.value, loc_ptr.value, 2 * IS_BIG_ENDIAN)
+        else:
+            self.mc.lwz(targetreg.value, loc_ptr.value, 4 * IS_BIG_ENDIAN)
+
+    def _cmp_guard_gc_type(self, loc_ptr, expected_typeid):
+        self._read_typeid(r.SCRATCH2, loc_ptr)
+        assert 0 <= expected_typeid <= 0x7fffffff   # 4 bytes are always enough
+        if expected_typeid > 0xffff:     # if 2 bytes are not enough
+            self.mc.subis(r.SCRATCH2.value, r.SCRATCH2.value,
+                          expected_typeid >> 16)
+            expected_typeid = expected_typeid & 0xffff
+        self.mc.cmp_op(0, r.SCRATCH2.value, expected_typeid,
+                       imm=True, signed=False)
+
+    def emit_guard_gc_type(self, op, arglocs, regalloc):
+        self._cmp_guard_gc_type(arglocs[0], arglocs[1].value)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, arglocs[2:])
+
+    def emit_guard_is_object(self, op, arglocs, regalloc):
+        assert self.cpu.supports_guard_gc_type
+        loc_object = arglocs[0]
+        # idea: read the typeid, fetch one byte of the field 'infobits' from
+        # the big typeinfo table, and check the flag 'T_IS_RPYTHON_INSTANCE'.
+        base_type_info, shift_by, sizeof_ti = (
+            self.cpu.gc_ll_descr.get_translated_info_for_typeinfo())
+        infobits_offset, IS_OBJECT_FLAG = (
+            self.cpu.gc_ll_descr.get_translated_info_for_guard_is_object())
+
+        self._read_typeid(r.SCRATCH2, loc_object)
+        self.mc.load_imm(r.SCRATCH, base_type_info + infobits_offset)
+        assert shift_by == 0     # on PPC64; fixme for PPC32
+        self.mc.lbzx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value)
+        self.mc.andix(r.SCRATCH2.value, r.SCRATCH2.value, IS_OBJECT_FLAG & 0xff)
+        self.guard_success_cc = c.NE
+        self._emit_guard(op, arglocs[1:])
+
+    def emit_guard_subclass(self, op, arglocs, regalloc):
+        assert self.cpu.supports_guard_gc_type
+        loc_object = arglocs[0]
+        loc_check_against_class = arglocs[1]
+        offset = self.cpu.vtable_offset
+        offset2 = self.cpu.subclassrange_min_offset
+        if offset is not None:
+            # read this field to get the vtable pointer
+            self.mc.load(r.SCRATCH2.value, loc_object.value, offset)
+            # read the vtable's subclassrange_min field
+            assert _check_imm_arg(offset2)
+            self.mc.ld(r.SCRATCH2.value, r.SCRATCH2.value, offset2)
+        else:
+            # read the typeid
+            self._read_typeid(r.SCRATCH, loc_object)
+            # read the vtable's subclassrange_min field, as a single
+            # step with the correct offset
+            base_type_info, shift_by, sizeof_ti = (
+                self.cpu.gc_ll_descr.get_translated_info_for_typeinfo())
+            self.mc.load_imm(r.SCRATCH2, base_type_info + sizeof_ti + offset2)
+            assert shift_by == 0     # on PPC64; fixme for PPC32
+            self.mc.ldx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value)
+        # get the two bounds to check against
+        vtable_ptr = loc_check_against_class.getint()
+        vtable_ptr = rffi.cast(rclass.CLASSTYPE, vtable_ptr)
+        check_min = vtable_ptr.subclassrange_min
+        check_max = vtable_ptr.subclassrange_max
+        assert check_max > check_min
+        check_diff = check_max - check_min - 1
+        # right now, a full PyPy uses less than 6000 numbers,
+        # so we'll assert here that it always fit inside 15 bits
+        assert 0 <= check_min <= 0x7fff
+        assert 0 <= check_diff <= 0xffff
+        # check by doing the unsigned comparison (tmp - min) < (max - min)
+        self.mc.subi(r.SCRATCH2.value, r.SCRATCH2.value, check_min)
+        self.mc.cmp_op(0, r.SCRATCH2.value, check_diff, imm=True, signed=False)
+        # the guard passes if we get a result of "below or equal"
+        self.guard_success_cc = c.LE
+        self._emit_guard(op, arglocs[2:])
 
     def emit_guard_not_invalidated(self, op, arglocs, regalloc):
         self._emit_guard(op, arglocs, is_guard_not_invalidated=True)
@@ -433,17 +512,20 @@
         assert my_nbargs == target_nbargs
 
         if descr in self.target_tokens_currently_compiling:
-            self.mc.b_offset(descr._ppc_loop_code)
+            self.mc.b_offset(descr._ll_loop_code)
         else:
-            self.mc.b_abs(descr._ppc_loop_code)
+            self.mc.b_abs(descr._ll_loop_code)
 
-    def emit_same_as(self, op, arglocs, regalloc):
+    def _genop_same_as(self, op, arglocs, regalloc):
         argloc, resloc = arglocs
         if argloc is not resloc:
             self.regalloc_mov(argloc, resloc)
 
-    emit_cast_ptr_to_int = emit_same_as
-    emit_cast_int_to_ptr = emit_same_as
+    emit_same_as_i = _genop_same_as
+    emit_same_as_r = _genop_same_as
+    emit_same_as_f = _genop_same_as
+    emit_cast_ptr_to_int = _genop_same_as
+    emit_cast_int_to_ptr = _genop_same_as
 
     def emit_guard_no_exception(self, op, arglocs, regalloc):
         self.mc.load_from_addr(r.SCRATCH2, self.cpu.pos_exception())
@@ -504,20 +586,35 @@
         else:
             cb.emit()
 
-    def emit_call(self, op, arglocs, regalloc):
+    def _genop_call(self, op, arglocs, regalloc):
         oopspecindex = regalloc.get_oopspecindex(op)
         if oopspecindex == EffectInfo.OS_MATH_SQRT:
             return self._emit_math_sqrt(op, arglocs, regalloc)
         self._emit_call(op, arglocs)
 
-    def emit_call_may_force(self, op, arglocs, regalloc):
+    emit_call_i = _genop_call
+    emit_call_r = _genop_call
+    emit_call_f = _genop_call
+    emit_call_n = _genop_call
+
+    def _genop_call_may_force(self, op, arglocs, regalloc):
         self._store_force_index(self._find_nearby_operation(regalloc, +1))
         self._emit_call(op, arglocs)
 
-    def emit_call_release_gil(self, op, arglocs, regalloc):
+    emit_call_may_force_i = _genop_call_may_force
+    emit_call_may_force_r = _genop_call_may_force
+    emit_call_may_force_f = _genop_call_may_force
+    emit_call_may_force_n = _genop_call_may_force
+
+    def _genop_call_release_gil(self, op, arglocs, regalloc):
         self._store_force_index(self._find_nearby_operation(regalloc, +1))
         self._emit_call(op, arglocs, is_call_release_gil=True)
 
+    emit_call_release_gil_i = _genop_call_release_gil
+    emit_call_release_gil_r = _genop_call_release_gil
+    emit_call_release_gil_f = _genop_call_release_gil
+    emit_call_release_gil_n = _genop_call_release_gil
+
     def _store_force_index(self, guard_op):
         assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or
                 guard_op.getopnum() == rop.GUARD_NOT_FORCED_2)
@@ -667,13 +764,20 @@
         else:
             assert 0, "size not supported"
 
-    def emit_getfield_gc(self, op, arglocs, regalloc):
+    def _genop_getfield(self, op, arglocs, regalloc):
         base_loc, ofs, res, size, sign = arglocs
         self._load_from_mem(res, base_loc, ofs, size, sign)
 
-    emit_getfield_raw = emit_getfield_gc
-    emit_getfield_raw_pure = emit_getfield_gc
-    emit_getfield_gc_pure = emit_getfield_gc
+    emit_getfield_gc_i = _genop_getfield
+    emit_getfield_gc_r = _genop_getfield
+    emit_getfield_gc_f = _genop_getfield
+    emit_getfield_gc_pure_i = _genop_getfield
+    emit_getfield_gc_pure_r = _genop_getfield
+    emit_getfield_gc_pure_f = _genop_getfield
+    emit_getfield_raw_i = _genop_getfield
+    emit_getfield_raw_f = _genop_getfield
+    emit_getfield_raw_pure_i = _genop_getfield
+    emit_getfield_raw_pure_f = _genop_getfield
 
     SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)])
 
@@ -729,13 +833,15 @@
                 index_loc = r.SCRATCH2
             return index_loc
 
-    def emit_getinteriorfield_gc(self, op, arglocs, regalloc):
+    def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc):
         (base_loc, index_loc, res_loc, ofs_loc,
             itemsize, fieldsize, fieldsign) = arglocs
         ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize)
         self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign)
 
-    emit_getinteriorfield_raw = emit_getinteriorfield_gc
+    emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield
+    emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield
+    emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield
 
     def emit_setinteriorfield_gc(self, op, arglocs, regalloc):
         (base_loc, index_loc, value_loc, ofs_loc,
@@ -752,12 +858,20 @@
     emit_setarrayitem_gc = emit_setinteriorfield_gc
     emit_setarrayitem_raw = emit_setarrayitem_gc
 
-    emit_getarrayitem_gc = emit_getinteriorfield_gc
-    emit_getarrayitem_raw = emit_getarrayitem_gc
-    emit_getarrayitem_gc_pure = emit_getarrayitem_gc
+    emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield
+    emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield
+    emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield
+    emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield
+    emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield
+    emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield
+    emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield
+    emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield
+    emit_getarrayitem_raw_pure_i = _genop_getarray_or_interiorfield
+    emit_getarrayitem_raw_pure_f = _genop_getarray_or_interiorfield
 
     emit_raw_store = emit_setarrayitem_gc
-    emit_raw_load = emit_getarrayitem_gc
+    emit_raw_load_i = _genop_getarray_or_interiorfield
+    emit_raw_load_f = _genop_getarray_or_interiorfield
 
     def _copy_in_scratch2(self, loc):
         if loc.is_imm():
@@ -862,8 +976,8 @@
 
     _mixin_ = True
 
-    emit_strlen = FieldOpAssembler.emit_getfield_gc
-    emit_strgetitem = FieldOpAssembler.emit_getarrayitem_gc
+    emit_strlen = FieldOpAssembler._genop_getfield
+    emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield
     emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc
 
     def emit_copystrcontent(self, op, arglocs, regalloc):
@@ -926,8 +1040,8 @@
 
     _mixin_ = True
 
-    emit_unicodelen = FieldOpAssembler.emit_getfield_gc
-    emit_unicodegetitem = FieldOpAssembler.emit_getarrayitem_gc
+    emit_unicodelen = FieldOpAssembler._genop_getfield
+    emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield
     emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc
 
 
@@ -936,7 +1050,7 @@
     _mixin_ = True
 
     def emit_call_malloc_gc(self, op, arglocs, regalloc):
-        self.emit_call(op, arglocs, regalloc)
+        self._emit_call(op, arglocs)
         self.propagate_memoryerror_if_r3_is_null()
 
     def emit_call_malloc_nursery(self, op, arglocs, regalloc):
@@ -1130,16 +1244,21 @@
         res_loc = arglocs[0]
         self.mc.mr(res_loc.value, r.SPP.value)
 
-    def emit_call_assembler(self, op, arglocs, regalloc):
+    def _genop_call_assembler(self, op, arglocs, regalloc):
         if len(arglocs) == 3:
             [result_loc, argloc, vloc] = arglocs
         else:
             [result_loc, argloc] = arglocs
             vloc = imm(0)
         self._store_force_index(self._find_nearby_operation(regalloc, +1))
-        # 'result_loc' is either r3 or f1
+        # 'result_loc' is either r3 or f1, or None
         self.call_assembler(op, argloc, vloc, result_loc, r.r3)
 
+    emit_call_assembler_i = _genop_call_assembler
+    emit_call_assembler_r = _genop_call_assembler
+    emit_call_assembler_f = _genop_call_assembler
+    emit_call_assembler_n = _genop_call_assembler
+
     imm = staticmethod(imm)   # for call_assembler()
 
     def _call_assembler_emit_call(self, addr, argloc, _):
@@ -1177,9 +1296,9 @@
         return jump_to_done
 
     def _call_assembler_load_result(self, op, result_loc):
-        if op.result is not None:
+        if op.type != VOID:
             # load the return value from the dead frame's value index 0
-            kind = op.result.type
+            kind = op.type
             descr = self.cpu.getarraydescr_for_frame(kind)
             ofs = self.cpu.unpack_arraydescr(descr)
             if kind == FLOAT:
@@ -1202,6 +1321,10 @@
         assert old_nbargs == new_nbargs
         oldadr = oldlooptoken._ll_function_addr
         target = newlooptoken._ll_function_addr
+        # copy frame-info data
+        baseofs = self.cpu.get_baseofs_of_frame_field()
+        newlooptoken.compiled_loop_token.update_frame_info(
+            oldlooptoken.compiled_loop_token, baseofs)
         if IS_PPC_32 or not IS_BIG_ENDIAN:
             # we overwrite the instructions at the old _ll_function_addr
             # to start with a JMP to the new _ll_function_addr.
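
One detail of the new emit_guard_subclass above worth spelling out: it folds the two bounds checks check_min <= value < check_max into a single unsigned comparison of (value - check_min) against (check_max - check_min - 1), which is why the guard succeeds on the "below or equal" condition. A plain-Python sketch of that trick (the 64-bit mask stands in for the hardware's unsigned wrap-around):

    # Sketch of the single-comparison range check used by
    # emit_guard_subclass: values below check_min wrap around to a very
    # large unsigned number, so one unsigned "below or equal" test covers
    # both bounds at once.
    MASK = 2**64 - 1          # stand-in for 64-bit unsigned arithmetic

    def passes_guard_subclass(value, check_min, check_max):
        check_diff = check_max - check_min - 1
        return ((value - check_min) & MASK) <= check_diff

    assert passes_guard_subclass(10, 10, 12)       # == check_min
    assert passes_guard_subclass(11, 10, 12)       # inside the range
    assert not passes_guard_subclass(12, 10, 12)   # == check_max
    assert not passes_guard_subclass(9, 10, 12)    # below check_min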
diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py
--- a/rpython/jit/backend/ppc/ppc_assembler.py
+++ b/rpython/jit/backend/ppc/ppc_assembler.py
@@ -15,7 +15,6 @@
 import rpython.jit.backend.ppc.condition as c
 from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE
 from rpython.jit.metainterp.history import AbstractFailDescr
-from rpython.jit.metainterp.history import ConstInt, BoxInt
 from rpython.jit.backend.llsupport import jitframe, rewrite
 from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
 from rpython.jit.backend.llsupport.assembler import (DEBUG_COUNTER, debug_bridge,
@@ -770,7 +769,7 @@
         if IS_PPC_64 and IS_BIG_ENDIAN:  # fix the function descriptor (3 words)
             rffi.cast(rffi.LONGP, rawstart)[0] = rawstart + 3 * WORD
         #
-        looptoken._ppc_loop_code = looppos + rawstart
+        looptoken._ll_loop_code = looppos + rawstart
         debug_start("jit-backend-addr")
         debug_print("Loop %d (%s) has address 0x%x to 0x%x (bootstrap 0x%x)" % 
(
             looptoken.number, loopname,
@@ -870,7 +869,7 @@
 
     def fixup_target_tokens(self, rawstart):
         for targettoken in self.target_tokens_currently_compiling:
-            targettoken._ppc_loop_code += rawstart
+            targettoken._ll_loop_code += rawstart
         self.target_tokens_currently_compiling = None
 
     def target_arglocs(self, looptoken):
diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py
--- a/rpython/jit/backend/ppc/regalloc.py
+++ b/rpython/jit/backend/ppc/regalloc.py
@@ -1,5 +1,5 @@
 from rpython.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager,
-                                                    TempBox, compute_vars_longevity,
+                                                    TempVar, compute_vars_longevity,
                                                     BaseRegalloc)
 from rpython.jit.backend.ppc.arch import (WORD, MY_COPY_OF_REGS, IS_PPC_32)
 from rpython.jit.codewriter import longlong
@@ -9,8 +9,7 @@
 from rpython.jit.backend.ppc.helper.regalloc import _check_imm_arg, check_imm_box
 from rpython.jit.backend.ppc.helper import regalloc as helper
 from rpython.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr,
-                                            Box, BoxPtr,
-                                            INT, REF, FLOAT)
+                                            INT, REF, FLOAT, VOID)
 from rpython.jit.metainterp.history import JitCellToken, TargetToken
 from rpython.jit.metainterp.resoperation import rop
 from rpython.jit.backend.ppc import locations
@@ -32,23 +31,20 @@
 
 LIMIT_LOOP_BREAK = 15000      # should be much smaller than 32 KB
 
-# xxx hack: set a default value for TargetToken._arm_loop_code.  If 0, we know
-# that it is a LABEL that was not compiled yet.
-TargetToken._ppc_loop_code = 0
 
-class TempInt(TempBox):
+class TempInt(TempVar):
     type = INT
 
     def __repr__(self):
         return "<TempInt at %s>" % (id(self),)
 
-class TempPtr(TempBox):
+class TempPtr(TempVar):
     type = REF
 
     def __repr__(self):
         return "<TempPtr at %s>" % (id(self),)
 
-class TempFloat(TempBox):
+class TempFloat(TempVar):
     type = FLOAT
 
     def __repr__(self):
@@ -163,7 +159,7 @@
         return loc
 
     def get_scratch_reg(self):
-        box = TempBox()
+        box = TempVar()
         reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes)
         self.temp_boxes.append(box)
         return reg
@@ -320,7 +316,7 @@
             self.assembler.mc.mark_op(op)
             self.rm.position = i
             self.fprm.position = i
-            if op.has_no_side_effect() and op.result not in self.longevity:
+            if op.has_no_side_effect() and op not in self.longevity:
                 i += 1
                 self.possibly_free_vars_for_op(op)
                 continue
@@ -333,13 +329,13 @@
                     self.fprm.temp_boxes.append(box)
             #
             opnum = op.getopnum()
-            if not we_are_translated() and opnum == -124:
+            if not we_are_translated() and opnum == -127:
                 self._consider_force_spill(op)
             else:
                 arglocs = oplist[opnum](self, op)
                 asm_operations[opnum](self.assembler, op, arglocs, self)
             self.free_op_vars()
-            self.possibly_free_var(op.result)
+            self.possibly_free_var(op)
             self.rm._check_invariants()
             self.fprm._check_invariants()
             if self.assembler.mc.get_relative_pos() > self.limit_loop_break:
@@ -462,6 +458,7 @@
     prepare_int_add = helper.prepare_int_add_or_mul
     prepare_int_sub = helper.prepare_int_sub
     prepare_int_mul = helper.prepare_int_add_or_mul
+    prepare_nursery_ptr_increment = prepare_int_add
 
     prepare_int_floordiv = helper.prepare_binary_op
     prepare_int_mod = helper.prepare_binary_op
@@ -521,29 +518,29 @@
     def _prepare_math_sqrt(self, op):
         loc = self.ensure_reg(op.getarg(1))
         self.free_op_vars()
-        res = self.fprm.force_allocate_reg(op.result)
+        res = self.fprm.force_allocate_reg(op)
         return [loc, res]
 
     def prepare_cast_float_to_int(self, op):
         loc1 = self.ensure_reg(op.getarg(0))
         self.free_op_vars()
         temp_loc = self.get_scratch_reg(FLOAT)
-        res = self.rm.force_allocate_reg(op.result)
+        res = self.rm.force_allocate_reg(op)
         return [loc1, temp_loc, res]
 
     def prepare_cast_int_to_float(self, op):
         loc1 = self.ensure_reg(op.getarg(0))
-        res = self.fprm.force_allocate_reg(op.result)
+        res = self.fprm.force_allocate_reg(op)
         return [loc1, res]
 
     def prepare_convert_float_bytes_to_longlong(self, op):
         loc1 = self.ensure_reg(op.getarg(0))
-        res = self.rm.force_allocate_reg(op.result)
+        res = self.rm.force_allocate_reg(op)
         return [loc1, res]
 
     def prepare_convert_longlong_bytes_to_float(self, op):
         loc1 = self.ensure_reg(op.getarg(0))
-        res = self.fprm.force_allocate_reg(op.result)
+        res = self.fprm.force_allocate_reg(op)
         return [loc1, res]
 
     def prepare_finish(self, op):
@@ -602,8 +599,8 @@
 
     def prepare_guard_exception(self, op):
         loc = self.ensure_reg(op.getarg(0))
-        if op.result in self.longevity:
-            resloc = self.force_allocate_reg(op.result)
+        if op in self.longevity:
+            resloc = self.force_allocate_reg(op)
         else:
             resloc = None
         arglocs = self._prepare_guard(op, [loc, resloc])
@@ -626,41 +623,17 @@
     def prepare_guard_class(self, op):
         x = self.ensure_reg(op.getarg(0))
         y_val = force_int(op.getarg(1).getint())
-
-        arglocs = [x, None, None]
-
-        offset = self.cpu.vtable_offset
-        if offset is not None:
-            y = r.SCRATCH2
-            self.assembler.mc.load_imm(y, y_val)
-
-            assert _check_imm_arg(offset)
-            offset_loc = imm(offset)
-
-            arglocs[1] = y
-            arglocs[2] = offset_loc
-
-        else:
-            # XXX hard-coded assumption: to go from an object to its class
-            # we use the following algorithm:
-            #   - read the typeid from mem(locs[0]), i.e. at offset 0
-            #   - keep the lower half-word read there
-            #   - multiply by 4 (on 32-bits only) and use it as an
-            #     offset in type_info_group
-            #   - add 16/32 bytes, to go past the TYPE_INFO structure
-            classptr = y_val
-            from rpython.memory.gctypelayout import GCData
-            sizeof_ti = rffi.sizeof(GCData.TYPE_INFO)
-            type_info_group = llop.gc_get_type_info_group(llmemory.Address)
-            type_info_group = rffi.cast(lltype.Signed, type_info_group)
-            expected_typeid = classptr - sizeof_ti - type_info_group
-            if IS_PPC_32:
-                expected_typeid >>= 2
-            arglocs[1] = self.ensure_reg_or_16bit_imm(ConstInt(expected_typeid))
-
-        return self._prepare_guard(op, arglocs)
+        arglocs = self._prepare_guard(op, [x, imm(y_val)])
+        return arglocs
 
     prepare_guard_nonnull_class = prepare_guard_class
+    prepare_guard_gc_type = prepare_guard_class
+    prepare_guard_subclass = prepare_guard_class
+
+    def prepare_guard_is_object(self, op):
+        loc_object = self.ensure_reg(op.getarg(0))
+        arglocs = self._prepare_guard(op, [loc_object])
+        return arglocs
 
     def compute_hint_frame_locations(self, operations):
         # optimization only: fill in the 'hint_frame_locations' dictionary
@@ -672,7 +645,7 @@
         self.final_jump_op = op
         descr = op.getdescr()
         assert isinstance(descr, TargetToken)
-        if descr._ppc_loop_code != 0:
+        if descr._ll_loop_code != 0:
             # if the target LABEL was already compiled, i.e. if it belongs
             # to some already-compiled piece of code
             self._compute_hint_frame_locations_from_descr(descr)
@@ -688,7 +661,7 @@
         assert len(arglocs) == jump_op.numargs()
         for i in range(jump_op.numargs()):
             box = jump_op.getarg(i)
-            if isinstance(box, Box):
+            if not isinstance(box, Const):
                 loc = arglocs[i]
                 if loc is not None and loc.is_stack():
                     self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc)
@@ -735,35 +708,44 @@
 
     prepare_setfield_raw = prepare_setfield_gc
 
-    def prepare_getfield_gc(self, op):
+    def _prepare_getfield(self, op):
         ofs, size, sign = unpack_fielddescr(op.getdescr())
         base_loc = self.ensure_reg(op.getarg(0))
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
         self.free_op_vars()
-        res = self.force_allocate_reg(op.result)
+        res = self.force_allocate_reg(op)
         return [base_loc, ofs_loc, res, imm(size), imm(sign)]
 
-    prepare_getfield_raw = prepare_getfield_gc
-    prepare_getfield_raw_pure = prepare_getfield_gc
-    prepare_getfield_gc_pure = prepare_getfield_gc
+    prepare_getfield_gc_i = _prepare_getfield
+    prepare_getfield_gc_r = _prepare_getfield
+    prepare_getfield_gc_f = _prepare_getfield
+    prepare_getfield_raw_i = _prepare_getfield
+    prepare_getfield_raw_f = _prepare_getfield
+    prepare_getfield_raw_pure_i = _prepare_getfield
+    prepare_getfield_raw_pure_f = _prepare_getfield
+    prepare_getfield_gc_pure_i = _prepare_getfield
+    prepare_getfield_gc_pure_r = _prepare_getfield
+    prepare_getfield_gc_pure_f = _prepare_getfield
 
     def prepare_increment_debug_counter(self, op):
         base_loc = self.ensure_reg(op.getarg(0))
         temp_loc = r.SCRATCH2
         return [base_loc, temp_loc]
 
-    def prepare_getinteriorfield_gc(self, op):
+    def _prepare_getinteriorfield(self, op):
         t = unpack_interiorfielddescr(op.getdescr())
         ofs, itemsize, fieldsize, sign = t
         base_loc = self.ensure_reg(op.getarg(0))
         index_loc = self.ensure_reg_or_any_imm(op.getarg(1))
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         return [base_loc, index_loc, result_loc, ofs_loc,
                 imm(itemsize), imm(fieldsize), imm(sign)]
 
-    prepare_getinteriorfield_raw = prepare_getinteriorfield_gc
+    prepare_getinteriorfield_gc_i = _prepare_getinteriorfield
+    prepare_getinteriorfield_gc_r = _prepare_getinteriorfield
+    prepare_getinteriorfield_gc_f = _prepare_getinteriorfield
 
     def prepare_setinteriorfield_gc(self, op):
         t = unpack_interiorfielddescr(op.getdescr())
@@ -784,7 +766,7 @@
         assert _check_imm_arg(ofs)
         base_loc = self.ensure_reg(op.getarg(0))
         self.free_op_vars()
-        res = self.force_allocate_reg(op.result)
+        res = self.force_allocate_reg(op)
         return [res, base_loc, imm(ofs)]
 
     def prepare_setarrayitem_gc(self, op):
@@ -808,36 +790,47 @@
         return [base_loc, index_loc, value_loc, ofs_loc,
                 imm(1), imm(size)]
 
-    def prepare_getarrayitem_gc(self, op):
+    def _prepare_getarrayitem(self, op):
         size, ofs, sign = unpack_arraydescr(op.getdescr())
         base_loc = self.ensure_reg(op.getarg(0))
         index_loc = self.ensure_reg_or_any_imm(op.getarg(1))
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         imm_size = imm(size)
         return [base_loc, index_loc, result_loc, ofs_loc,
                 imm_size, imm_size, imm(sign)]
 
-    prepare_getarrayitem_raw = prepare_getarrayitem_gc
-    prepare_getarrayitem_gc_pure = prepare_getarrayitem_gc
+    prepare_getarrayitem_gc_i = _prepare_getarrayitem
+    prepare_getarrayitem_gc_r = _prepare_getarrayitem
+    prepare_getarrayitem_gc_f = _prepare_getarrayitem
+    prepare_getarrayitem_raw_i = _prepare_getarrayitem
+    prepare_getarrayitem_raw_f = _prepare_getarrayitem
+    prepare_getarrayitem_raw_pure_i = _prepare_getarrayitem
+    prepare_getarrayitem_raw_pure_f = _prepare_getarrayitem
+    prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem
+    prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem
+    prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem
 
-    def prepare_raw_load(self, op):
+    def _prepare_raw_load(self, op):
         size, ofs, sign = unpack_arraydescr(op.getdescr())
         base_loc = self.ensure_reg(op.getarg(0))
         index_loc = self.ensure_reg_or_any_imm(op.getarg(1))
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         return [base_loc, index_loc, result_loc, ofs_loc,
                 imm(1), imm(size), imm(sign)]
 
+    prepare_raw_load_i = _prepare_raw_load
+    prepare_raw_load_f = _prepare_raw_load
+
     def prepare_strlen(self, op):
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
                                              self.cpu.translate_support_code)
         base_loc = self.ensure_reg(op.getarg(0))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)]
 
     def prepare_strgetitem(self, op):
@@ -847,7 +840,7 @@
         index_loc = self.ensure_reg_or_any_imm(op.getarg(1))
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         imm_size = imm(itemsize)
         return [base_loc, index_loc, result_loc, ofs_loc,
                 imm_size, imm_size, imm(0)]
@@ -880,7 +873,7 @@
                                              self.cpu.translate_support_code)
         base_loc = self.ensure_reg(op.getarg(0))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)]
 
     def prepare_unicodegetitem(self, op):
@@ -890,7 +883,7 @@
         index_loc = self.ensure_reg_or_any_imm(op.getarg(1))
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize))
         self.free_op_vars()
-        result_loc = self.force_allocate_reg(op.result)
+        result_loc = self.force_allocate_reg(op)
         imm_size = imm(itemsize)
         return [base_loc, index_loc, result_loc, ofs_loc,
                 imm_size, imm_size, imm(0)]
@@ -906,9 +899,11 @@
         return [base_loc, index_loc, value_loc, ofs_loc,
                 imm_size, imm_size]
 
-    prepare_same_as = helper.prepare_unary_op
-    prepare_cast_ptr_to_int = prepare_same_as
-    prepare_cast_int_to_ptr = prepare_same_as
+    prepare_same_as_i = helper.prepare_unary_op
+    prepare_same_as_r = helper.prepare_unary_op
+    prepare_same_as_f = helper.prepare_unary_op
+    prepare_cast_ptr_to_int = helper.prepare_unary_op
+    prepare_cast_int_to_ptr = helper.prepare_unary_op
 
     def get_oopspecindex(self, op):
         descr = op.getdescr()
@@ -918,12 +913,17 @@
             return effectinfo.oopspecindex
         return EffectInfo.OS_NONE
 
-    def prepare_call(self, op):
+    def _prepare_call(self, op):
         oopspecindex = self.get_oopspecindex(op)
         if oopspecindex == EffectInfo.OS_MATH_SQRT:
             return self._prepare_math_sqrt(op)
         return self._prepare_call(op)
 
+    prepare_call_i = _prepare_call
+    prepare_call_r = _prepare_call
+    prepare_call_f = _prepare_call
+    prepare_call_n = _prepare_call
+
     def _spill_before_call(self, save_all_regs=False):
         # spill variables that need to be saved around calls
         self.fprm.before_call(save_all_regs=save_all_regs)
@@ -939,14 +939,14 @@
         for i in range(op.numargs()):
             args.append(self.loc(op.getarg(i)))
         self._spill_before_call(save_all_regs)
-        if op.result:
-            resloc = self.after_call(op.result)
+        if op.type != VOID:
+            resloc = self.after_call(op)
             args[0] = resloc
         return args
 
     def prepare_call_malloc_nursery(self, op):
-        self.rm.force_allocate_reg(op.result, selected_reg=r.RES)
-        self.rm.temp_boxes.append(op.result)
+        self.rm.force_allocate_reg(op, selected_reg=r.RES)
+        self.rm.temp_boxes.append(op)
         tmp_box = TempInt()
         self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ)
         self.rm.temp_boxes.append(tmp_box)
@@ -958,8 +958,8 @@
         # (we take care explicitly of conflicts with r.RES or r.RSZ)
         self.free_op_vars()
         # the result will be in r.RES
-        self.rm.force_allocate_reg(op.result, selected_reg=r.RES)
-        self.rm.temp_boxes.append(op.result)
+        self.rm.force_allocate_reg(op, selected_reg=r.RES)
+        self.rm.temp_boxes.append(op)
         # we need r.RSZ as a temporary
         tmp_box = TempInt()
         self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ)
@@ -968,8 +968,8 @@
 
     def prepare_call_malloc_nursery_varsize(self, op):
         # the result will be in r.RES
-        self.rm.force_allocate_reg(op.result, selected_reg=r.RES)
-        self.rm.temp_boxes.append(op.result)
+        self.rm.force_allocate_reg(op, selected_reg=r.RES)
+        self.rm.temp_boxes.append(op)
         # we need r.RSZ as a temporary
         tmp_box = TempInt()
         self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ)
@@ -1001,7 +1001,7 @@
         return arglocs
 
     def prepare_force_token(self, op):
-        res_loc = self.force_allocate_reg(op.result)
+        res_loc = self.force_allocate_reg(op)
         return [res_loc]
 
     def prepare_label(self, op):
@@ -1016,7 +1016,7 @@
         # of some guard
         position = self.rm.position
         for arg in inputargs:
-            assert isinstance(arg, Box)
+            assert not isinstance(arg, Const)
             if self.last_real_usage.get(arg, -1) <= position:
                 self.force_spill_var(arg)
         #
@@ -1028,7 +1028,7 @@
         #
         for i in range(len(inputargs)):
             arg = inputargs[i]
-            assert isinstance(arg, Box)
+            assert not isinstance(arg, Const)
             loc = self.loc(arg)
             assert loc is not r.SPP
             arglocs[i] = loc
@@ -1040,7 +1040,7 @@
         self.flush_loop()
         #
         descr._ppc_arglocs = arglocs
-        descr._ppc_loop_code = self.assembler.mc.currpos()
+        descr._ll_loop_code = self.assembler.mc.currpos()
         descr._ppc_clt = self.assembler.current_clt
         self.assembler.target_tokens_currently_compiling[descr] = None
         self.possibly_free_vars_for_op(op)
@@ -1053,17 +1053,33 @@
         if jump_op is not None and jump_op.getdescr() is descr:
             self._compute_hint_frame_locations_from_descr(descr)
 
-    def prepare_call_may_force(self, op):
+    def _prepare_call_may_force(self, op):
         return self._prepare_call(op, save_all_regs=True)
 
-    prepare_call_release_gil = prepare_call_may_force
+    prepare_call_may_force_i = _prepare_call_may_force
+    prepare_call_may_force_r = _prepare_call_may_force
+    prepare_call_may_force_f = _prepare_call_may_force
+    prepare_call_may_force_n = _prepare_call_may_force
 
-    def prepare_call_assembler(self, op):
+    prepare_call_release_gil_i = _prepare_call_may_force
+    prepare_call_release_gil_r = _prepare_call_may_force
+    prepare_call_release_gil_f = _prepare_call_may_force
+    prepare_call_release_gil_n = _prepare_call_may_force
+
+    def _prepare_call_assembler(self, op):
         locs = self.locs_for_call_assembler(op)
         self._spill_before_call(save_all_regs=True)
-        resloc = self.after_call(op.result)
+        if op.type != VOID:
+            resloc = self.after_call(op)
+        else:
+            resloc = None
         return [resloc] + locs
 
+    prepare_call_assembler_i = _prepare_call_assembler
+    prepare_call_assembler_r = _prepare_call_assembler
+    prepare_call_assembler_f = _prepare_call_assembler
+    prepare_call_assembler_n = _prepare_call_assembler
+
     def prepare_force_spill(self, op):
         self.force_spill_var(op.getarg(0))
         return []
diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py
--- a/rpython/jit/backend/ppc/runner.py
+++ b/rpython/jit/backend/ppc/runner.py
@@ -84,3 +84,9 @@
     def get_all_loop_runs(self):
         # not implemented
         return lltype.malloc(LOOP_RUN_CONTAINER, 0)
+
+    def build_regalloc(self):
+        ''' for tests'''
+        from rpython.jit.backend.ppc.regalloc import Regalloc
+        assert self.assembler is not None
+        return Regalloc(self.assembler)
diff --git a/rpython/jit/backend/ppc/test/support.py b/rpython/jit/backend/ppc/test/support.py
--- a/rpython/jit/backend/ppc/test/support.py
+++ b/rpython/jit/backend/ppc/test/support.py
@@ -4,6 +4,9 @@
 class JitPPCMixin(support.LLJitMixin):
     type_system = 'lltype'
     CPUClass = getcpuclass()
+    # we have to disable unroll
+    enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap"
+    basic = False
 
     def check_jumps(self, maxcount):
         pass
diff --git a/rpython/jit/backend/x86/test/test_recursive.py b/rpython/jit/backend/ppc/test/test_recursive.py
copy from rpython/jit/backend/x86/test/test_recursive.py
copy to rpython/jit/backend/ppc/test/test_recursive.py
--- a/rpython/jit/backend/x86/test/test_recursive.py
+++ b/rpython/jit/backend/ppc/test/test_recursive.py
@@ -1,30 +1,8 @@
 
 from rpython.jit.metainterp.test.test_recursive import RecursiveTests
-from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
-from rpython.jit.backend.llsupport.codemap import unpack_traceback
-from rpython.jit.backend.x86.arch import WORD
+from rpython.jit.backend.ppc.test.support import JitPPCMixin
 
-class TestRecursive(Jit386Mixin, RecursiveTests):
+class TestRecursive(JitPPCMixin, RecursiveTests):
     # for the individual tests see
     # ====> ../../../metainterp/test/test_recursive.py
-    def check_get_unique_id(self, codemaps):
-        if WORD == 4:
-            return # this is 64 bit only check
-
-        assert len(codemaps) == 3
-        # we want to create a map of differences, so unpacking the tracebacks
-        # byte by byte
-        codemaps.sort(lambda a, b: cmp(a[1], b[1]))
-        # biggest is the big loop, smallest is the bridge
-        def get_ranges(c):
-            ranges = []
-            prev_traceback = None
-            for b in range(c[0], c[0] + c[1]):
-                tb = unpack_traceback(b)
-                if tb != prev_traceback:
-                    ranges.append(tb)
-                    prev_traceback = tb
-            return ranges
-        assert get_ranges(codemaps[2]) == [[4], [4, 2], [4]]
-        assert get_ranges(codemaps[1]) == [[2]]
-        assert get_ranges(codemaps[0]) == [[2], []]
+    pass
diff --git a/rpython/jit/backend/ppc/test/test_regalloc_3.py b/rpython/jit/backend/ppc/test/test_regalloc_3.py
--- a/rpython/jit/backend/ppc/test/test_regalloc_3.py
+++ b/rpython/jit/backend/ppc/test/test_regalloc_3.py
@@ -1,7 +1,5 @@
 import py
-from rpython.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\
-     BoxPtr, ConstPtr, BasicFailDescr, JitCellToken
-from rpython.jit.metainterp.resoperation import rop
+from rpython.jit.metainterp.history import JitCellToken
 from rpython.jit.backend.detect_cpu import getcpuclass
 from rpython.jit.backend.ppc.arch import WORD
 from rpython.jit.tool.oparser import parse
@@ -79,7 +77,7 @@
     i38 = uint_gt(i33, -11)
     i39 = int_neg(i7)
     i40 = int_gt(i24, i32)
-    i99 = same_as(0)
+    i99 = same_as_i(0)
     guard_true(i99) [i40, i36, i37, i31, i16, i34, i35, i23, i22, i29, i14, i39, i30, i38]
     finish(42)
     ''')
@@ -136,7 +134,7 @@
     i38 = int_gt(i4, i11)
     i39 = int_lt(i27, i22)
     i40 = int_neg(i27)
-    i99 = same_as(0)
+    i99 = same_as_i(0)
     guard_true(i99) [i40, i10, i36, i26, i13, i30, i21, i33, i18, i25, i31, i32, i28, i29, i35, i38, i20, i39, i34, i23, i37]
     finish(-42)
     ''')
diff --git a/rpython/jit/backend/ppc/test/test_runner.py b/rpython/jit/backend/ppc/test/test_runner.py
--- a/rpython/jit/backend/ppc/test/test_runner.py
+++ b/rpython/jit/backend/ppc/test/test_runner.py
@@ -4,11 +4,10 @@
 from rpython.jit.metainterp.history import (AbstractFailDescr,
                                             AbstractDescr,
                                             BasicFailDescr, BasicFinalDescr,
-                                            BoxInt, Box, BoxPtr,
                                             JitCellToken, TargetToken,
                                             ConstInt, ConstPtr,
-                                            Const,
-                                            BoxFloat, ConstFloat)
+                                            Const, ConstFloat)
+from rpython.jit.metainterp.resoperation import InputArgInt, InputArgFloat
 from rpython.rtyper.lltypesystem import lltype
 from rpython.jit.metainterp.resoperation import ResOperation, rop
 from rpython.jit.backend.ppc.arch import IS_PPC_32
@@ -105,51 +104,28 @@
     def test_unicodesetitem_really_needs_temploc(self):
         u_box = self.alloc_unicode(u"abcdsdasdsaddefg")
 
-        i0 = BoxInt()
-        i1 = BoxInt()
-        i2 = BoxInt()
-        i3 = BoxInt()
-        i4 = BoxInt()
-        i5 = BoxInt()
-        i6 = BoxInt()
-        i7 = BoxInt()
-        i8 = BoxInt()
-        i9 = BoxInt()
-        p10 = BoxPtr()
-
-        inputargs = [i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,p10]
-        looptoken = JitCellToken()
         targettoken = TargetToken()
         finaldescr = BasicFinalDescr(1)
+        loop = parse('''
+        [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, p10]
+        label(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, p10, descr=targettoken)
+        unicodesetitem(p10, i6, 123)
+        i11 = int_add(i0,  i1)
+        i12 = int_add(i11, i2)
+        i13 = int_add(i12, i3)
+        i14 = int_add(i13, i4)
+        i15 = int_add(i14, i5)
+        i16 = int_add(i15, i6)
+        i17 = int_add(i16, i7)
+        i18 = int_add(i17, i8)
+        i19 = int_add(i18, i9)
+        finish(i19, descr=finaldescr)
+        ''', namespace={'targettoken': targettoken,
+                        'finaldescr': finaldescr})
 
-        i11 = BoxInt()
-        i12 = BoxInt()
-        i13 = BoxInt()
-        i14 = BoxInt()
-        i15 = BoxInt()
-        i16 = BoxInt()
-        i17 = BoxInt()
-        i18 = BoxInt()
-        i19 = BoxInt()
-
-        operations = [
-            ResOperation(rop.LABEL, inputargs, None, descr=targettoken),
-            ResOperation(rop.UNICODESETITEM, 
-                         [p10, i6, ConstInt(123)], None),
-            ResOperation(rop.INT_ADD, [i0,  i1], i11),
-            ResOperation(rop.INT_ADD, [i11, i2], i12),
-            ResOperation(rop.INT_ADD, [i12, i3], i13),
-            ResOperation(rop.INT_ADD, [i13, i4], i14),
-            ResOperation(rop.INT_ADD, [i14, i5], i15),
-            ResOperation(rop.INT_ADD, [i15, i6], i16),
-            ResOperation(rop.INT_ADD, [i16, i7], i17),
-            ResOperation(rop.INT_ADD, [i17, i8], i18),
-            ResOperation(rop.INT_ADD, [i18, i9], i19),
-            ResOperation(rop.FINISH, [i19], None, descr=finaldescr)
-            ]
-
+        looptoken = JitCellToken()
         args = [(i + 1) for i in range(10)] + [u_box.getref_base()]
-        self.cpu.compile_loop(inputargs, operations, looptoken)
+        self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
         deadframe = self.cpu.execute_token(looptoken, *args)
         fail = self.cpu.get_latest_descr(deadframe)
         assert fail.identifier == 1
@@ -200,27 +176,26 @@
 
     def test_compile_more_than_32k(self):
         # the guard_true needs a "b.cond" jumping forward more than 32 kb
-        i0 = BoxInt()
-        i1 = BoxInt()
         looptoken = JitCellToken()
         targettoken = TargetToken()
-        operations = [
-            ResOperation(rop.LABEL, [i0], None, descr=targettoken),
-            ResOperation(rop.INT_LE, [i0, ConstInt(9)], i1),
-            ResOperation(rop.GUARD_TRUE, [i1], None, descr=BasicFailDescr(5)),
+        ops = [
+            '[i0]',
+            'label(i0, descr=targettoken)',
+            'i1 = int_le(i0, 9)',
+            'guard_true(i1, descr=faildescr) [i0]',
             ]
-        operations[2].setfailargs([i0])
-        inputargs = [i0]
         NUM = 8193
+        iprevious = 'i0'
         for i in range(NUM):
-            i2 = BoxInt()
-            operations.append(
-                ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i2))
-            i0 = i2
-        operations.append(
-            ResOperation(rop.JUMP, [i0], None, descr=targettoken))
+            inext = 'i%d' % (i + 2,)
+            ops.append('%s = int_add(%s, 1)' % (inext, iprevious))
+            iprevious = inext
+        ops.append('jump(%s, descr=targettoken)' % (iprevious,))
 
-        self.cpu.compile_loop(inputargs, operations, looptoken)
+        loop = parse('\n'.join(ops), namespace={'targettoken': targettoken,
+                                                'faildescr': BasicFailDescr(5)})
+
+        self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
         deadframe = self.cpu.execute_token(looptoken, -42)
         fail = self.cpu.get_latest_descr(deadframe)
         assert fail.identifier == 5
@@ -252,13 +227,13 @@
         argboxes = []
         for x in argvals:
             if isinstance(x, float):
-                argboxes.append(BoxFloat(x))
+                argboxes.append(InputArgFloat(x))
             else:
-                argboxes.append(BoxInt(x))
-        res = self.execute_operation(rop.CALL,
+                argboxes.append(InputArgInt(x))
+        res = self.execute_operation(rop.CALL_I,
                                      [funcbox] + argboxes,
                                      'int', descr=calldescr)
-        assert res.value == -42
+        assert res == -42
         assert seen == [argvals]
 
     def test_subi_range(self):
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1530,7 +1530,3 @@
     if we_are_translated():
         llop.debug_print(lltype.Void, msg)
     raise NotImplementedError(msg)
-
-# xxx hack: set a default value for TargetToken._ll_loop_code.
-# If 0, we know that it is a LABEL that was not compiled yet.
-TargetToken._ll_loop_code = 0
diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py
--- a/rpython/jit/backend/x86/test/test_runner.py
+++ b/rpython/jit/backend/x86/test/test_runner.py
@@ -39,7 +39,7 @@
                                  'nop; '    # for the label
                                  'add; test; je; jmp;')   # plus some padding
         bridge_loop_instructions = (
-            'cmp; jge; mov; mov; mov;( mov ;)? call; mov; jmp;')
+            'cmp; jge; mov;( movabs;)? mov; mov(abs)?; call; mov(abs)?; jmp;')
 
     def get_cpu(self):
         cpu = CPU(rtyper=None, stats=FakeStats())
diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
--- a/rpython/jit/metainterp/history.py
+++ b/rpython/jit/metainterp/history.py
@@ -438,6 +438,9 @@
         self.compiled_loop_token.cpu.dump_loop_token(self)
 
 class TargetToken(AbstractDescr):
+    _ll_loop_code = 0     # for the backend.  If 0, we know that it is
+                          # a LABEL that was not compiled yet.
+
     def __init__(self, targeting_jitcell_token=None,
                  original_jitcell_token=None):
         # Warning, two different jitcell_tokens here!
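
The history.py hunk replaces the per-backend monkey-patching of TargetToken (removed from the x86 and arm regalloc modules earlier in this same changeset) with an ordinary class-level default. A tiny illustration of the pattern, using a made-up address value:

    # Class-level default instead of a backend-side monkey-patch: a fresh
    # TargetToken has _ll_loop_code == 0, meaning its LABEL has not been
    # compiled yet; the backend later stores the real machine-code address.
    class TargetToken(object):
        _ll_loop_code = 0

    tok = TargetToken()
    assert tok._ll_loop_code == 0        # LABEL not compiled yet
    tok._ll_loop_code = 0x10a2c4e0       # hypothetical address filled in later
    assert tok._ll_loop_code != 0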
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit
