Author: Armin Rigo <ar...@tunes.org>
Branch: ppc-updated-backend
Changeset: r79592:5b7660b90e96
Date: 2015-09-05 18:35 +0200
http://bitbucket.org/pypy/pypy/changeset/5b7660b90e96/

Log:    PPC Backend #3: fix the calls.

        Reviewed the particular calling conventions and adapted the code to
        them.  Copied and adapted callbuilder.py from the x86 backend.  Added
        support for releasing the GIL, saving and restoring errno, and so on.

diff too long, truncating to 2000 out of 2077 lines

diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py
--- a/rpython/jit/backend/llsupport/llerrno.py
+++ b/rpython/jit/backend/llsupport/llerrno.py
@@ -1,14 +1,22 @@
+import sys
 from rpython.rtyper.lltypesystem import lltype, rffi
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
 from rpython.jit.backend.llsupport.symbolic import WORD
 
 
+if sys.byteorder == 'little' or sys.maxint <= 2**32:
+    long2int = int2long = lambda x: x
+else:
+    def long2int(x): return x >> 32
+    def int2long(x): return x << 32
+
+
 def get_debug_saved_errno(cpu):
-    return cpu._debug_errno_container[3]
+    return long2int(cpu._debug_errno_container[3])
 
 def set_debug_saved_errno(cpu, nerrno):
     assert nerrno >= 0
-    cpu._debug_errno_container[3] = nerrno
+    cpu._debug_errno_container[3] = int2long(nerrno)
 
 def get_rpy_errno_offset(cpu):
     if cpu.translate_support_code:
@@ -19,11 +27,11 @@
 
 
 def get_debug_saved_alterrno(cpu):
-    return cpu._debug_errno_container[4]
+    return long2int(cpu._debug_errno_container[4])
 
 def set_debug_saved_alterrno(cpu, nerrno):
     assert nerrno >= 0
-    cpu._debug_errno_container[4] = nerrno
+    cpu._debug_errno_container[4] = int2long(nerrno)
 
 def get_alt_errno_offset(cpu):
     if cpu.translate_support_code:
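
The shift by 32 exists because the JIT writes errno into its container
word with a 32-bit store at offset 0 (see read_real_errno() in the new
callbuilder.py below): on a big-endian 64-bit host those four bytes are
the *high* half of the word.  A minimal sketch of the effect, purely
illustrative and not part of the patch:

    import struct

    def read_slot_after_stw(value, byteorder):
        # simulate a 32-bit "stw" at offset 0 of a zeroed 8-byte slot,
        # then read the whole slot back as a 64-bit integer
        half = struct.pack('<I' if byteorder == 'little' else '>I', value)
        fmt = '<q' if byteorder == 'little' else '>q'
        return struct.unpack(fmt, half + b'\x00' * 4)[0]

    assert read_slot_after_stw(42, 'little') == 42      # long2int: no-op
    assert read_slot_after_stw(42, 'big') == 42 << 32   # long2int: >> 32
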
diff --git a/rpython/jit/backend/ppc/callbuilder.py b/rpython/jit/backend/ppc/callbuilder.py
--- a/rpython/jit/backend/ppc/callbuilder.py
+++ b/rpython/jit/backend/ppc/callbuilder.py
@@ -1,9 +1,12 @@
 from rpython.jit.backend.ppc.arch import IS_PPC_64, WORD, PARAM_SAVE_AREA_OFFSET
-from rpython.jit.backend.ppc.arch import IS_BIG_ENDIAN, IS_LITTLE_ENDIAN
+from rpython.jit.backend.ppc.arch import THREADLOCAL_ADDR_OFFSET
 import rpython.jit.backend.ppc.register as r
 from rpython.jit.metainterp.history import INT, FLOAT
 from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder
 from rpython.jit.backend.ppc.jump import remap_frame_layout
+from rpython.rlib.objectmodel import we_are_translated
+from rpython.jit.backend.llsupport import llerrno
+from rpython.rtyper.lltypesystem import rffi
 
 
 def follow_jump(addr):
@@ -16,11 +19,9 @@
     FPR_ARGS = r.MANAGED_FP_REGS
     assert FPR_ARGS == [r.f1, r.f2, r.f3, r.f4, r.f5, r.f6, r.f7,
                         r.f8, r.f9, r.f10, r.f11, r.f12, r.f13]
-
-    if IS_BIG_ENDIAN:
-        FNREG = r.r2
-    else:
-        FNREG = r.r12
+    RSHADOWPTR  = r.RCS1
+    RFASTGILPTR = r.RCS2
+    RSHADOWOLD  = r.RCS3
 
     def __init__(self, assembler, fnloc, arglocs, resloc):
         AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs,
@@ -30,7 +31,16 @@
         assert IS_PPC_64
         self.subtracted_to_sp = 0
 
-        # Prepare arguments
+        # Prepare arguments.  Note that this follows the convention where
+        # a prototype is in scope, and doesn't take "..." arguments.  If
+        # you were to call a C function with a "..." argument with cffi,
+        # it would not go there but instead via libffi.  If you pretend
+        # instead that it takes fixed arguments, then it would arrive here
+        # but the convention is bogus for floating-point arguments.  (And,
+        # to add to the mess, at least CPython's ctypes cannot be used
+        # to call a "..." function with floating-point arguments.  As I
+        # guess that it's a problem with libffi, it means PyPy inherits
+        # the same problem.)
         arglocs = self.arglocs
         num_args = len(arglocs)
 
@@ -67,7 +77,7 @@
                     # after the 8th argument, a non-float location is
                     # always stored in the stack
                     if loc.is_reg():
-                        src = loc.value
+                        src = loc
                     else:
                         src = r.r2
                         self.asm.regalloc_mov(loc, src)
@@ -80,7 +90,7 @@
                         float_locs.append(loc)
                     else:
                         if loc.is_fp_reg():
-                            src = loc.value
+                            src = loc
                         else:
                             src = r.FP_SCRATCH
                             self.asm.regalloc_mov(loc, src)
@@ -88,7 +98,7 @@
 
         # We must also copy fnloc into FNREG
         non_float_locs.append(self.fnloc)
-        non_float_regs.append(self.FNREG)
+        non_float_regs.append(self.mc.RAW_CALL_REG)     # r2 or r12
 
         if float_locs:
             assert len(float_locs) <= len(self.FPR_ARGS)
@@ -107,25 +117,145 @@
         pass  # XXX
 
     def emit_raw_call(self):
-        if IS_BIG_ENDIAN:
-            # Load the function descriptor (currently in r2) from memory:
-            #  [r2 + 0]  -> ctr
-            #  [r2 + 16] -> r11
-            #  [r2 + 8]  -> r2  (= TOC)
-            assert self.FNREG is r.r2
-            self.mc.ld(r.SCRATCH.value, r.r2.value, 0)
-            self.mc.ld(r.r11.value, r.r2.value, 16)
-            self.mc.mtctr(r.SCRATCH.value)
-            self.mc.ld(r.TOC.value, r.r2.value, 8)   # must be last: TOC is r2
-        elif IS_LITTLE_ENDIAN:
-            assert self.FNREG is r.r12
-            self.mc.mtctr(r.r12.value)
-        # Call the function
-        self.mc.bctrl()
+        self.mc.raw_call()
 
     def restore_stack_pointer(self):
         if self.subtracted_to_sp != 0:
             self.mc.addi(r.SP.value, r.SP.value, self.subtracted_to_sp)
 
     def load_result(self):
-        pass
+        assert (self.resloc is None or
+                self.resloc is r.r3 or
+                self.resloc is r.f1)
+
+
+    def call_releasegil_addr_and_move_real_arguments(self, fastgil):
+        assert self.is_call_release_gil
+        RSHADOWPTR  = self.RSHADOWPTR
+        RFASTGILPTR = self.RFASTGILPTR
+        RSHADOWOLD  = self.RSHADOWOLD
+        #
+        # Save this thread's shadowstack pointer into r28, for later comparison
+        gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap
+        if gcrootmap:
+            rst = gcrootmap.get_root_stack_top_addr()
+            self.mc.load_imm(RSHADOWPTR, rst)
+            self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0)
+        #
+        # change 'rpy_fastgil' to 0 (it should be non-zero right now)
+        self.mc.load_imm(RFASTGILPTR, fastgil)
+        self.mc.li(r.r0.value, 0)
+        self.mc.lwsync()
+        self.mc.std(r.r0.value, RFASTGILPTR.value, 0)
+        #
+        if not we_are_translated():        # for testing: we should not access
+            self.mc.addi(r.SPP.value, r.SPP.value, 1)           # r31 any more
+
+
+    def move_real_result_and_call_reacqgil_addr(self, fastgil):
+        from rpython.jit.backend.ppc.codebuilder import OverwritingBuilder
+
+        # try to reacquire the lock.  The following registers are still
+        # valid from before the call:
+        RSHADOWPTR  = self.RSHADOWPTR     # r30: &root_stack_top
+        RFASTGILPTR = self.RFASTGILPTR    # r29: &fastgil
+        RSHADOWOLD  = self.RSHADOWOLD     # r28: previous val of root_stack_top
+
+        # Equivalent of 'r10 = __sync_lock_test_and_set(&rpy_fastgil, 1);'
+        self.mc.li(r.r9.value, 1)
+        retry_label = self.mc.currpos()
+        self.mc.ldarx(r.r10.value, 0, RFASTGILPTR.value)  # load the lock value
+        self.mc.stdcxx(r.r9.value, 0, RFASTGILPTR.value)  # try to claim lock
+        self.mc.bc(6, 2, retry_label - self.mc.currpos()) # retry if failed
+        self.mc.isync()
+
+        self.mc.cmpdi(0, r.r10.value, 0)
+        b1_location = self.mc.currpos()
+        self.mc.trap()       # patched with a BEQ: jump if r10 is zero
+
+        if self.asm.cpu.gc_ll_descr.gcrootmap:
+            # When doing a call_release_gil with shadowstack, there
+            # is the risk that the 'rpy_fastgil' was free but the
+            # current shadowstack can be the one of a different
+            # thread.  So here we check if the shadowstack pointer
+        # is still the same as before we released the GIL (saved
+        # in RSHADOWOLD), and if not, we fall back to 'reacqgil_addr'.
+            XXXXXXXXXXXXXXXXXXX
+            self.mc.LDR_ri(r.ip.value, r.r5.value, cond=c.EQ)
+            self.mc.CMP_rr(r.ip.value, r.r7.value, cond=c.EQ)
+            b1_location = self.mc.currpos()
+            self.mc.BKPT()                       # BEQ below
+            # there are two cases here: either EQ was false from
+            # the beginning, or EQ was true at first but the CMP
+            # made it false.  In the second case we need to
+            # release the fastgil here.  We know which case it is
+            # by checking again r3.
+            self.mc.CMP_ri(r.r3.value, 0)
+            self.mc.STR_ri(r.r3.value, r.r6.value, cond=c.EQ)
+        #
+        # save the result we just got
+        RSAVEDRES = RFASTGILPTR     # can reuse this reg here
+        reg = self.resloc
+        if reg is not None:
+            if reg.is_core_reg():
+                self.mc.mr(RSAVEDRES.value, reg.value)
+            elif reg.is_fp_reg():
+                self.mc.stfd(reg.value, r.SP.value,
+                             PARAM_SAVE_AREA_OFFSET + 7 * WORD)
+        self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr)
+        self.mc.raw_call()
+        if reg is not None:
+            if reg.is_core_reg():
+                self.mc.mr(reg.value, RSAVEDRES.value)
+            elif reg.is_fp_reg():
+                self.mc.lfd(reg.value, r.SP.value,
+                            PARAM_SAVE_AREA_OFFSET + 7 * WORD)
+
+        # replace b1_location with BEQ(here)
+        jmp_target = self.mc.currpos()
+        pmc = OverwritingBuilder(self.mc, b1_location, 1)
+        pmc.bc(12, 2, jmp_target - b1_location)    # "beq"
+        pmc.overwrite()
+
+        if not we_are_translated():        # for testing: now we can access
+            self.mc.addi(r.SPP.value, r.SPP.value, -1)          # r31 again
+
+
+    def write_real_errno(self, save_err):
+        if save_err & rffi.RFFI_READSAVED_ERRNO:
+            # Just before a call, read '*_errno' and write it into the
+            # real 'errno'.  A lot of registers are free here, notably
+            # r11 and r0.
+            if save_err & rffi.RFFI_ALT_ERRNO:
+                rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
+            else:
+                rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
+            p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
+            self.mc.ld(r.r11.value, r.SP.value,
+                       THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp)
+            self.mc.lwz(r.r0.value, r.r11.value, rpy_errno)
+            self.mc.ld(r.r11.value, r.r11.value, p_errno)
+            self.mc.stw(r.r0.value, r.r11.value, 0)
+        elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE:
+            # Same, but write zero.
+            p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
+            self.mc.ld(r.r11.value, r.SP.value,
+                       THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp)
+            self.mc.ld(r.r11.value, r.r11.value, p_errno)
+            self.mc.li(r.r0.value, 0)
+            self.mc.stw(r.r0.value, r.r11.value, 0)
+
+    def read_real_errno(self, save_err):
+        if save_err & rffi.RFFI_SAVE_ERRNO:
+            # Just after a call, read the real 'errno' and save a copy of
+            # it inside our thread-local '*_errno'.  Registers r4-r10
+            # never contain anything after the call.
+            if save_err & rffi.RFFI_ALT_ERRNO:
+                rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
+            else:
+                rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
+            p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
+            self.mc.ld(r.r9.value, r.SP.value, THREADLOCAL_ADDR_OFFSET)
+            self.mc.ld(r.r10.value, r.r9.value, p_errno)
+            self.mc.lwz(r.r10.value, r.r10.value, 0)
+            self.mc.stw(r.r10.value, r.r9.value, rpy_errno)
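
Two notes on the code above.  First, the ldarx/stdcxx/bc loop is the
PPC load-reserve/store-conditional idiom; it implements exactly the
atomic exchange named in the comment.  A rough Python model of its
semantics (the lock merely stands in for the hardware reservation):

    import threading

    _reservation = threading.Lock()   # stands in for ldarx/stdcxx + retry

    def sync_lock_test_and_set(cell, newvalue):
        with _reservation:
            old = cell[0]
            cell[0] = newvalue
            return old

    rpy_fastgil = [0]                              # 0 means the GIL is free
    old = sync_lock_test_and_set(rpy_fastgil, 1)   # try to claim it
    assert old == 0 and rpy_fastgil[0] == 1        # fast path: got the GIL

Second, write_real_errno()/read_real_errno() implement the usual rffi
errno dance around a call.  In hedged pseudo-Python -- the flag values,
the thread-local field name and the two *_real_errno helpers are all
illustrative stand-ins, not real APIs:

    RFFI_READSAVED_ERRNO   = 1 << 0    # illustrative flag values; the
    RFFI_ZERO_ERRNO_BEFORE = 1 << 1    # real ones live in rffi
    RFFI_SAVE_ERRNO        = 1 << 2

    _c_errno = [0]                         # stand-in for the C-level errno
    def set_real_errno(v): _c_errno[0] = v
    def get_real_errno(): return _c_errno[0]

    class TL: rpy_errno = 0                # stand-in for the thread-local

    def call_with_errno_handling(func, args, save_err, tl):
        if save_err & RFFI_READSAVED_ERRNO:      # write_real_errno()
            set_real_errno(tl.rpy_errno)
        elif save_err & RFFI_ZERO_ERRNO_BEFORE:
            set_real_errno(0)
        result = func(*args)
        if save_err & RFFI_SAVE_ERRNO:           # read_real_errno()
            tl.rpy_errno = get_real_errno()
        return result
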
diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py
--- a/rpython/jit/backend/ppc/codebuilder.py
+++ b/rpython/jit/backend/ppc/codebuilder.py
@@ -47,6 +47,7 @@
 XL2 = Form("crbD", "XO1", "Rc")
 XFL = Form("FM", "frB", "XO1", "Rc")
 XFX = Form("CRM", "rS", "XO1")
+XLL = Form("LL", "XO1")
 
 MI = Form("rA", "rS", "SH", "MB", "ME", "Rc")
 MB = Form("rA", "rS", "rB", "MB", "ME", "Rc")
@@ -542,7 +543,8 @@
     subfzeo = XO0(31, OE=1, XO2=200, Rc=0)
     subfzeox= XO0(31, OE=1, XO2=200, Rc=1)
 
-    sync    = X(31, XO1=598)
+    sync    = XLL(31, LL=0, XO1=598)
+    lwsync  = XLL(31, LL=1, XO1=598)
 
     tlbia = X(31, XO1=370)
     tlbie = Form("rB", "XO1")(31, XO1=306)
@@ -925,8 +927,8 @@
 class PPCGuardToken(GuardToken):
     def __init__(self, cpu, gcmap, descr, failargs, faillocs,
                  exc, frame_depth, is_guard_not_invalidated=False,
-                 is_guard_not_forced=False, fcond=c.UH):
-        assert fcond != c.UH
+                 is_guard_not_forced=False, fcond=c.cond_none):
+        assert fcond != c.cond_none
         GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, exc,
                             frame_depth, is_guard_not_invalidated,
                             is_guard_not_forced)
@@ -1007,7 +1009,7 @@
         self.b(offset)
 
     def b_cond_offset(self, offset, condition):
-        assert condition != c.UH
+        assert condition != c.cond_none
         BI, BO = c.encoding[condition]
 
         pos = self.currpos()
@@ -1015,7 +1017,7 @@
         self.bc(BO, BI, target_ofs)
 
     def b_cond_abs(self, addr, condition):
-        assert condition != c.UH
+        assert condition != c.cond_none
         BI, BO = c.encoding[condition]
 
         with scratch_reg(self):
@@ -1037,6 +1039,29 @@
             self.mtctr(r.SCRATCH.value)
         self.bctrl()
 
+    if IS_BIG_ENDIAN:
+        RAW_CALL_REG = r.r2
+    else:
+        RAW_CALL_REG = r.r12
+
+    def raw_call(self):
+        """Emit a call to the address stored in the register RAW_CALL_REG."""
+        if IS_BIG_ENDIAN:
+            # Load the function descriptor (currently in r2) from memory:
+            #  [r2 + 0]  -> ctr
+            #  [r2 + 16] -> r11
+            #  [r2 + 8]  -> r2  (= TOC)
+            assert self.RAW_CALL_REG is r.r2
+            self.ld(r.SCRATCH.value, r.r2.value, 0)
+            self.ld(r.r11.value, r.r2.value, 16)
+            self.mtctr(r.SCRATCH.value)
+            self.ld(r.TOC.value, r.r2.value, 8)   # must be last: TOC is r2
+        elif IS_LITTLE_ENDIAN:
+            assert self.RAW_CALL_REG is r.r12     # 'r12' is fixed by this ABI
+            self.mtctr(r.r12.value)
+        # Call the function
+        self.bctrl()
+
     ## def call(self, address):
     ##     """ do a call to an absolute address
     ##     """
diff --git a/rpython/jit/backend/ppc/condition.py b/rpython/jit/backend/ppc/condition.py
--- a/rpython/jit/backend/ppc/condition.py
+++ b/rpython/jit/backend/ppc/condition.py
@@ -6,7 +6,7 @@
 GE = 5
 SO = 6
 NS = 7
-UH = -1    # invalid
+cond_none = -1    # invalid
 
 def negate(cond):
     return cond ^ 1
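
The rename from UH to cond_none makes the sentinel's meaning explicit.
negate() keeps working because the condition constants are numbered in
complementary pairs that differ only in the lowest bit, as with SO = 6
and NS = 7 just above:

    def negate(cond):
        return cond ^ 1

    SO, NS = 6, 7
    assert negate(SO) == NS and negate(NS) == SO
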
diff --git a/rpython/jit/backend/ppc/helper/assembler.py b/rpython/jit/backend/ppc/helper/assembler.py
--- a/rpython/jit/backend/ppc/helper/assembler.py
+++ b/rpython/jit/backend/ppc/helper/assembler.py
@@ -6,15 +6,36 @@
 import rpython.jit.backend.ppc.register as r
 from rpython.rtyper.lltypesystem import rffi, lltype
 
-def test_condition_for(condition, guard_op):
-    opnum = guard_op.getopnum()
-    if opnum == rop.GUARD_FALSE:
-        return condition
-    elif opnum == rop.GUARD_TRUE:
-        return c.negate(condition)
-    assert 0, opnum
 
-def do_emit_cmp_op(self, guard_op, arglocs, condition, signed, fp):
+def flush_cc(asm, condition, result_loc):
+    # After emitting an instruction that leaves a boolean result in
+    # a condition code (cc), call this.  In the common case, result_loc
+    # will be set to SPP by the regalloc, which in this case means
+    # "propagate it between this operation and the next guard by keeping
+    # it in the cc".  In the uncommon case, result_loc is another
+    # register, and we emit a load from the cc into this register.
+    assert asm.guard_success_cc == c.cond_none
+    if result_loc is r.SPP:
+        asm.guard_success_cc = condition
+    else:
+        # Possibly invert the bit in the CR
+        bit, invert = c.encoding[condition]
+        assert 0 <= bit <= 3
+        if invert == 12:
+            pass
+        elif invert == 4:
+            asm.mc.crnor(bit, bit, bit)
+        else:
+            assert 0
+
+        resval = result_loc.value
+        # move the content of the CR to resval
+        asm.mc.mfcr(resval)
+        # zero out everything except of the result
+        asm.mc.rlwinm(resval, resval, 1 + bit, 31, 31)
+
+
+def do_emit_cmp_op(self, arglocs, condition, signed, fp):
     l0 = arglocs[0]
     l1 = arglocs[1]
     assert not l0.is_imm()
@@ -40,32 +61,12 @@
             self.mc.crnor(0, 0, 3)
             condition = c.LT
 
-    if guard_op is None:
-        # After the comparison, place the result in a single bit of the CR
-        bit, invert = c.encoding[condition]
-        assert 0 <= bit <= 3
-        if invert == 12:
-            pass
-        elif invert == 4:
-            self.mc.crnor(bit, bit, bit)
-        else:
-            assert 0
+    flush_cc(self, condition, arglocs[2])
 
-        assert len(arglocs) == 3
-        res = arglocs[2]
-        resval = res.value
-        # move the content of the CR to resval
-        self.mc.mfcr(resval)
-        # zero out everything except of the result
-        self.mc.rlwinm(resval, resval, 1 + bit, 31, 31)
-    else:
-        failargs = arglocs[2:]
-        fcond = test_condition_for(condition, guard_op)
-        self._emit_guard(guard_op, failargs, fcond)
 
 def gen_emit_cmp_op(condition, signed=True, fp=False):
-    def f(self, op, guard_op, arglocs, regalloc):
-        do_emit_cmp_op(self, guard_op, arglocs, condition, signed, fp)
+    def f(self, op, arglocs, regalloc):
+        do_emit_cmp_op(self, arglocs, condition, signed, fp)
     return f
 
 def count_reg_args(args):
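
In the materialization path of flush_cc(), mfcr copies the whole 32-bit
condition register and rlwinm rotates the chosen CR0 bit into the last
position while masking everything else off.  A sketch of that bit
arithmetic (IBM numbering: bit 0 is the most significant):

    def rlwinm(value, sh, mb, me):
        # rotate-left-word-immediate, then AND with mask(mb..me)
        rotated = ((value << sh) | (value >> (32 - sh))) & 0xffffffff
        mask = 0
        for i in range(mb, me + 1):
            mask |= 1 << (31 - i)
        return rotated & mask

    bit = 2                           # e.g. the EQ bit of CR0
    cr = 1 << (31 - bit)              # pretend that bit is set
    assert rlwinm(cr, 1 + bit, 31, 31) == 1   # what flush_cc() emits
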
diff --git a/rpython/jit/backend/ppc/helper/regalloc.py b/rpython/jit/backend/ppc/helper/regalloc.py
--- a/rpython/jit/backend/ppc/helper/regalloc.py
+++ b/rpython/jit/backend/ppc/helper/regalloc.py
@@ -13,7 +13,7 @@
 def _prepare_cmp_op(signed):
     lower_bound = -2**15 if signed else 0
     upper_bound = 2**15-1 if signed else 2**16-1
-    def f(self, op, guard_op):
+    def f(self, op):
         l0 = self.ensure_reg(op.getarg(0))
         a1 = op.getarg(1)
         if check_imm_box(a1, lower_bound, upper_bound):
@@ -21,34 +21,25 @@
         else:
             l1 = self.ensure_reg(a1)
         self.free_op_vars()
-        if guard_op is None:
-            res = self.force_allocate_reg(op.result)
-            return [l0, l1, res]
-        else:
-            return self._prepare_guard(guard_op, [l0, l1])
+        res = self.force_allocate_reg_or_cc(op.result)
+        return [l0, l1, res]
     return f
 prepare_cmp_op          = _prepare_cmp_op(signed=True)
 prepare_cmp_op_unsigned = _prepare_cmp_op(signed=False)
 
-def prepare_unary_cmp(self, op, guard_op):
+def prepare_unary_cmp(self, op):
     l0 = self.ensure_reg(op.getarg(0))
     l1 = imm(0)
     self.free_op_vars()
-    if guard_op is None:
-        res = self.force_allocate_reg(op.result)
-        return [l0, l1, res]
-    else:
-        return self._prepare_guard(guard_op, [l0, l1])
+    res = self.force_allocate_reg_or_cc(op.result)
+    return [l0, l1, res]
 
-def prepare_float_cmp(self, op, guard_op):
+def prepare_float_cmp(self, op):
     l0 = self.ensure_reg(op.getarg(0))
     l1 = self.ensure_reg(op.getarg(1))
     self.free_op_vars()
-    if guard_op is None:
-        res = self.force_allocate_reg(op.result)
-        return [l0, l1, res]
-    else:
-        return self._prepare_guard(guard_op, [l0, l1])
+    res = self.force_allocate_reg_or_cc(op.result)
+    return [l0, l1, res]
 
 def prepare_unary_op(self, op):
     l0 = self.ensure_reg(op.getarg(0))
@@ -87,11 +78,3 @@
     self.free_op_vars()
     res = self.force_allocate_reg(op.result)
     return [l0, l1, res]
-
-def prepare_int_binary_ovf(self, op, guard_op):
-    reg1 = self.ensure_reg(op.getarg(0))
-    reg2 = self.ensure_reg(op.getarg(1))
-    self.free_op_vars()
-    res = self.force_allocate_reg(op.result)
-    assert guard_op is not None
-    return self._prepare_guard(guard_op, [reg1, reg2, res])
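
All these prepare functions lost their guard_op argument: the pairing
of a comparison with the guard that consumes it now happens through the
cc instead.  The new force_allocate_reg_or_cc() presumably returns the
sentinel location SPP when the boolean is only consumed by the next
guard (cf. flush_cc() in helper/assembler.py).  A toy model of that
contract -- the lookahead test is invented for illustration:

    class ToyRegalloc(object):
        SPP = 'spp'                        # sentinel location
        def __init__(self, next_op_is_guard):
            self.next_op_is_guard = next_op_is_guard
            self.free_regs = ['r3', 'r4']
        def force_allocate_reg(self, var):
            return self.free_regs.pop()
        def force_allocate_reg_or_cc(self, var):
            if self.next_op_is_guard:      # hypothetical lookahead
                return self.SPP            # "keep the boolean in the cc"
            return self.force_allocate_reg(var)

    assert ToyRegalloc(True).force_allocate_reg_or_cc('b0') == 'spp'
    assert ToyRegalloc(False).force_allocate_reg_or_cc('b0') == 'r4'
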
diff --git a/rpython/jit/backend/ppc/jump.py b/rpython/jit/backend/ppc/jump.py
--- a/rpython/jit/backend/ppc/jump.py
+++ b/rpython/jit/backend/ppc/jump.py
@@ -51,7 +51,7 @@
                 dst = dst_locations[i]
                 originalkey = dst.as_key()
                 if srccount[originalkey] >= 0:
-                    assembler.regalloc_push(dst)
+                    assembler.regalloc_push(dst, 0)
                     while True:
                         key = dst.as_key()
                         assert srccount[key] == 1
@@ -63,7 +63,7 @@
                             break
                         _move(assembler, src, dst, tmpreg)
                         dst = src
-                    assembler.regalloc_pop(dst)
+                    assembler.regalloc_pop(dst, 0)
             assert pending_dests == 0
 
 def _move(assembler, src, dst, tmpreg):
@@ -91,7 +91,7 @@
             key = loc.as_key()
             if (key in dst_keys or (loc.width > WORD and
                                     (key + 1) in dst_keys)):
-                assembler.regalloc_push(loc)
+                assembler.regalloc_push(loc, len(extrapushes))
                 extrapushes.append(dstloc)
                 continue
         src_locations2red.append(loc)
@@ -108,4 +108,4 @@
     # finally, pop the extra fp stack locations
     while len(extrapushes) > 0:
         loc = extrapushes.pop()
-        assembler.regalloc_pop(loc)
+        assembler.regalloc_pop(loc, len(extrapushes))
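
The new second argument is a depth index: regalloc_push()/regalloc_pop()
no longer move SP on every push (see their new bodies in
ppc_assembler.py below, where index = WORD * (~already_pushed)).  The
resulting offsets:

    WORD = 8                                   # ppc64
    offsets = [WORD * (~n) for n in range(3)]
    assert offsets == [-8, -16, -24]           # slot n at SP - (n+1)*WORD
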
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -20,6 +20,7 @@
 from rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr
 from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
 from rpython.rtyper.lltypesystem import rstr, rffi, lltype
+from rpython.rtyper.annlowlevel import cast_instance_to_gcref
 from rpython.jit.metainterp.resoperation import rop
 from rpython.jit.backend.ppc import callbuilder
 
@@ -53,34 +54,23 @@
         else:
             self.mc.mulld(res.value, l0.value, l1.value)
 
-    def do_emit_int_binary_ovf(self, op, guard_op, arglocs, emit):
+    def do_emit_int_binary_ovf(self, op, arglocs, emit):
         l0, l1, res = arglocs[0], arglocs[1], arglocs[2]
         self.mc.load_imm(r.SCRATCH, 0)
         self.mc.mtxer(r.SCRATCH.value)
         emit(res.value, l0.value, l1.value)
-        #
-        failargs = arglocs[3:]
-        assert guard_op is not None
-        opnum = guard_op.getopnum()
-        if opnum == rop.GUARD_NO_OVERFLOW:
-            fcond = c.SO
-        elif opnum == rop.GUARD_OVERFLOW:
-            fcond = c.NS
+
+    def emit_int_add_ovf(self, op, arglocs, regalloc):
+        self.do_emit_int_binary_ovf(op, arglocs, self.mc.addox)
+
+    def emit_int_sub_ovf(self, op, arglocs, regalloc):
+        self.do_emit_int_binary_ovf(op, arglocs, self.mc.subox)
+
+    def emit_int_mul_ovf(self, op, arglocs, regalloc):
+        if IS_PPC_32:
+            self.do_emit_int_binary_ovf(op, arglocs, self.mc.mullwox)
         else:
-            assert 0
-        self._emit_guard(guard_op, failargs, fcond)
-
-    def emit_guard_int_add_ovf(self, op, guard_op, arglocs, regalloc):
-        self.do_emit_int_binary_ovf(op, guard_op, arglocs, self.mc.addox)
-
-    def emit_guard_int_sub_ovf(self, op, guard_op, arglocs, regalloc):
-        self.do_emit_int_binary_ovf(op, guard_op, arglocs, self.mc.subox)
-
-    def emit_guard_int_mul_ovf(self, op, guard_op, arglocs, regalloc):
-        if IS_PPC_32:
-            self.do_emit_int_binary_ovf(op, guard_op, arglocs, self.mc.mullwox)
-        else:
-            self.do_emit_int_binary_ovf(op, guard_op, arglocs, self.mc.mulldox)
+            self.do_emit_int_binary_ovf(op, arglocs, self.mc.mulldox)
 
     def emit_int_floordiv(self, op, arglocs, regalloc):
         l0, l1, res = arglocs
@@ -140,26 +130,26 @@
         else:
             self.mc.divdu(res.value, l0.value, l1.value)
 
-    emit_guard_int_le = gen_emit_cmp_op(c.LE)
-    emit_guard_int_lt = gen_emit_cmp_op(c.LT)
-    emit_guard_int_gt = gen_emit_cmp_op(c.GT)
-    emit_guard_int_ge = gen_emit_cmp_op(c.GE)
-    emit_guard_int_eq = gen_emit_cmp_op(c.EQ)
-    emit_guard_int_ne = gen_emit_cmp_op(c.NE)
+    emit_int_le = gen_emit_cmp_op(c.LE)
+    emit_int_lt = gen_emit_cmp_op(c.LT)
+    emit_int_gt = gen_emit_cmp_op(c.GT)
+    emit_int_ge = gen_emit_cmp_op(c.GE)
+    emit_int_eq = gen_emit_cmp_op(c.EQ)
+    emit_int_ne = gen_emit_cmp_op(c.NE)
 
-    emit_guard_uint_lt = gen_emit_cmp_op(c.LT, signed=False)
-    emit_guard_uint_le = gen_emit_cmp_op(c.LE, signed=False)
-    emit_guard_uint_gt = gen_emit_cmp_op(c.GT, signed=False)
-    emit_guard_uint_ge = gen_emit_cmp_op(c.GE, signed=False)
+    emit_uint_lt = gen_emit_cmp_op(c.LT, signed=False)
+    emit_uint_le = gen_emit_cmp_op(c.LE, signed=False)
+    emit_uint_gt = gen_emit_cmp_op(c.GT, signed=False)
+    emit_uint_ge = gen_emit_cmp_op(c.GE, signed=False)
 
-    emit_guard_int_is_zero = emit_guard_int_eq   # EQ to 0
-    emit_guard_int_is_true = emit_guard_int_ne   # NE to 0
+    emit_int_is_zero = emit_int_eq   # EQ to 0
+    emit_int_is_true = emit_int_ne   # NE to 0
 
-    emit_guard_ptr_eq = emit_guard_int_eq
-    emit_guard_ptr_ne = emit_guard_int_ne
+    emit_ptr_eq = emit_int_eq
+    emit_ptr_ne = emit_int_ne
 
-    emit_guard_instance_ptr_eq = emit_guard_ptr_eq
-    emit_guard_instance_ptr_ne = emit_guard_ptr_ne
+    emit_instance_ptr_eq = emit_ptr_eq
+    emit_instance_ptr_ne = emit_ptr_ne
 
     def emit_int_neg(self, op, arglocs, regalloc):
         l0, res = arglocs
@@ -223,12 +213,12 @@
         l0, res = arglocs
         self.mc.fsqrt(res.value, l0.value)
 
-    emit_guard_float_le = gen_emit_cmp_op(c.LE, fp=True)
-    emit_guard_float_lt = gen_emit_cmp_op(c.LT, fp=True)
-    emit_guard_float_gt = gen_emit_cmp_op(c.GT, fp=True)
-    emit_guard_float_ge = gen_emit_cmp_op(c.GE, fp=True)
-    emit_guard_float_eq = gen_emit_cmp_op(c.EQ, fp=True)
-    emit_guard_float_ne = gen_emit_cmp_op(c.NE, fp=True)
+    emit_float_le = gen_emit_cmp_op(c.LE, fp=True)
+    emit_float_lt = gen_emit_cmp_op(c.LT, fp=True)
+    emit_float_gt = gen_emit_cmp_op(c.GT, fp=True)
+    emit_float_ge = gen_emit_cmp_op(c.GE, fp=True)
+    emit_float_eq = gen_emit_cmp_op(c.EQ, fp=True)
+    emit_float_ne = gen_emit_cmp_op(c.NE, fp=True)
 
     def emit_cast_float_to_int(self, op, arglocs, regalloc):
         l0, temp_loc, res = arglocs
@@ -256,9 +246,16 @@
 
     _mixin_ = True
 
-    def _emit_guard(self, op, arglocs, fcond, save_exc=False,
+    def _emit_guard(self, op, arglocs, save_exc=False,
                     is_guard_not_invalidated=False,
                     is_guard_not_forced=False):
+        if is_guard_not_invalidated:
+            fcond = c.cond_none
+        else:
+            fcond = self.guard_success_cc
+            self.guard_success_cc = c.cond_none
+            assert fcond != c.cond_none
+            fcond = c.negate(fcond)
         token = self.build_guard_token(op, arglocs[0].value, arglocs[1:],
                                        fcond, save_exc, is_guard_not_invalidated,
                                        is_guard_not_forced)
@@ -279,18 +276,19 @@
         return token
 
     def emit_guard_true(self, op, arglocs, regalloc):
-        l0 = arglocs[0]
-        failargs = arglocs[1:]
-        self.mc.cmp_op(0, l0.value, 0, imm=True)
-        self._emit_guard(op, failargs, c.EQ)
-        #                        #      ^^^^ If this condition is met,
-        #                        #           then the guard fails.
+        self._emit_guard(op, arglocs)
 
     def emit_guard_false(self, op, arglocs, regalloc):
-        l0 = arglocs[0]
-        failargs = arglocs[1:]
-        self.mc.cmp_op(0, l0.value, 0, imm=True)
-        self._emit_guard(op, failargs, c.NE)
+        self.guard_success_cc = c.negate(self.guard_success_cc)
+        self._emit_guard(op, arglocs)
+
+    def emit_guard_overflow(self, op, arglocs, regalloc):
+        self.guard_success_cc = c.SO
+        self._emit_guard(op, arglocs)
+
+    def emit_guard_no_overflow(self, op, arglocs, regalloc):
+        self.guard_success_cc = c.NS
+        self._emit_guard(op, arglocs)
 
     def emit_guard_value(self, op, arglocs, regalloc):
         l0 = arglocs[0]
@@ -305,14 +303,16 @@
         elif l0.is_fp_reg():
             assert l1.is_fp_reg()
             self.mc.cmp_op(0, l0.value, l1.value, fp=True)
-        self._emit_guard(op, failargs, c.NE)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, failargs)
 
     emit_guard_nonnull = emit_guard_true
     emit_guard_isnull = emit_guard_false
 
     def emit_guard_class(self, op, arglocs, regalloc):
         self._cmp_guard_class(op, arglocs, regalloc)
-        self._emit_guard(op, arglocs[3:], c.NE)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, arglocs[3:])
 
     def emit_guard_nonnull_class(self, op, arglocs, regalloc):
         self.mc.cmp_op(0, arglocs[0].value, 1, imm=True, signed=False)
@@ -322,7 +322,8 @@
         pmc = OverwritingBuilder(self.mc, patch_pos, 1)
         pmc.bc(12, 0, self.mc.currpos() - patch_pos)    # LT
         pmc.overwrite()
-        self._emit_guard(op, arglocs[3:], c.NE, save_exc=False)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, arglocs[3:])
 
     def _cmp_guard_class(self, op, locs, regalloc):
         offset = locs[2]
@@ -344,13 +345,24 @@
                     self.mc.lwz(r.SCRATCH.value, locs[0].value, 4)
                 self.mc.cmp_op(0, r.SCRATCH.value, typeid.value, imm=typeid.is_imm())
 
-    def emit_guard_not_invalidated(self, op, locs, regalloc):
-        return self._emit_guard(op, locs, c.UH, is_guard_not_invalidated=True)
+    def emit_guard_not_invalidated(self, op, arglocs, regalloc):
+        self._emit_guard(op, arglocs, is_guard_not_invalidated=True)
+
+    def emit_guard_not_forced(self, op, arglocs, regalloc):
+        ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
+        self.mc.ld(r.SCRATCH.value, r.SPP.value, ofs)
+        self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, arglocs)
+
 
 class MiscOpAssembler(object):
 
     _mixin_ = True
 
+    def emit_label(self, op, arglocs, regalloc):
+        pass
+
     def emit_increment_debug_counter(self, op, arglocs, regalloc):
         [addr_loc, value_loc] = arglocs
         self.mc.load(value_loc.value, addr_loc.value, 0)
@@ -372,6 +384,7 @@
         self.mc.load_imm(r.r5, fail_descr_loc.getint())
         self.mc.std(r.r5.value, r.SPP.value, ofs)
 
+        ## XXX: gcmap logic here:
         ## arglist = op.getarglist()
         ## if arglist and arglist[0].type == REF:
         ##     if self._finish_gcmap:
@@ -421,13 +434,18 @@
     emit_cast_int_to_ptr = emit_same_as
 
     def emit_guard_no_exception(self, op, arglocs, regalloc):
-        loc = arglocs[0]
-        failargs = arglocs[1:]
-
-        self.mc.load(loc.value, loc.value, 0)
-        self.mc.cmp_op(0, loc.value, 0, imm=True)
-
-        self._emit_guard(op, failargs, c.NE, save_exc=True)
+        self.mc.load_from_addr(r.SCRATCH2, self.cpu.pos_exception())
+        self.mc.cmp_op(0, r.SCRATCH2.value, 0, imm=True)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, arglocs, save_exc=True)
+        # If the previous operation was a COND_CALL, overwrite its conditional
+        # jump to jump over this GUARD_NO_EXCEPTION as well, if we can
+        if self._find_nearby_operation(regalloc,-1).getopnum() == rop.COND_CALL:
+            jmp_adr, BI, BO = self.previous_cond_call_jcond
+            relative_target = self.mc.currpos() - jmp_adr
+            pmc = OverwritingBuilder(self.mc, jmp_adr, 1)
+            pmc.bc(BO, BI, relative_target)
+            pmc.overwrite()
 
     def emit_guard_exception(self, op, arglocs, regalloc):
         loc, loc1, resloc, pos_exc_value, pos_exception = arglocs[:5]
@@ -435,21 +453,27 @@
         self.mc.load_imm(loc1, pos_exception.value)
         self.mc.load(r.SCRATCH.value, loc1.value, 0)
         self.mc.cmp_op(0, r.SCRATCH.value, loc.value)
-
-        self._emit_guard(op, failargs, c.NE, save_exc=True)
+        self.guard_success_cc = c.EQ
+        self._emit_guard(op, failargs, save_exc=True)
         self.mc.load_imm(loc, pos_exc_value.value)
 
         if resloc:
             self.mc.load(resloc.value, loc.value, 0)
- 
+
         self.mc.load_imm(r.SCRATCH, 0)
         self.mc.store(r.SCRATCH.value, loc.value, 0)
         self.mc.store(r.SCRATCH.value, loc1.value, 0)
 
-    def emit_call(self, op, arglocs, regalloc):
+
+class CallOpAssembler(object):
+
+    _mixin_ = True
+
+    def _emit_call(self, op, arglocs, is_call_release_gil=False):
         resloc = arglocs[0]
-        adr = arglocs[1]
-        arglist = arglocs[2:]
+        func_index = 1 + is_call_release_gil
+        adr = arglocs[func_index]
+        arglist = arglocs[func_index+1:]
 
         cb = callbuilder.CallBuilder(self, adr, arglist, resloc)
 
@@ -458,98 +482,85 @@
         cb.argtypes = descr.get_arg_types()
         cb.restype  = descr.get_result_type()
 
-        cb.emit()
+        if is_call_release_gil:
+            saveerrloc = arglocs[1]
+            assert saveerrloc.is_imm()
+            cb.emit_call_release_gil(saveerrloc.value)
+        else:
+            cb.emit()
 
-    ## def _emit_call(self, adr, arglocs, result=None, result_info=(-1,-1)):
-    ##     n_args = len(arglocs)
+    def emit_call(self, op, arglocs, regalloc):
+        self._emit_call(op, arglocs)
 
-    ##     # collect variables that need to go in registers
-    ##     # and the registers they will be stored in 
-    ##     num = 0
-    ##     fpnum = 0
-    ##     count = 0
-    ##     non_float_locs = []
-    ##     non_float_regs = []
-    ##     float_locs = []
-    ##     float_regs = []
-    ##     stack_args = []
-    ##     float_stack_arg = False
-    ##     for i in range(n_args):
-    ##         arg = arglocs[i]
+    def emit_call_may_force(self, op, arglocs, regalloc):
+        self._store_force_index(self._find_nearby_operation(regalloc, +1))
+        self._emit_call(op, arglocs)
 
-    ##         if arg.type == FLOAT:
-    ##             if fpnum < MAX_FREG_PARAMS:
-    ##                 fpreg = r.PARAM_FPREGS[fpnum]
-    ##                 float_locs.append(arg)
-    ##                 float_regs.append(fpreg)
-    ##                 fpnum += 1
-    ##                 # XXX Duplicate float arguments in GPR slots
-    ##                 if num < MAX_REG_PARAMS:
-    ##                     num += 1
-    ##                 else:
-    ##                     stack_args.append(arg)
-    ##             else:
-    ##                 stack_args.append(arg)
-    ##         else:
-    ##             if num < MAX_REG_PARAMS:
-    ##                 reg = r.PARAM_REGS[num]
-    ##                 non_float_locs.append(arg)
-    ##                 non_float_regs.append(reg)
-    ##                 num += 1
-    ##             else:
-    ##                 stack_args.append(arg)
-    ##                 float_stack_arg = True
+    def emit_call_release_gil(self, op, arglocs, regalloc):
+        self._store_force_index(self._find_nearby_operation(regalloc, +1))
+        self._emit_call(op, arglocs, is_call_release_gil=True)
 
-    ##     if adr in non_float_regs:
-    ##         non_float_locs.append(adr)
-    ##         non_float_regs.append(r.r11)
-    ##         adr = r.r11
+    def _store_force_index(self, guard_op):
+        assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or
+                guard_op.getopnum() == rop.GUARD_NOT_FORCED_2)
+        faildescr = guard_op.getdescr()
+        ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr')
+        self.mc.load_imm(r.SCRATCH, rffi.cast(lltype.Signed,
+                                           cast_instance_to_gcref(faildescr)))
+        self.mc.store(r.SCRATCH.value, r.SPP.value, ofs)
 
-    ##     # compute maximum of parameters passed
-    ##     self.max_stack_params = max(self.max_stack_params, len(stack_args))
+    def _find_nearby_operation(self, regalloc, delta):
+        return regalloc.operations[regalloc.rm.position + delta]
 
-    ##     # compute offset at which parameters are stored
-    ##     if IS_PPC_32:
-    ##         param_offset = BACKCHAIN_SIZE * WORD
-    ##     else:
-    ##         # space for first 8 parameters
-    ##         param_offset = ((BACKCHAIN_SIZE + MAX_REG_PARAMS) * WORD)
+    def emit_cond_call(self, op, arglocs, regalloc):
+        fcond = self.guard_success_cc
+        self.guard_success_cc = c.cond_none
+        assert fcond != c.cond_none
+        fcond = c.negate(fcond)
 
-    ##     with scratch_reg(self.mc):
-    ##         if float_stack_arg:
-    ##             self.mc.stfd(r.f0.value, r.SPP.value, FORCE_INDEX_OFS + WORD)
-    ##         for i, arg in enumerate(stack_args):
-    ##             offset = param_offset + i * WORD
-    ##             if arg is not None:
-    ##                 if arg.type == FLOAT:
-    ##                     self.regalloc_mov(arg, r.f0)
-    ##                     self.mc.stfd(r.f0.value, r.SP.value, offset)
-    ##                 else:
-    ##                     self.regalloc_mov(arg, r.SCRATCH)
-    ##                     self.mc.store(r.SCRATCH.value, r.SP.value, offset)
-    ##         if float_stack_arg:
-    ##             self.mc.lfd(r.f0.value, r.SPP.value, FORCE_INDEX_OFS + WORD)
+        jmp_adr = self.mc.get_relative_pos()
+        self.mc.trap()        # patched later to a 'bc'
 
-    ##     # remap values stored in core registers
-    ##     remap_frame_layout(self, float_locs, float_regs, r.f0)
-    ##     remap_frame_layout(self, non_float_locs, non_float_regs, r.SCRATCH)
+        # XXX load_gcmap XXX -> r2
 
-    ##     # the actual call
-    ##     if adr.is_imm():
-    ##         self.mc.call(adr.value)
-    ##     elif adr.is_stack():
-    ##         self.regalloc_mov(adr, r.SCRATCH)
-    ##         self.mc.call_register(r.SCRATCH)
-    ##     elif adr.is_reg():
-    ##         self.mc.call_register(adr)
-    ##     else:
-    ##         assert 0, "should not reach here"
+        # save away r3, r4, r5, r6, r12 into the jitframe
+        base_ofs = self.cpu.get_baseofs_of_frame_field()
+        should_be_saved = self._regalloc.rm.reg_bindings.values()
+        for gpr in [r.r3, r.r4, r.r5, r.r6, r.r12]:
+            if gpr not in should_be_saved:
+                continue
+            v = self.cpu.all_reg_indexes[gpr.value]
+            self.mc.std(gpr.value, r.SPP.value, v * WORD + base_ofs)
+        #
+        # load the 0-to-4 arguments into these registers, with the address of
+        # the function to call into r12
+        remap_frame_layout(self, arglocs,
+                           [r.r12, r.r3, r.r4, r.r5, r.r6][:len(arglocs)],
+                           r.SCRATCH)
+        #
+        # figure out which variant of cond_call_slowpath to call, and call it
+        callee_only = False
+        floats = False
+        for reg in regalloc.rm.reg_bindings.values():
+            if reg not in regalloc.rm.save_around_call_regs:
+                break
+        else:
+            callee_only = True
+        if regalloc.fprm.reg_bindings:
+            floats = True
+        cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only]
+        self.mc.bl_abs(cond_call_adr)
+        # restoring the registers saved above, and doing pop_gcmap(), is left
+        # to the cond_call_slowpath helper.  We never have any result value.
+        relative_target = self.mc.currpos() - jmp_adr
+        pmc = OverwritingBuilder(self.mc, jmp_adr, 1)
+        BI, BO = c.encoding[fcond]
+        pmc.bc(BO, BI, relative_target)
+        pmc.overwrite()
+        # might be overridden again to skip over the following
+        # guard_no_exception too
+        self.previous_cond_call_jcond = jmp_adr, BI, BO
 
-    ##     self.mark_gc_roots(force_index)
-    ##     # ensure the result is wellformed and stored in the correct location
-    ##     if result is not None and result_info != (-1, -1):
-    ##         self._ensure_result_bit_extension(result, result_info[0],
-    ##                                                   result_info[1])
 
 class FieldOpAssembler(object):
 
@@ -1230,7 +1241,7 @@
             self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
 
         self._emit_guard(guard_op, regalloc._prepare_guard(guard_op),
-                                                    c.LT, save_exc=True)
+                                        xxxxxxxxxxxxxxxxx+c.LT, save_exc=True)
 
     # ../x86/assembler.py:668
     def redirect_call_assembler(self, oldlooptoken, newlooptoken):
@@ -1275,7 +1286,8 @@
             self.mc.load(r.SCRATCH.value, r.SPP.value, FORCE_INDEX_OFS)
             self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
 
-        self._emit_guard(guard_op, arglocs[1 + numargs:], c.LT, save_exc=True)
+        self._emit_guard(guard_op, arglocs[1 + numargs:],
+                         xxxxxxxxxxxxxx+c.LT, save_exc=True)
 
     def emit_guard_call_release_gil(self, op, guard_op, arglocs, regalloc):
 
@@ -1307,7 +1319,8 @@
             self.mc.load(r.SCRATCH.value, r.SPP.value, 0)
             self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
 
-        self._emit_guard(guard_op, arglocs[1 + numargs:], c.LT, save_exc=True)
+        self._emit_guard(guard_op, arglocs[1 + numargs:],
+                         xxxxxxxxxxxxxxxxxx+c.LT, save_exc=True)
 
     def call_release_gil(self, gcrootmap, save_registers):
         # XXX don't know whether this is correct
@@ -1329,7 +1342,7 @@
 
 class OpAssembler(IntOpAssembler, GuardOpAssembler,
                   MiscOpAssembler, FieldOpAssembler,
-                  StrOpAssembler,
+                  StrOpAssembler, CallOpAssembler,
                   UnicodeOpAssembler, ForceOpAssembler,
                   AllocOpAssembler, FloatOpAssembler):
 
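
emit_cond_call() -- and emit_guard_no_exception() just before it --
follow the backend's forward-patching idiom: emit a one-instruction
trap as a placeholder, remember its position, and overwrite it with the
real conditional branch once the jump distance is known.  A
self-contained toy version of the bookkeeping (fake opcodes; the real
patching goes through OverwritingBuilder and pmc.bc(BO, BI, offset)):

    import struct

    class ToyBuilder(object):
        def __init__(self):
            self.code = bytearray()
        def currpos(self):
            return len(self.code)
        def trap(self):
            self.code += b'\x00' * 4           # 4-byte placeholder
        def nop(self):
            self.code += b'\x60\x00\x00\x00'
        def patch_bc(self, pos, offset):
            self.code[pos:pos + 4] = struct.pack('>i', offset)

    mc = ToyBuilder()
    jmp_adr = mc.currpos()
    mc.trap()                                  # placeholder jump
    for _ in range(5):
        mc.nop()                               # code that may be skipped
    mc.patch_bc(jmp_adr, mc.currpos() - jmp_adr)
    assert struct.unpack('>i', bytes(mc.code[:4]))[0] == 24
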
diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py
--- a/rpython/jit/backend/ppc/ppc_assembler.py
+++ b/rpython/jit/backend/ppc/ppc_assembler.py
@@ -32,6 +32,7 @@
 from rpython.rlib.objectmodel import we_are_translated, specialize
 from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.jit.backend.ppc.locations import StackLocation, get_fp_offset, imm
+from rpython.jit.backend.ppc import callbuilder
 from rpython.rlib.jit import AsmInfo
 from rpython.rlib.objectmodel import compute_unique_id
 from rpython.rlib.rarithmetic import r_uint
@@ -71,6 +72,9 @@
 def high(w):
     return (w >> 16) & 0x0000FFFF
 
+class JitFrameTooDeep(Exception):
+    pass
+
 class AssemblerPPC(OpAssembler, BaseAssembler):
 
     #ENCODING_AREA               = FORCE_INDEX_OFS
@@ -174,18 +178,19 @@
     def setup_failure_recovery(self):
         self.failure_recovery_code = [0, 0, 0, 0]
 
-    # TODO: see with we really need the ignored_regs argument
     def _push_all_regs_to_jitframe(self, mc, ignored_regs, withfloats,
                                    callee_only=False):
         base_ofs = self.cpu.get_baseofs_of_frame_field()
         if callee_only:
-            regs = XXX
+            regs = PPCRegisterManager.save_around_call_regs
         else:
-            regs = r.MANAGED_REGS
-        # For now, just push all regs to the jitframe
+            regs = PPCRegisterManager.all_regs
+        #
         for reg in regs:
-            v = r.ALL_REG_INDEXES[reg]
-            mc.std(reg.value, r.SPP.value, base_ofs + v * WORD)
+            if reg not in ignored_regs:
+                v = r.ALL_REG_INDEXES[reg]
+                mc.std(reg.value, r.SPP.value, base_ofs + v * WORD)
+        #
         if withfloats:
             for reg in r.MANAGED_FP_REGS:
                 v = r.ALL_REG_INDEXES[reg]
@@ -195,20 +200,19 @@
                                     callee_only=False):
         base_ofs = self.cpu.get_baseofs_of_frame_field()
         if callee_only:
-            regs = r.VOLATILES
+            regs = PPCRegisterManager.save_around_call_regs
         else:
-            regs = r.ALL_REGS
-        for i, reg in enumerate(regs):
-            # XXX should we progress to higher addressess
-            mc.load_from_addr(reg, base_ofs - (i * WORD))
-
+            regs = PPCRegisterManager.all_regs
+        #
+        for reg in regs:
+            if reg not in ignored_regs:
+                v = r.ALL_REG_INDEXES[reg]
+                mc.ld(reg.value, r.SPP.value, base_ofs + v * WORD)
+        #
         if withfloats:
-            if callee_only:
-                regs = r.VOLATILES_FLOAT
-            else:
-                regs = r.ALL_FLOAT_REGS
-            for i, reg in enumerate(regs):
-                pass # TODO find or create the proper load indexed for fpr's
+            for reg in r.MANAGED_FP_REGS:
+                v = r.ALL_REG_INDEXES[reg]
+                mc.lfd(reg.value, r.SPP.value, base_ofs + v * WORD)
 
     def _build_failure_recovery(self, exc, withfloats=False):
         mc = PPCBuilder()
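
Both helpers now share one fixed spill layout: every register has a
slot ALL_REG_INDEXES[reg] in the jitframe at base_ofs + index * WORD,
so _push_all_regs_to_jitframe() and _pop_all_regs_from_jitframe()
address the same slots symmetrically, and ignored_regs just skips slots
the caller already saved.  Schematically (indexes and base offset made
up):

    WORD = 8
    ALL_REG_INDEXES = {'r14': 0, 'r15': 1, 'f1': 2}   # illustrative subset
    base_ofs = 48                                     # assumed base offset

    def slot_offset(reg):
        return base_ofs + ALL_REG_INDEXES[reg] * WORD

    assert slot_offset('r15') == 56   # std/ld and stfd/lfd hit the same slot
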
@@ -245,13 +249,114 @@
         self.failure_recovery_code[exc + 2 * withfloats] = rawstart
         self.mc = None
 
-    # TODO
     def build_frame_realloc_slowpath(self):
-        pass
+        mc = PPCBuilder()
+        self.mc = mc
 
-    # TODO
+        # signature of this _frame_realloc_slowpath function:
+        #   * on entry, r0 is the new size
+        #   * on entry, r2 is the gcmap
+        #   * no managed register must be modified
+
+        ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap')
+        mc.store(r.r2.value, r.SPP.value, ofs2)
+
+        self._push_all_regs_to_jitframe(mc, [], self.cpu.supports_floats)
+
+        # Save away the LR inside r30
+        mc.mflr(r.RCS1.value)
+
+        # First argument is SPP (= r31), which is the jitframe
+        mc.mr(r.r3.value, r.SPP.value)
+
+        # Second argument is the new size, which is still in r0 here
+        mc.mr(r.r4.value, r.r0.value)
+
+        self._store_and_reset_exception(mc, r.RCS2, r.RCS3)
+
+        # Do the call
+        adr = rffi.cast(lltype.Signed, self.cpu.realloc_frame)
+        cb = callbuilder.CallBuilder(self, imm(adr), [r.r3, r.r4], r.r3)
+        cb.emit()
+
+        # The result is stored back into SPP (= r31)
+        mc.mr(r.SPP.value, r.r3.value)
+
+        self._restore_exception(mc, r.RCS2, r.RCS3)
+
+        gcrootmap = self.cpu.gc_ll_descr.gcrootmap
+        if gcrootmap and gcrootmap.is_shadow_stack:
+            self._load_shadowstack_top_in_ebx(mc, gcrootmap)
+            mc.MOV_mr((ebx.value, -WORD), eax.value)
+
+        mc.mtlr(r.RCS1.value)     # restore LR
+        self._pop_all_regs_from_jitframe(mc, [], self.cpu.supports_floats)
+        mc.blr()
+
+        self._frame_realloc_slowpath = mc.materialize(self.cpu, [])
+        self.mc = None
+
+    def _store_and_reset_exception(self, mc, excvalloc, exctploc):
+        """Reset the exception, after fetching it inside the two regs.
+        """
+        mc.load_imm(r.r2, self.cpu.pos_exc_value())
+        diff = self.cpu.pos_exception() - self.cpu.pos_exc_value()
+        assert _check_imm_arg(diff)
+        # Load the exception fields into the two registers
+        mc.load(excvalloc.value, r.r2.value, 0)
+        mc.load(exctploc.value, r.r2.value, diff)
+        # Zero out the exception fields
+        mc.li(r.r0.value, 0)
+        mc.store(r.r0.value, r.r2.value, 0)
+        mc.store(r.r0.value, r.r2.value, diff)
+
+    def _restore_exception(self, mc, excvalloc, exctploc):
+        mc.load_imm(r.r2, self.cpu.pos_exc_value())
+        diff = self.cpu.pos_exception() - self.cpu.pos_exc_value()
+        assert _check_imm_arg(diff)
+        # Store the exception fields from the two registers
+        mc.store(excvalloc.value, r.r2.value, 0)
+        mc.store(exctploc.value, r.r2.value, diff)
+
     def _build_cond_call_slowpath(self, supports_floats, callee_only):
-        pass
+        """ This builds a general call slowpath, for whatever call happens to
+        come.
+        """
+        # signature of these cond_call_slowpath functions:
+        #   * on entry, r12 contains the function to call
+        #   * r3, r4, r5, r6 contain arguments for the call
+        #   * r2 is the gcmap
+        #   * the old value of these regs must already be stored in the jitframe
+        #   * on exit, all registers are restored from the jitframe
+
+        mc = PPCBuilder()
+        self.mc = mc
+        ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap')
+        mc.store(r.r2.value, r.SPP.value, ofs2)
+
+        # copy registers to the frame, with the exception of r3 to r6 and r12,
+        # because these have already been saved by the caller.  Note that
+        # this is not symmetrical: these 5 registers are saved by the caller
+        # but restored here at the end of this function.
+        self._push_all_regs_to_jitframe(mc, [r.r3, r.r4, r.r5, r.r6, r.r12],
+                                        supports_floats, callee_only)
+
+        # Save away the LR inside r30
+        mc.mflr(r.RCS1.value)
+
+        # Do the call
+        cb = callbuilder.CallBuilder(self, r.r12, [r.r3, r.r4, r.r5, r.r6],
+                                     None)
+        cb.emit()
+
+        # Finish
+        # XXX self._reload_frame_if_necessary(mc, align_stack=True)
+
+        mc.mtlr(r.RCS1.value)     # restore LR
+        self._pop_all_regs_from_jitframe(mc, [], supports_floats, callee_only)
+        mc.blr()
+        self.mc = None
+        return mc.materialize(self.cpu, [])
 
     def _build_malloc_slowpath(self):
         mc = PPCBuilder()
@@ -672,15 +777,42 @@
                                                         allblocks)
         self.target_tokens_currently_compiling = {}
         self.frame_depth_to_patch = []
-        #self.max_stack_params = 0
 
     def update_frame_depth(self, frame_depth):
+        if frame_depth > 0x7fff:
+            raise JitFrameTooDeep     # XXX
         baseofs = self.cpu.get_baseofs_of_frame_field()
         self.current_clt.frame_info.update_frame_depth(baseofs, frame_depth)
 
-    def patch_stack_checks(self, framedepth, rawstart):
-        for ofs in self.frame_depth_to_patch:
-            self._patch_frame_depth(ofs + rawstart, framedepth)
+    def patch_stack_checks(self, frame_depth):
+        if frame_depth > 0x7fff:
+            raise JitFrameTooDeep     # XXX
+        for traps_pos, jmp_target in self.frame_depth_to_patch:
+            pmc = OverwritingBuilder(self.mc, traps_pos, 3)
+            # three traps, so exactly three instructions to patch here
+            pmc.cmpdi(0, r.r2.value, frame_depth)         # 1
+            pmc.bc(7, 0, jmp_target - (traps_pos + 4))    # 2   "bge+"
+            pmc.li(r.r0.value, frame_depth)               # 3
+            pmc.overwrite()
+
+    def _check_frame_depth(self, mc, gcmap):
+        """ check if the frame is of enough depth to follow this bridge.
+        Otherwise reallocate the frame in a helper.
+        """
+        descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu)
+        ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr)
+        mc.ld(r.r2.value, r.SPP.value, ofs)
+        patch_pos = mc.currpos()
+        mc.trap()     # placeholder for cmpdi(0, r2, ...)
+        mc.trap()     # placeholder for bge
+        mc.trap()     # placeholder for li(r0, ...)
+        mc.load_imm(r.SCRATCH2, self._frame_realloc_slowpath)
+        mc.mtctr(r.SCRATCH2.value)
+        #XXXXX:
+        if we_are_translated(): XXX #self.load_gcmap(mc, gcmap)  # -> r2
+        mc.bctrl()
+
+        self.frame_depth_to_patch.append((patch_pos, mc.currpos()))
 
     @rgc.no_release_gil
     def assemble_loop(self, jd_id, unique_id, logger, loopname, inputargs,
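
Taken together, _check_frame_depth() and patch_stack_checks() give
every loop and bridge an entry check whose constants are filled in only
once the final frame depth is known.  In pseudo-Python the patched
three-instruction sequence behaves like (realloc_slowpath stands for
_frame_realloc_slowpath, which receives the needed size in r0):

    def check_frame_depth(jitframe, frame_depth, realloc_slowpath):
        if len(jitframe) >= frame_depth:       # cmpdi + "bge+"
            return jitframe
        return realloc_slowpath(jitframe, frame_depth)   # li r0, depth

    grown = check_frame_depth([0] * 4, 16,
                              lambda f, n: f + [0] * (n - len(f)))
    assert len(grown) == 16
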
@@ -717,12 +849,11 @@
         self.write_pending_failure_recoveries()
         full_size = self.mc.get_relative_pos()
         #
+        self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE)
         rawstart = self.materialize_loop(looptoken)
         if IS_PPC_64 and IS_BIG_ENDIAN:  # fix the function descriptor (3 words)
             rffi.cast(rffi.LONGP, rawstart)[0] = rawstart + 3 * WORD
         #
-        self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE,
-                                rawstart)
         looptoken._ppc_loop_code = looppos + rawstart
         debug_start("jit-backend-addr")
         debug_print("Loop %d (%s) has address 0x%x to 0x%x (bootstrap 0x%x)" % 
(
@@ -756,8 +887,10 @@
 
     def _assemble(self, regalloc, inputargs, operations):
         self._regalloc = regalloc
+        self.guard_success_cc = c.cond_none
         regalloc.compute_hint_frame_locations(operations)
         regalloc.walk_operations(inputargs, operations)
+        assert self.guard_success_cc == c.cond_none
         if 1: # we_are_translated() or self.cpu.dont_keepalive_stuff:
             self._regalloc = None   # else keep it around for debugging
         frame_depth = regalloc.get_final_frame_depth()
@@ -788,16 +921,14 @@
                                              operations,
                                              self.current_clt.allgcrefs,
                                              self.current_clt.frame_info)
-        #self._check_frame_depth(self.mc, regalloc.get_gcmap())
-        #        XXX ^^^^^^^^^^
+        self._check_frame_depth(self.mc, "??")
         frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations)
         codeendpos = self.mc.get_relative_pos()
         self.write_pending_failure_recoveries()
         fullsize = self.mc.get_relative_pos()
         #
+        self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE)
         rawstart = self.materialize_loop(original_loop_token)
-        self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE,
-                                rawstart)
         debug_bridge(descr_number, rawstart, codeendpos)
         self.patch_pending_failure_recoveries(rawstart)
         # patch the jump from original guard
@@ -813,15 +944,6 @@
         self.teardown()
         return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos)
 
-    def _patch_sp_offset(self, sp_patch_location, rawstart):
-        mc = PPCBuilder()
-        frame_depth = self.compute_frame_depth(self.current_clt.frame_depth,
-                                               self.current_clt.param_depth)
-        frame_depth -= self.OFFSET_SPP_TO_OLD_BACKCHAIN
-        mc.load_imm(r.SCRATCH, -frame_depth)
-        mc.add(r.SP.value, r.SPP.value, r.SCRATCH.value)
-        mc.copy_to_raw_memory(rawstart + sp_patch_location)
-
     DESCR_REF       = 0x00
     DESCR_INT       = 0x01
     DESCR_FLOAT     = 0x02
@@ -938,6 +1060,10 @@
         ptr = rffi.cast(lltype.Signed, gcmap)
         mc.load_imm(r.r2, ptr)
 
+    def push_gcmap(self, mc, gcmap, store):
+        assert store is True
+        # XXX IGNORED FOR NOW
+
     def break_long_loop(self):
         # If the loop is too long, the guards in it will jump forward
         # more than 32 KB.  We use an approximate hack to know if we
@@ -962,6 +1088,9 @@
         self.mc.mtctr(r.r0.value)
         self.mc.load_imm(r.r0, fail_descr)
         self.mc.bctr()
+        # we need to write at least 6 insns here, for patch_jump_for_descr()
+        while self.mc.currpos() < startpos + 6 * 4:
+            self.mc.trap()
         return startpos
 
     def write_pending_failure_recoveries(self):
@@ -1004,7 +1133,7 @@
         # conditional jump.  We must patch this conditional jump to go
         # to 'adr_new_target'.  If the target is too far away, we can't
         # patch it in place, and instead we patch the quick failure code
-        # (which should be at least 5 instructions, so enough).
+        # (which should be at least 6 instructions, so enough).
         # --- XXX for now we always use the second solution ---
         mc = PPCBuilder()
         mc.b_abs(adr_new_target)
@@ -1098,63 +1227,44 @@
         assert 0, "not supported location"
     mov_loc_loc = regalloc_mov
 
-    def regalloc_push(self, loc):
+    def regalloc_push(self, loc, already_pushed):
         """Pushes the value stored in loc to the stack
         Can trash the current value of SCRATCH when pushing a stack
         loc"""
-        self.mc.addi(r.SP.value, r.SP.value, -WORD) # decrease stack pointer
         assert IS_PPC_64, 'needs to be updated for ppc 32'
 
-        if loc.is_imm():
-            with scratch_reg(self.mc):
+        index = WORD * (~already_pushed)
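+        # ~already_pushed == -(already_pushed + 1): each push stores one word
+        # further below SP, without moving SP itself (unlike the old code
+        # above, which adjusted SP by a WORD on every push)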
+
+        if loc.type == FLOAT:
+            if not loc.is_fp_reg():
+                self.regalloc_mov(loc, r.FP_SCRATCH)
+                loc = r.FP_SCRATCH
+            self.mc.stfd(loc.value, r.SP.value, index)
+        else:
+            if not loc.is_core_reg():
                 self.regalloc_mov(loc, r.SCRATCH)
-                self.mc.store(r.SCRATCH.value, r.SP.value, 0)
-        elif loc.is_imm_float():
-            with scratch_reg(self.mc):
-                self.regalloc_mov(loc, r.FP_SCRATCH)
-                self.mc.store(r.FP_SCRATCH.value, r.SP.value, 0)
-        elif loc.is_stack():
-            # XXX this code has to be verified
-            with scratch_reg(self.mc):
-                self.regalloc_mov(loc, r.SCRATCH)
-                # push value
-                self.mc.store(r.SCRATCH.value, r.SP.value, 0)
-        elif loc.is_reg():
-            # push value
-            self.mc.store(loc.value, r.SP.value, 0)
-        elif loc.is_fp_reg():
-            self.mc.addi(r.SP.value, r.SP.value, -WORD) # decrease stack pointer
-            # push value
-            self.mc.stfd(loc.value, r.SP.value, 0)
-        else:
-            raise AssertionError('Trying to push an invalid location')
+                loc = r.SCRATCH
+            self.mc.std(loc.value, r.SP.value, index)
 
-    def regalloc_pop(self, loc):
+    def regalloc_pop(self, loc, already_pushed):
         """Pops the value on top of the stack to loc. Can trash the current
         value of SCRATCH when popping to a stack loc"""
         assert IS_PPC_64, 'needs to be updated for ppc 32'
-        if loc.is_stack():
-            # XXX this code has to be verified
-            with scratch_reg(self.mc):
-                # pop value
-                if IS_PPC_32:
-                    self.mc.lwz(r.SCRATCH.value, r.SP.value, 0)
-                else:
-                    self.mc.ld(r.SCRATCH.value, r.SP.value, 0)
-                self.mc.addi(r.SP.value, r.SP.value, WORD) # increase stack pointer
-                self.regalloc_mov(r.SCRATCH, loc)
-        elif loc.is_reg():
-            # pop value
-            if IS_PPC_32:
-                self.mc.lwz(loc.value, r.SP.value, 0)
+
+        index = WORD * (~already_pushed)
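+        # same offset computation as in regalloc_push above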
+
+        if loc.type == FLOAT:
+            if loc.is_fp_reg():
+                self.mc.lfd(loc.value, r.SP.value, index)
             else:
-                self.mc.ld(loc.value, r.SP.value, 0)
-            self.mc.addi(r.SP.value, r.SP.value, WORD) # increase stack pointer
-        elif loc.is_fp_reg():
-            self.mc.lfd(loc.value, r.SP.value, 0)
-            self.mc.addi(r.SP.value, r.SP.value, WORD) # increase stack pointer
+                self.mc.lfd(r.FP_SCRATCH.value, r.SP.value, index)
+                self.regalloc_mov(r.FP_SCRATCH, loc)
         else:
-            raise AssertionError('Trying to pop to an invalid location')
+            if loc.is_core_reg():
+                self.mc.ld(loc.value, r.SP.value, index)
+            else:
+                self.mc.ld(r.SCRATCH.value, r.SP.value, index)
+                self.regalloc_mov(r.SCRATCH, loc)
 
     def malloc_cond(self, nursery_free_adr, nursery_top_adr, size):
         assert size & (WORD-1) == 0     # must be correctly aligned
@@ -1245,17 +1355,7 @@
     print "[PPC/asm] %s not implemented" % op.getopname()
     raise NotImplementedError(op)
 
-def notimplemented_op_with_guard(self, op, guard_op, arglocs, regalloc):
-    print "[PPC/asm] %s with guard %s not implemented" % \
-            (op.getopname(), guard_op.getopname())
-    raise NotImplementedError(op)
-
-def add_none_argument(fn):
-    return (lambda self, op, arglocs, regalloc:
-                        fn(self, op, None, arglocs, regalloc))
-
 operations = [notimplemented_op] * (rop._LAST + 1)
-operations_with_guard = [notimplemented_op_with_guard] * (rop._LAST + 1)
 
 for key, value in rop.__dict__.items():
     key = key.lower()
@@ -1266,16 +1366,5 @@
         func = getattr(AssemblerPPC, methname).im_func
         operations[value] = func
 
-for key, value in rop.__dict__.items():
-    key = key.lower()
-    if key.startswith('_'):
-        continue
-    methname = 'emit_guard_%s' % key
-    if hasattr(AssemblerPPC, methname):
-        assert operations[value] is notimplemented_op
-        func = getattr(AssemblerPPC, methname).im_func
-        operations_with_guard[value] = func
-        operations[value] = add_none_argument(func)
-
 class BridgeAlreadyCompiled(Exception):
     pass
diff --git a/rpython/jit/backend/ppc/ppc_field.py b/rpython/jit/backend/ppc/ppc_field.py
--- a/rpython/jit/backend/ppc/ppc_field.py
+++ b/rpython/jit/backend/ppc/ppc_field.py
@@ -49,7 +49,8 @@
     "XO4":    (30, 31),
     "XO5":    (27, 29),
     "XO6":    (21, 29),
-    "XO7":    (27, 30)
+    "XO7":    (27, 30),
+    "LL":     ( 9, 10),
 }
 
 
diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py
--- a/rpython/jit/backend/ppc/regalloc.py
+++ b/rpython/jit/backend/ppc/regalloc.py
@@ -20,6 +20,7 @@
 from rpython.jit.backend.llsupport import symbolic
 from rpython.jit.backend.llsupport.descr import ArrayDescr
 import rpython.jit.backend.ppc.register as r
+import rpython.jit.backend.ppc.condition as c
 from rpython.jit.backend.llsupport.descr import unpack_arraydescr
 from rpython.jit.backend.llsupport.descr import unpack_fielddescr
 from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr
@@ -55,7 +56,7 @@
 class FPRegisterManager(RegisterManager):
     all_regs              = r.MANAGED_FP_REGS
     box_types             = [FLOAT]
-    save_around_call_regs = r.VOLATILES_FLOAT
+    save_around_call_regs = [_r for _r in all_regs if _r in r.VOLATILES_FLOAT]
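+    # (only the registers that are both managed and volatile need to be
+    # saved around calls)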
 
     def convert_to_imm(self, c):
         assert isinstance(c, ConstFloat)
@@ -92,7 +93,8 @@
     all_regs              = r.MANAGED_REGS
     box_types             = None       # or a list of acceptable types
     no_lower_byte_regs    = all_regs
-    save_around_call_regs = r.VOLATILES
+    save_around_call_regs = [_r for _r in all_regs if _r in r.VOLATILES]
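+    # (as above: the volatile subset of the managed registers)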
+    frame_reg             = r.SPP
 
     REGLOC_TO_COPY_AREA_OFS = {
         r.r5:   MY_COPY_OF_REGS + 0 * WORD,
@@ -281,13 +283,25 @@
             forbidden_vars = self.rm.temp_boxes
             return self.rm.force_allocate_reg(var, forbidden_vars)
 
+    def force_allocate_reg_or_cc(self, var):
+        assert var.type == INT
+        if self.next_op_can_accept_cc(self.operations, self.rm.position):
+            # hack: return the SPP location to mean "lives in CC".  This
+            # SPP will not actually be used, and the location will be freed
+            # after the next op as usual.
+            self.rm.force_allocate_frame_reg(var)
+            return r.SPP
+        else:
+            # else, return a regular register (not SPP).
+            return self.force_allocate_reg(var)
+
     def walk_operations(self, inputargs, operations):
         from rpython.jit.backend.ppc.ppc_assembler import (
-            operations_with_guard as asm_operations_with_guard,
             operations as asm_operations)
         i = 0
         self.limit_loop_break = (self.assembler.mc.get_relative_pos() +
                                      LIMIT_LOOP_BREAK)
+        self.operations = operations
         while i < len(operations):
             op = operations[i]
             self.assembler.mc.mark_op(op)
@@ -306,21 +320,11 @@
                     self.fprm.temp_boxes.append(box)
             #
             opnum = op.getopnum()
-            if self.can_merge_with_next_guard(op, i, operations):
-                i += 1
-                self.rm.position = i
-                self.fprm.position = i
-                arglocs = oplist_with_guard[opnum](self, op, operations[i])
-                assert arglocs is not None
-                asm_operations_with_guard[opnum](self.assembler, op,
-                                                 operations[i],
-                                                 arglocs, self)
-            elif not we_are_translated() and opnum == -124:
+            if not we_are_translated() and opnum == -124:
                 self._consider_force_spill(op)
             else:
                 arglocs = oplist[opnum](self, op)
-                if arglocs is not None:
-                    asm_operations[opnum](self.assembler, op, arglocs, self)
+                asm_operations[opnum](self.assembler, op, arglocs, self)
             self.free_op_vars()
             self.possibly_free_var(op.result)
             self.rm._check_invariants()
@@ -334,6 +338,7 @@
         assert not self.fprm.reg_bindings
         self.flush_loop()
         self.assembler.mc.mark_op(None) # end of the loop
+        self.operations = None
         for arg in inputargs:
             self.possibly_free_var(arg)
 
@@ -344,6 +349,10 @@
         while self.min_bytes_before_label > mc.get_relative_pos():
             mc.nop()
 
+    def get_gcmap(self, noregs=False):
+        #xxxxxx
+        return '???'
+
     def loc(self, var):
         if var.type == FLOAT:
             return self.fprm.loc(var)
@@ -436,46 +445,46 @@
     prepare_uint_rshift = helper.prepare_binary_op
     prepare_uint_floordiv = helper.prepare_binary_op
 
-    prepare_guard_int_add_ovf = helper.prepare_int_binary_ovf
-    prepare_guard_int_sub_ovf = helper.prepare_int_binary_ovf
-    prepare_guard_int_mul_ovf = helper.prepare_int_binary_ovf
+    prepare_int_add_ovf = helper.prepare_binary_op
+    prepare_int_sub_ovf = helper.prepare_binary_op
+    prepare_int_mul_ovf = helper.prepare_binary_op
 
     prepare_int_neg = helper.prepare_unary_op
     prepare_int_invert = helper.prepare_unary_op
     prepare_int_signext = helper.prepare_unary_op
 
-    prepare_guard_int_le = helper.prepare_cmp_op
-    prepare_guard_int_lt = helper.prepare_cmp_op
-    prepare_guard_int_ge = helper.prepare_cmp_op
-    prepare_guard_int_gt = helper.prepare_cmp_op
-    prepare_guard_int_eq = helper.prepare_cmp_op
-    prepare_guard_int_ne = helper.prepare_cmp_op
+    prepare_int_le = helper.prepare_cmp_op
+    prepare_int_lt = helper.prepare_cmp_op
+    prepare_int_ge = helper.prepare_cmp_op
+    prepare_int_gt = helper.prepare_cmp_op
+    prepare_int_eq = helper.prepare_cmp_op
+    prepare_int_ne = helper.prepare_cmp_op
 
-    prepare_guard_ptr_eq = prepare_guard_int_eq
-    prepare_guard_ptr_ne = prepare_guard_int_ne
+    prepare_ptr_eq = prepare_int_eq
+    prepare_ptr_ne = prepare_int_ne
 
-    prepare_guard_instance_ptr_eq = prepare_guard_ptr_eq
-    prepare_guard_instance_ptr_ne = prepare_guard_ptr_ne
+    prepare_instance_ptr_eq = prepare_ptr_eq
+    prepare_instance_ptr_ne = prepare_ptr_ne
 
-    prepare_guard_uint_lt = helper.prepare_cmp_op_unsigned
-    prepare_guard_uint_le = helper.prepare_cmp_op_unsigned
-    prepare_guard_uint_gt = helper.prepare_cmp_op_unsigned
-    prepare_guard_uint_ge = helper.prepare_cmp_op_unsigned
+    prepare_uint_lt = helper.prepare_cmp_op_unsigned
+    prepare_uint_le = helper.prepare_cmp_op_unsigned
+    prepare_uint_gt = helper.prepare_cmp_op_unsigned
+    prepare_uint_ge = helper.prepare_cmp_op_unsigned
 
-    prepare_guard_int_is_true = helper.prepare_unary_cmp
-    prepare_guard_int_is_zero = helper.prepare_unary_cmp
+    prepare_int_is_true = helper.prepare_unary_cmp
+    prepare_int_is_zero = helper.prepare_unary_cmp
 
     prepare_float_add = helper.prepare_binary_op
     prepare_float_sub = helper.prepare_binary_op
     prepare_float_mul = helper.prepare_binary_op
     prepare_float_truediv = helper.prepare_binary_op
 
-    prepare_guard_float_lt = helper.prepare_float_cmp
-    prepare_guard_float_le = helper.prepare_float_cmp
-    prepare_guard_float_eq = helper.prepare_float_cmp
-    prepare_guard_float_ne = helper.prepare_float_cmp
-    prepare_guard_float_gt = helper.prepare_float_cmp
-    prepare_guard_float_ge = helper.prepare_float_cmp
+    prepare_float_lt = helper.prepare_float_cmp
+    prepare_float_le = helper.prepare_float_cmp
+    prepare_float_eq = helper.prepare_float_cmp
+    prepare_float_ne = helper.prepare_float_cmp
+    prepare_float_gt = helper.prepare_float_cmp
+    prepare_float_ge = helper.prepare_float_cmp
     prepare_float_neg = helper.prepare_unary_op
     prepare_float_abs = helper.prepare_unary_op
 
@@ -541,14 +550,21 @@
         #
         return args
 
-    def prepare_guard_true(self, op):
-        l0 = self.ensure_reg(op.getarg(0))
-        args = self._prepare_guard(op, [l0])
-        return args
+    def load_condition_into_cc(self, box):
+        if self.assembler.guard_success_cc == c.cond_none:
+            loc = self.ensure_reg(box)
+            mc = self.assembler.mc
+            mc.cmp_op(0, loc.value, 0, imm=True)
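+            # any nonzero value counts as true, hence the NE success condition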
+            self.assembler.guard_success_cc = c.NE
 
-    prepare_guard_false = prepare_guard_true
-    prepare_guard_nonnull = prepare_guard_true
-    prepare_guard_isnull = prepare_guard_true
+    def _prepare_guard_cc(self, op):
+        self.load_condition_into_cc(op.getarg(0))
+        return self._prepare_guard(op)
+
+    prepare_guard_true = _prepare_guard_cc
+    prepare_guard_false = _prepare_guard_cc
+    prepare_guard_nonnull = _prepare_guard_cc
+    prepare_guard_isnull = _prepare_guard_cc
 
     def prepare_guard_not_invalidated(self, op):
         pos = self.assembler.mc.get_relative_pos()
@@ -570,10 +586,13 @@
         return arglocs
 
     def prepare_guard_no_exception(self, op):
-        loc = self.ensure_reg(ConstInt(self.cpu.pos_exception()))
-        arglocs = self._prepare_guard(op, [loc])
+        arglocs = self._prepare_guard(op)
         return arglocs
 
+    prepare_guard_no_overflow = prepare_guard_no_exception
+    prepare_guard_overflow = prepare_guard_no_exception
+    prepare_guard_not_forced = prepare_guard_no_exception
+
     def prepare_guard_value(self, op):
         l0 = self.ensure_reg(op.getarg(0))
         l1 = self.ensure_reg_or_16bit_imm(op.getarg(1))
@@ -863,12 +882,13 @@
         if effectinfo is not None:
             oopspecindex = effectinfo.oopspecindex
             if oopspecindex == EffectInfo.OS_MATH_SQRT:
+                xxxxxxxxx
                 args = self.prepare_math_sqrt(op)
                 self.assembler.emit_math_sqrt(op, args, self)
                 return
         return self._prepare_call(op)
 
-    def _prepare_call(self, op, force_store=[], save_all_regs=False):
+    def _prepare_call(self, op, save_all_regs=False):
         args = []
         args.append(None)
         for i in range(op.numargs()):
@@ -1003,12 +1023,11 @@
         if jump_op is not None and jump_op.getdescr() is descr:
             self._compute_hint_frame_locations_from_descr(descr)
 
-    def prepare_guard_call_may_force(self, op, guard_op):
-        args = self._prepare_call(op, save_all_regs=True)
-        return self._prepare_guard(guard_op, args)
+    def prepare_call_may_force(self, op):
+        return self._prepare_call(op, save_all_regs=True)
 
-    prepare_guard_call_release_gil = prepare_guard_call_may_force
-    
+    prepare_call_release_gil = prepare_call_may_force
+
     def prepare_guard_call_assembler(self, op, guard_op):
         descr = op.getdescr()
         assert isinstance(descr, JitCellToken)
@@ -1055,19 +1074,21 @@
         ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs))
         return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize)]
 
-def add_none_argument(fn):
-    return lambda self, op: fn(self, op, None)
+    def prepare_cond_call(self, op):
+        self.load_condition_into_cc(op.getarg(0))
+        locs = []
+        # support between 0 and 4 integer arguments
+        assert 2 <= op.numargs() <= 2 + 4
+        for i in range(1, op.numargs()):
+            loc = self.loc(op.getarg(i))
+            assert loc.type != FLOAT
+            locs.append(loc)
+        return locs
 
 def notimplemented(self, op):
     print "[PPC/regalloc] %s not implemented" % op.getopname()
     raise NotImplementedError(op)
 
-def notimplemented_with_guard(self, op, guard_op):
-    print "[PPC/regalloc] %s with guard %s not implemented" % \
-            (op.getopname(), guard_op.getopname())
-    raise NotImplementedError(op)
-
-
 def force_int(intvalue):
     # a hack before transaction: force the intvalue argument through
     # rffi.cast(), to turn Symbolics into real values
@@ -1075,7 +1096,6 @@
 
 
 oplist = [notimplemented] * (rop._LAST + 1)
-oplist_with_guard = [notimplemented_with_guard] * (rop._LAST + 1)
 
 for key, value in rop.__dict__.items():
     key = key.lower()
@@ -1085,14 +1105,3 @@
     if hasattr(Regalloc, methname):
         func = getattr(Regalloc, methname).im_func
         oplist[value] = func
-
-for key, value in rop.__dict__.items():
-    key = key.lower()
-    if key.startswith('_'):
-        continue
-    methname = 'prepare_guard_%s' % key
-    if hasattr(Regalloc, methname):
-        assert oplist[value] is notimplemented
-        func = getattr(Regalloc, methname).im_func
-        oplist_with_guard[value] = func
-        oplist[value] = add_none_argument(func)
diff --git a/rpython/jit/backend/ppc/register.py b/rpython/jit/backend/ppc/register.py
--- a/rpython/jit/backend/ppc/register.py
+++ b/rpython/jit/backend/ppc/register.py
@@ -26,10 +26,13 @@
 SCRATCH    = r0
 SCRATCH2   = r2
 FP_SCRATCH = f0
-SP         = r1
-TOC        = r2
-RES        = r3
-SPP        = r31
+SP         = r1     # stack pointer register
+TOC        = r2     # the TOC, but unused inside the code we generated
+RES        = r3     # the result of calls
+SPP        = r31    # the frame pointer
+RCS1       = r30    # a random managed non-volatile register
+RCS2       = r29    # a random managed non-volatile register
+RCS3       = r28    # a random managed non-volatile register
 
 MANAGED_REGS = [r3, r4, r5, r6, r7, r8, r9, r10, r11, r12,
                 r25, r26, r27, r28, r29, r30]
@@ -38,6 +41,10 @@
 
 MANAGED_FP_REGS = VOLATILES_FLOAT[1:] #+ NONVOLATILES_FLOAT
 
+assert RCS1 in MANAGED_REGS and RCS1 in NONVOLATILES
+assert RCS2 in MANAGED_REGS and RCS2 in NONVOLATILES
+assert RCS3 in MANAGED_REGS and RCS3 in NONVOLATILES
+
 
 # The JITFRAME_FIXED_SIZE is measured in words, and should be the
 # number of registers that need to be saved into the jitframe when
diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py
--- a/rpython/jit/backend/ppc/runner.py
+++ b/rpython/jit/backend/ppc/runner.py
@@ -1,6 +1,7 @@
 import py
 from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
 from rpython.rtyper.llinterp import LLInterpreter
+from rpython.rlib import rgc
 #from rpython.jit.backend.ppc.arch import FORCE_INDEX_OFS
 from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU
 from rpython.jit.backend.ppc.ppc_assembler import AssemblerPPC
@@ -14,9 +15,10 @@
 
 class PPC_CPU(AbstractLLCPU):
 
+    supports_floats = True
+    # missing: supports_singlefloats
 
     IS_64_BIT = True
-    BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed)
 
     from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE
     frame_reg = r.SP
@@ -24,7 +26,10 @@
     for _i, _r in enumerate(r.MANAGED_REGS):
         all_reg_indexes[_r.value] = _i
     gen_regs = r.MANAGED_REGS
-    float_regs = r.MANAGED_FP_REGS
+    float_regs = [None] + r.MANAGED_FP_REGS
+    #             ^^^^ we leave a never-used hole for f0 in the jitframe
+    #             for rebuild_faillocs_from_descr(), as a counter-workaround
+    #             for the reverse hack in ALL_REG_INDEXES
 
     def __init__(self, rtyper, stats, opts=None, translate_support_code=False,
                  gcdescr=None):
@@ -36,15 +41,14 @@
         AbstractLLCPU.__init__(self, rtyper, stats, opts,
                                translate_support_code, gcdescr)
 
-        # floats are supported.  singlefloats are not supported yet
-        self.supports_floats = True
-
     def setup(self):
         self.assembler = AssemblerPPC(self)
 
+    @rgc.no_release_gil
     def setup_once(self):
         self.assembler.setup_once()
 
+    @rgc.no_release_gil
     def finish_once(self):
         self.assembler.finish_once()
 
@@ -55,69 +59,12 @@
         return self.assembler.assemble_bridge(faildescr, inputargs, operations,
                                               original_loop_token, log, logger)
 
-    @staticmethod
     def cast_ptr_to_int(x):
         adr = llmemory.cast_ptr_to_adr(x)
         return PPC_CPU.cast_adr_to_int(adr)
+    cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)'
+    cast_ptr_to_int = staticmethod(cast_ptr_to_int)
 
-    # XXX find out how big FP registers are on PPC32
-    all_null_registers = lltype.malloc(rffi.LONGP.TO,
-                        len(r.MANAGED_REGS),
-                        flavor='raw', zero=True, immortal=True)
-
-    def force(self, addr_of_force_index):
-        TP = rffi.CArrayPtr(lltype.Signed)
-
-        spilling_pointer = addr_of_force_index - FORCE_INDEX_OFS
-
-        fail_index = rffi.cast(TP, addr_of_force_index)[0]
-        assert fail_index >= 0, "already forced!"
-        faildescr = self.get_fail_descr_from_number(fail_index)
-        rffi.cast(TP, addr_of_force_index)[0] = ~fail_index
-
-        bytecode = self.assembler._find_failure_recovery_bytecode(faildescr)
-        addr_all_null_registers = rffi.cast(rffi.LONG, self.all_null_registers)
-        # start of "no gc operation!" block
-        fail_index_2 = self.assembler.failure_recovery_func(
-                bytecode,
-                spilling_pointer,
-                addr_all_null_registers)
-        self.assembler.leave_jitted_hook()
-        # end of "no gc operation!" block
-        assert fail_index == fail_index_2
-        return faildescr
-
-    # return the number of values that can be returned
-    def get_latest_value_count(self):
-        return self.assembler.fail_boxes_count
-
-    # fetch the result of the computation and return it
-    def get_latest_value_float(self, index):
-        return self.assembler.fail_boxes_float.getitem(index)
-
-    def get_latest_value_int(self, index):
-        return self.assembler.fail_boxes_int.getitem(index)
-
-    def get_latest_value_ref(self, index):
-        return self.assembler.fail_boxes_ptr.getitem(index)
-
-    def get_latest_force_token(self):
-        return self.assembler.fail_force_index
-    
-    def get_on_leave_jitted_hook(self):
-        return self.assembler.leave_jitted_hook
-
-    # walk through the given trace and generate machine code
-    def _walk_trace_ops(self, codebuilder, operations):
-        for op in operations:
-            codebuilder.build_op(op, self)
-                
-    def get_box_index(self, box):
-        return self.arg_to_box[box]
-
-    def teardown(self):
-        self.patch_list = None
-        self.reg_map = None
 
     def redirect_call_assembler(self, oldlooptoken, newlooptoken):
         self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken)
diff --git a/rpython/jit/backend/ppc/test/test_runner.py b/rpython/jit/backend/ppc/test/test_runner.py
--- a/rpython/jit/backend/ppc/test/test_runner.py
+++ b/rpython/jit/backend/ppc/test/test_runner.py
@@ -3,7 +3,7 @@
 from rpython.jit.tool.oparser import parse
 from rpython.jit.metainterp.history import (AbstractFailDescr,
                                             AbstractDescr,
-                                            BasicFailDescr,
+                                            BasicFailDescr, BasicFinalDescr,
                                             BoxInt, Box, BoxPtr,