Author: Richard Plangger <planri...@gmail.com>
Branch: ppc-vsx-support
Changeset: r85560:b45d23fda408
Date: 2016-07-05 14:48 +0200
http://bitbucket.org/pypy/pypy/changeset/b45d23fda408/

Log:    add intmask to arith in test (vec_int_sub), implement stitch guard
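
For context: the SIGNED subtraction test below now builds its expected values
with intmask() instead of r_int(), so the reference result wraps around to a
signed machine word exactly like the vectorized machine code does.  A minimal
illustration of the difference, assuming a 64-bit host (illustrative only, not
part of the changeset):

    from rpython.rlib.rarithmetic import intmask

    a = 2**63 - 1          # most positive signed 64-bit value
    b = -1
    print(a - b)           # plain Python int: 9223372036854775808, no wrap-around
    print(intmask(a - b))  # truncated to a signed word: -9223372036854775808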

diff --git a/rpython/jit/backend/ppc/locations.py b/rpython/jit/backend/ppc/locations.py
--- a/rpython/jit/backend/ppc/locations.py
+++ b/rpython/jit/backend/ppc/locations.py
@@ -153,9 +153,6 @@
     def __repr__(self):
         return 'FP(%s)+%d' % (self.type, self.value)
 
-    def location_code(self):
-        return 'b'
-
     def get_position(self):
         return self.position
 
diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py
--- a/rpython/jit/backend/ppc/ppc_assembler.py
+++ b/rpython/jit/backend/ppc/ppc_assembler.py
@@ -14,6 +14,7 @@
 from rpython.jit.backend.ppc.helper.regalloc import _check_imm_arg
 import rpython.jit.backend.ppc.register as r
 import rpython.jit.backend.ppc.condition as c
+from rpython.jit.metainterp.compile import ResumeGuardDescr
 from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE
 from rpython.jit.metainterp.history import AbstractFailDescr
 from rpython.jit.backend.llsupport import jitframe, rewrite
@@ -811,7 +812,7 @@
         #print(hex(rawstart))
         #import pdb; pdb.set_trace()
         return AsmInfo(ops_offset, rawstart + looppos,
-                       size_excluding_failure_stuff - looppos)
+                       size_excluding_failure_stuff - looppos, rawstart + looppos)
 
     def _assemble(self, regalloc, inputargs, operations):
         self._regalloc = regalloc
@@ -876,7 +877,8 @@
         self.fixup_target_tokens(rawstart)
         self.update_frame_depth(frame_depth)
         self.teardown()
-        return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos)
+        return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos,
+                       startpos + rawstart)
 
     def reserve_gcref_table(self, allgcrefs):
         # allocate the gc table right now.  We write absolute loads in
@@ -1373,14 +1375,13 @@
         assert isinstance(bridge_faildescr, ResumeGuardDescr)
         assert isinstance(faildescr, ResumeGuardDescr)
         assert asminfo.rawstart != 0
-        self.mc = codebuf.MachineCodeBlockWrapper()
+        self.mc = PPCBuilder()
         allblocks = self.get_asmmemmgr_blocks(looptoken)
         self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr,
                                                    allblocks)
         frame_info = self.datablockwrapper.malloc_aligned(
             jitframe.JITFRAMEINFO_SIZE, alignment=WORD)
 
-        self.mc.force_frame_size(DEFAULT_FRAME_BYTES)
         # if accumulation is saved at the guard, we need to update it here!
         guard_locs = self.rebuild_faillocs_from_descr(faildescr, version.inputargs)
         bridge_locs = self.rebuild_faillocs_from_descr(bridge_faildescr, version.inputargs)
@@ -1392,7 +1393,7 @@
                 if bridge_accum_info.failargs_pos == guard_accum_info.failargs_pos:
                     # the mapping might be wrong!
                     if bridge_accum_info.location is not guard_accum_info.location:
-                        self.mov(guard_accum_info.location, bridge_accum_info.location)
+                        self.regalloc_mov(guard_accum_info.location, bridge_accum_info.location)
                 bridge_accum_info = bridge_accum_info.next()
             guard_accum_info = guard_accum_info.next()
 
@@ -1401,19 +1402,14 @@
         assert len(guard_locs) == len(bridge_locs)
         for i,gloc in enumerate(guard_locs):
             bloc = bridge_locs[i]
-            bstack = bloc.location_code() == 'b'
-            gstack = gloc.location_code() == 'b'
-            if bstack and gstack:
+            if bloc.is_stack() and gloc.is_stack():
                 pass
             elif gloc is not bloc:
-                self.mov(gloc, bloc)
+                self.regalloc_mov(gloc, bloc)
         offset = self.mc.get_relative_pos()
-        self.mc.JMP_l(0)
-        self.mc.writeimm32(0)
-        self.mc.force_frame_size(DEFAULT_FRAME_BYTES)
+        self.mc.b_abs(asminfo.rawstart)
+
         rawstart = self.materialize_loop(looptoken)
-        # update the jump (above) to the real trace
-        self._patch_jump_to(rawstart + offset, asminfo.rawstart)
         # update the guard to jump right to this custom piece of assembler
         self.patch_jump_for_descr(faildescr, rawstart)
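
The hunk above is the "stitch guard" half of the commit: instead of emitting an
x86-style forward jump and patching it afterwards (the removed JMP_l/writeimm32
path), the PPC version builds the stub with a PPCBuilder, shuffles the failure
arguments into the locations the target trace expects, and then takes an
absolute branch straight into the already-compiled code.  A condensed,
illustrative sketch of that flow (the wrapper name is invented; the helper
calls are the ones used in the patch, and the accumulator remapping and
data-block setup are omitted):

    def stitch_sketch(self, faildescr, bridge_faildescr, version,
                      looptoken, asminfo):
        self.mc = PPCBuilder()    # PPC code buffer instead of the generic wrapper
        guard_locs = self.rebuild_faillocs_from_descr(faildescr, version.inputargs)
        bridge_locs = self.rebuild_faillocs_from_descr(bridge_faildescr, version.inputargs)
        for gloc, bloc in zip(guard_locs, bridge_locs):
            if bloc.is_stack() and gloc.is_stack():
                continue                       # both already live in the same stack slot
            if gloc is not bloc:
                self.regalloc_mov(gloc, bloc)  # move the value where the trace expects it
        self.mc.b_abs(asminfo.rawstart)        # absolute branch into the target trace
        rawstart = self.materialize_loop(looptoken)
        self.patch_jump_for_descr(faildescr, rawstart)  # guard now exits via this stub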
 
diff --git a/rpython/jit/backend/ppc/vector_ext.py b/rpython/jit/backend/ppc/vector_ext.py
--- a/rpython/jit/backend/ppc/vector_ext.py
+++ b/rpython/jit/backend/ppc/vector_ext.py
@@ -88,8 +88,7 @@
 
     def emit_vec_load_f(self, op, arglocs, regalloc):
         resloc, baseloc, indexloc, size_loc, ofs, integer_loc, aligned_loc = arglocs
-        #src_addr = addr_add(baseloc, ofs_loc, ofs.value, 0)
-        assert ofs.value == 0
+        indexloc = self._apply_offset(indexloc, ofs)
         itemsize = size_loc.value
         if itemsize == 4:
             self.mc.lxvw4x(resloc.value, indexloc.value, baseloc.value)
@@ -99,8 +98,7 @@
     def emit_vec_load_i(self, op, arglocs, regalloc):
         resloc, baseloc, indexloc, size_loc, ofs, \
             Vhiloc, Vloloc, Vploc, tloc = arglocs
-        #src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0)
-        assert ofs.value == 0
+        indexloc = self._apply_offset(indexloc, ofs)
         Vlo = Vloloc.value
         Vhi = Vhiloc.value
         self.mc.lvx(Vhi, indexloc.value, baseloc.value)
@@ -117,21 +115,10 @@
         else:
             self.mc.vperm(resloc.value, Vlo, Vhi, Vp)
 
-    def _emit_vec_setitem(self, op, arglocs, regalloc):
-        # prepares item scale (raw_store does not)
-        base_loc, ofs_loc, value_loc, size_loc, baseofs, integer_loc, aligned_loc = arglocs
-        scale = get_scale(size_loc.value)
-        dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, scale)
-        self._vec_store(dest_loc, value_loc, integer_loc.value,
-                        size_loc.value, aligned_loc.value)
-
-    genop_discard_vec_setarrayitem_raw = _emit_vec_setitem
-    genop_discard_vec_setarrayitem_gc = _emit_vec_setitem
-
     def emit_vec_store(self, op, arglocs, regalloc):
         baseloc, indexloc, valueloc, sizeloc, baseofs, \
             integer_loc, aligned_loc = arglocs
-        #dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, 0)
+        indexloc = self._apply_offset(indexloc, baseofs)
         assert baseofs.value == 0
         if integer_loc.value:
             Vloloc = regalloc.ivrm.get_scratch_reg()
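
In the two vec_load hunks and the vec_store hunk above, the hard assertion that
the constant offset is zero is replaced by a call to self._apply_offset(indexloc,
ofs).  The helper's definition is not part of this diff; a plausible sketch
(hypothetical, for illustration only; the addi/SCRATCH details are assumptions,
not taken from the patch) would return the index location untouched when the
offset is zero and otherwise fold the byte offset into a scratch register:

    def _apply_offset(self, indexloc, ofs):
        # hypothetical helper, not taken from the changeset
        if ofs.value == 0:
            return indexloc                  # common case: no extra displacement
        # fold the constant byte offset into a scratch index register
        self.mc.addi(r.SCRATCH.value, indexloc.value, ofs.value)
        return r.SCRATCH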
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py
--- a/rpython/jit/metainterp/test/test_vector.py
+++ b/rpython/jit/metainterp/test/test_vector.py
@@ -207,9 +207,7 @@
         bits = size*8
         integers = st.integers(min_value=-2**(bits-1), max_value=2**(bits-1)-1)
         la = data.draw(st.lists(integers, min_size=10, max_size=150))
-        #la = [0] * 10 #1,2,3,4,5,6,7,8,9,10,11,12,13]
         l = len(la)
-        #lb = [0] * 10 # [1,2,3,4,5,6,7,8,9,10,11,12,13]
         lb = data.draw(st.lists(integers, min_size=l, max_size=l))
 
         rawstorage = RawStorage()
@@ -238,11 +236,11 @@
     test_vec_short_add = \
         vec_int_arith(lambda a,b: r_int(a)+r_int(b), rffi.SHORT)
 
-    test_vec_signed_sub = \
-        vec_int_arith(lambda a,b: r_int(a)-r_int(b), rffi.SIGNED)
-    test_vec_int_sub = \
+    test_vec_sub_signed = \
+        vec_int_arith(lambda a,b: intmask(a-b), rffi.SIGNED)
+    test_vec_sub_int = \
         vec_int_arith(lambda a,b: r_int(a)-r_int(b), rffi.INT)
-    test_vec_short_sub = \
+    test_vec_sub_short = \
         vec_int_arith(lambda a,b: r_int(a)-r_int(b), rffi.SHORT)
 
     test_vec_signed_and = \
