Author: Richard Plangger <planri...@gmail.com>
Branch: s390x-backend
Changeset: r82041:254804e6eb46
Date: 2016-02-02 13:38 +0100
http://bitbucket.org/pypy/pypy/changeset/254804e6eb46/

Log:    size_alignment_pos now supports bitfields on big-endian platforms,
        plus some minor refactorings and simplifications

diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py
--- a/pypy/module/_rawffi/structure.py
+++ b/pypy/module/_rawffi/structure.py
@@ -18,6 +18,9 @@
 from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \
     r_ulonglong
 from rpython.rtyper.lltypesystem import lltype, rffi
+import sys
+
+IS_BIG_ENDIAN = sys.byteorder == 'big'
 
 
 
@@ -114,20 +117,32 @@
                 size += intmask(fieldsize)
                 bitsizes.append(fieldsize)
             elif field_type == NEW_BITFIELD:
-                bitsizes.append((bitsize << 16) + bitoffset)
+                if IS_BIG_ENDIAN:
+                    off = last_size - bitoffset - bitsize
+                else:
+                    off = bitoffset
+                bitsizes.append((bitsize << 16) + off)
                 bitoffset = bitsize
                 size = round_up(size, fieldalignment)
                 pos.append(size)
                 size += fieldsize
             elif field_type == CONT_BITFIELD:
-                bitsizes.append((bitsize << 16) + bitoffset)
+                if IS_BIG_ENDIAN:
+                    off = last_size - bitoffset - bitsize
+                else:
+                    off = bitoffset
+                bitsizes.append((bitsize << 16) + off)
                 bitoffset += bitsize
                 # offset is already updated for the NEXT field
                 pos.append(size - fieldsize)
             elif field_type == EXPAND_BITFIELD:
                 size += fieldsize - last_size / 8
                 last_size = fieldsize * 8
-                bitsizes.append((bitsize << 16) + bitoffset)
+                if IS_BIG_ENDIAN:
+                    off = last_size - bitoffset - bitsize
+                else:
+                    off = bitoffset
+                bitsizes.append((bitsize << 16) + off)
                 bitoffset += bitsize
                 # offset is already updated for the NEXT field
                 pos.append(size - fieldsize)
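
Note on the hunk above: on a little-endian target a bitfield's offset counts
from bit 0 of its storage unit, while on a big-endian target the same field is
mirrored from the most significant bit. A minimal standalone sketch of the
mapping (last_size is the storage unit width in bits; this is an illustration,
not the RPython code itself):

    import sys

    IS_BIG_ENDIAN = sys.byteorder == 'big'

    def bitfield_offset(container_bits, bitoffset, bitsize):
        # little-endian: fields fill the container starting at bit 0;
        # big-endian: fields fill from the most significant bit, so the
        # offset is mirrored within the container
        if IS_BIG_ENDIAN:
            return container_bits - bitoffset - bitsize
        return bitoffset

    # a 5-bit field at the start of a 32-bit unit sits at bit 0 on
    # little-endian and at bit 27 (= 32 - 0 - 5) on big-endian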
diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -466,21 +466,17 @@
 
         if kind == 'fixed':
             # compute the size we want
-            # r5 is saved to the jit frame
-            # RES == r2!
-            mc.LGR(r.r5, r.RSZ)
-            mc.SGR(r.r5, r.RES)
-            mc.LGR(r.r2, r.r5)
+            mc.SGRK(r.r2, r.RSZ, r.RES)
             if hasattr(self.cpu.gc_ll_descr, 'passes_frame'):
                 # for tests only
                 mc.LGR(r.r3, r.SPP)
         elif kind == 'str' or kind == 'unicode':
-            pass  # length is already in r3
+            pass  # length is already in r2
         else:
             # arguments to the called function are [itemsize, tid, length]
             # itemsize is already in r2
+            mc.LGR(r.r4, r.RSZ)        # length
             mc.LGR(r.r3, r.SCRATCH2)   # tid
-            mc.LGR(r.r4, r.RSZ)        # length
 
         # Do the call
         addr = rffi.cast(lltype.Signed, addr)
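
Note: SGR only has a two-operand form (the first operand is both source and
destination), which is why the old code needed r5 as a temporary. SGRK, from
the distinct-operands facility, writes the difference into a third register.
A rough Python model of the two semantics, as assumed from the z/Architecture
manual:

    MASK64 = (1 << 64) - 1

    def SGR(regs, r1, r2):
        # two-operand subtract: r1 <- r1 - r2 (r1 is clobbered)
        regs[r1] = (regs[r1] - regs[r2]) & MASK64

    def SGRK(regs, r1, r2, r3):
        # distinct-operands subtract: r1 <- r2 - r3 (sources untouched)
        regs[r1] = (regs[r2] - regs[r3]) & MASK64

    # mc.SGRK(r.r2, r.RSZ, r.RES) therefore computes RSZ - RES into r2
    # in one instruction, with no scratch register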
@@ -498,11 +494,11 @@
         self._pop_fp_regs_from_jitframe(mc)
 
         nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr()
-        self.mc.load_imm(r.SCRATCH, nursery_free_adr)
+        self.mc.load_imm(r.r1, nursery_free_adr)
 
-        # r.SCRATCH is now the address of nursery_free
+        # r.r1 is now the address of nursery_free
         # r.RES is still the result of the call done above
-        # r.RSZ is loaded from [SCRATCH], to make the caller's store a no-op here
+        # r.RSZ is loaded from [r1], to make the caller's store a no-op here
         mc.load(r.RSZ, r.r1, 0)
         #
         mc.restore_link()
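
Why this makes the caller's store a no-op: the shared tail in the caller ends
by storing RSZ to nursery_free, and the slow path has already updated
nursery_free itself, so reloading RSZ from [r1] here means that store writes
back the value just read. Schematically (pseudocode, not the emitted
instructions):

    # slow-path epilogue (this hunk):  RSZ = mem[nursery_free_adr]
    # caller's shared tail:            mem[nursery_free_adr] = RSZ
    # net effect after the slow path:  nursery_free is unchanged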
@@ -1283,6 +1279,8 @@
         # no frame needed, r14 is saved on the jitframe
         mc.branch_absolute(self.malloc_slowpath)
 
+        # here r1 holds nursery_free_addr
+
         offset = mc.currpos() - fast_jmp_pos
         pmc = OverwritingBuilder(mc, fast_jmp_pos, 1)
         pmc.BRC(c.LE, l.imm(offset))    # jump if LE (not GT), predicted to be true
@@ -1362,15 +1360,13 @@
         force_realignment = (itemsize % WORD) != 0
         if force_realignment:
             constsize += WORD - 1
-        if lengthloc is not r.RSZ:
-            mc.LGR(r.RSZ, lengthloc)
-        mc.AGFI(r.RSZ, l.imm(constsize))
+        mc.AGHIK(r.RSZ, lengthloc, l.imm(constsize))
         if force_realignment:
             # "& ~(WORD-1)"
             mc.LGHI(r.SCRATCH2, l.imm(~(WORD-1)))
             mc.NGR(r.RSZ, r.SCRATCH2)
 
-        mc.AGR(r.RSZ, r.RES)
+        mc.AGRK(r.RSZ, r.RES, r.RSZ)
         # now RSZ contains the total size in bytes, rounded up to a multiple
         # of WORD, plus nursery_free_adr
 
@@ -1393,14 +1389,6 @@
         # save the gcmap
         self.load_gcmap(mc, r.r1, gcmap)
         #
-        # load the argument(s)
-        if kind == rewrite.FLAG_ARRAY:
-            mc.LGR(r.RSZ, lengthloc)
-            mc.load_imm(r.RES, itemsize)
-            mc.load_imm(r.SCRATCH2, arraydescr.tid)
-        else:
-            mc.LGR(r.RES, lengthloc)
-        #
         # load the function into r14 and jump
         if kind == rewrite.FLAG_ARRAY:
             addr = self.malloc_slowpath_varsize
@@ -1411,6 +1399,15 @@
         else:
             raise AssertionError(kind)
         #
+        # load the argument(s)
+        if kind == rewrite.FLAG_ARRAY:
+            mc.LGR(r.RSZ, lengthloc)
+            mc.load_imm(r.RES, itemsize)
+            mc.load_imm(r.SCRATCH2, arraydescr.tid)
+        else:
+            mc.LGR(r.RES, lengthloc)
+        #
+        #
         # call!
         mc.branch_absolute(addr)
 
diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py
--- a/rpython/jit/backend/zarch/codebuilder.py
+++ b/rpython/jit/backend/zarch/codebuilder.py
@@ -175,7 +175,7 @@
         else:
             # this is not put into the constant pool, because it
             # is an immediate value that cannot easily be foreseen
-            self.LGFI(dest_reg, l.imm(word & 0xFFFFffff))
+            self.IILF(dest_reg, l.imm(word & 0xFFFFffff))
             self.IIHF(dest_reg, l.imm((word >> 32) & 0xFFFFffff))
 
     def load_imm_plus(self, dest_reg, word):
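
The LGFI -> IILF switch: LGFI sign-extends a signed 32-bit immediate into all
64 bits, so the 0xFFFFffff-masked low word of a 64-bit constant can fall
outside its signed operand range, whereas IILF takes an unsigned 32-bit
immediate ('r,u32' in instructions.py below) and replaces only the low 32
bits, with IIHF replacing only the high 32 bits, which is exactly what a
two-instruction 64-bit load wants. A runnable sketch of the assumed
semantics, matching the new test_load_64bit tests:

    MASK32 = 0xFFFFffff

    def IILF(reg, imm32):
        # insert immediate low: replace the low 32 bits only
        return (reg & (MASK32 << 32)) | (imm32 & MASK32)

    def IIHF(reg, imm32):
        # insert immediate high: replace the high 32 bits only
        return (reg & MASK32) | ((imm32 & MASK32) << 32)

    def load_imm(word):
        reg = IILF(0, word & MASK32)
        return IIHF(reg, (word >> 32) & MASK32)

    assert load_imm(0xffffFFFF) == 0xffffFFFF
    assert load_imm(2177165728) == 2177165728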
diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py
--- a/rpython/jit/backend/zarch/instruction_builder.py
+++ b/rpython/jit/backend/zarch/instruction_builder.py
@@ -308,6 +308,17 @@
         self.write_i16(imm16 & BIT_MASK_16)
     return encode_ri
 
+def build_rie_d(mnemonic, (opcode1,opcode2)):
+    @builder.arguments('r,r,i16')
+    def encode_rie_d(self, reg1, reg2, imm16):
+        self.writechar(opcode1)
+        byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4)
+        self.writechar(chr(byte))
+        self.write_i16(imm16 & BIT_MASK_16)
+        self.writechar(chr(0x0))
+        self.writechar(opcode2)
+    return encode_rie_d
+
 def build_rie_e(mnemonic, (opcode1,opcode2)):
     br = is_branch_relative(mnemonic)
     @builder.arguments('r,r,i16')
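
A standalone sketch of the byte layout encode_rie_d emits, assuming write_i16
stores the immediate big-endian as elsewhere in this backend: first opcode
byte, the R1/R3 register nibbles, the 16-bit immediate, one unused byte, then
the second opcode byte:

    BIT_MASK_4 = 0xF

    def rie_d_bytes(opcode1, opcode2, reg1, reg2, imm16):
        regs = ((reg1 & BIT_MASK_4) << 4) | (reg2 & BIT_MASK_4)
        return bytes(bytearray([opcode1, regs,
                                (imm16 >> 8) & 0xFF, imm16 & 0xFF,
                                0x00, opcode2]))

    # under these assumptions, AGHIK r2, r3, 8 (opcodes 0xEC/0xD9 from
    # instructions.py) encodes as EC 23 00 08 00 D9
    assert rie_d_bytes(0xEC, 0xD9, 2, 3, 8) == b'\xEC\x23\x00\x08\x00\xD9'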
diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py
--- a/rpython/jit/backend/zarch/instructions.py
+++ b/rpython/jit/backend/zarch/instructions.py
@@ -13,6 +13,7 @@
     # add
     'AR':      ('rr',    ['\x1A']),
     'AGR':     ('rre',   ['\xB9','\x08']),
+    'AGRK':    ('rrf_a', ['\xB9','\xE8']),
     'AGFR':    ('rre',   ['\xB9','\x18']),
     'A':       ('rx',    ['\x5A']),
     'AGFI':    ('ril',   ['\xC2','\x08']),
@@ -60,6 +61,7 @@
     'AGF':     ('rxy',   ['\xE3','\x18']),
     'AHI':     ('ri',    ['\xA7','\x0A']),
     'AGHI':    ('ri',    ['\xA7','\x0B']),
+    'AGHIK':   ('rie_d', ['\xEC','\xD9']),
 
 
     # comparison
@@ -150,6 +152,7 @@
     'LGB':     ('rxy',   ['\xE3','\x77']),
     'LLGC':     ('rxy',   ['\xE3','\x90']),
     'LARL':    ('ril',   ['\xC0','\x00'], 'r/m,h32'),
+    'IILF':    ('ril',   ['\xC0','\x09'], 'r,u32'),
     'IIHF':    ('ril',   ['\xC0','\x08'], 'r,u32'),
 
     # load on condition
@@ -252,6 +255,9 @@
     'CEB':     ('rxe',   ['\xED','\x09'], 'r,bidl,-'),
     'CDB':     ('rxe',   ['\xED','\x19'], 'r,bidl,-'),
 
+    # compare and trap
+    'CGRT':    ('rrf_c', ['\xB9','\x60']),
+
     # complement & positive
     'LPDBR':    ('rre',   ['\xB3','\x10']),
     'LCDBR':    ('rre',   ['\xB3','\x13']),
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -679,12 +679,17 @@
 
     def emit_guard_nonnull_class(self, op, arglocs, regalloc):
         self.mc.cmp_op(arglocs[0], l.imm(1), imm=True, signed=False)
+
         patch_pos = self.mc.currpos()
         self.mc.reserve_cond_jump(short=True)
+
         self._cmp_guard_class(op, arglocs, regalloc)
+        #self.mc.CGRT(r.SCRATCH, r.SCRATCH2, c.NE)
+
         pmc = OverwritingBuilder(self.mc, patch_pos, 1)
         pmc.BRC(c.LT, l.imm(self.mc.currpos() - patch_pos))
         pmc.overwrite()
+
         self.guard_success_cc = c.EQ
         self._emit_guard(op, arglocs[2:])
 
diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py
--- a/rpython/jit/backend/zarch/registers.py
+++ b/rpython/jit/backend/zarch/registers.py
@@ -17,7 +17,7 @@
 SCRATCH2 = r0
 GPR_RETURN = r2
 RES = r2
-RSZ = r6
+RSZ = r12 # do not use a volatile register
 
 [f0,f1,f2,f3,f4,f5,f6,f7,f8,
  f9,f10,f11,f12,f13,f14,f15] = fpregisters
diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py
--- a/rpython/jit/backend/zarch/test/test_assembler.py
+++ b/rpython/jit/backend/zarch/test/test_assembler.py
@@ -125,6 +125,20 @@
         self.a.mc.BCR(con.ANY, r.r14)
         assert run_asm(self.a) == 0
 
+    def test_load_64bit(self):
+        self.a.mc.load_imm(r.r2, 0x0fffFFFF)
+        self.a.mc.BCR(con.ANY, r.r14)
+        assert run_asm(self.a) == 0x0fffFFFF
+
+    def test_load_64bit_2(self):
+        self.a.mc.load_imm(r.r2, 0xffffFFFF)
+        self.a.mc.BCR(con.ANY, r.r14)
+        assert run_asm(self.a) == 0xffffFFFF
+
+    def test_load_64bit_3(self):
+        self.a.mc.load_imm(r.r2, 2177165728)
+        self.a.mc.BCR(con.ANY, r.r14)
+        assert run_asm(self.a) == 2177165728
 
     def test_byte_count_instr(self):
         assert self.mc.BRC_byte_count == 4