Author: Maciej Fijalkowski <[email protected]>
Branch: arm64
Changeset: r95919:392250be4182
Date: 2019-02-09 16:18 +0000
http://bitbucket.org/pypy/pypy/changeset/392250be4182/

Log:    (bivab, rodolph, arigo)

        Make the first test of test_runner pass on ARM64. Yay!

diff --git a/rpython/jit/backend/aarch64/assembler.py b/rpython/jit/backend/aarch64/assembler.py
--- a/rpython/jit/backend/aarch64/assembler.py
+++ b/rpython/jit/backend/aarch64/assembler.py
@@ -163,7 +163,31 @@
         pass # XXX
 
     def reserve_gcref_table(self, allgcrefs):
-        pass
+        gcref_table_size = len(allgcrefs) * WORD
+        # align to a multiple of 16 and reserve space at the beginning
+        # of the machine code for the gc table.  This lets us write
+        # machine code with relative addressing (LDR literal).
+        gcref_table_size = (gcref_table_size + 15) & ~15
+        mc = self.mc
+        assert mc.get_relative_pos() == 0
+        for i in range(gcref_table_size):
+            mc.writechar('\x00')
+        self.setup_gcrefs_list(allgcrefs)
+
+    def patch_gcref_table(self, looptoken, rawstart):
+        # the gc table is at the start of the machine code
+        self.gc_table_addr = rawstart
+        tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart,
+                                                        self._allgcrefs)
+        gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken)
+        gcreftracers.append(tracer)    # keepalive
+        self.teardown_gcrefs_list()
+
+    def load_from_gc_table(self, regnum, index):
+        address_in_buffer = index * WORD   # at the start of the buffer
+        p_location = self.mc.get_relative_pos(break_basic_block=False)
+        offset = address_in_buffer - p_location
+        self.mc.LDR_r_literal(regnum, offset)
 
     def materialize_loop(self, looptoken):
         self.datablockwrapper.done()      # finish using cpu.asmmemmgr
@@ -176,9 +200,6 @@
         #    self.codemap.get_final_bytecode(res, size))
         return res
 
-    def patch_gcref_table(self, looptoken, rawstart):
-        pass
-
     def process_pending_guards(self, rawstart):
         pass
 
@@ -359,6 +380,32 @@
 
         mc.RET_r(r.lr.value)
 
+    def store_reg(self, mc, source, base, ofs=0):
+        # uses r.ip1 as a temporary
+        if source.is_vfp_reg():
+            return self._store_vfp_reg(mc, source, base, ofs)
+        else:
+            return self._store_core_reg(mc, source, base, ofs)
+
+    def _store_vfp_reg(self, mc, source, base, ofs):
+        # uses r.ip1 as a temporary
+        if check_imm_arg(ofs, VMEM_imm_size):
+            mc.VSTR(source.value, base.value, imm=ofs)
+        else:
+            mc.gen_load_int(r.ip1.value, ofs)
+            mc.ADD_rr(r.ip1.value, base.value, r.ip1.value)
+            mc.VSTR(source.value, r.ip1.value)
+
+    def _store_core_reg(self, mc, source, base, ofs):
+        # uses r.ip1 as a temporary
+        # XXX fix:
+        assert ofs & 0x7 == 0
+        assert 0 <= ofs < 32768
+        mc.STR_ri(source.value, base.value, ofs)
+        #if check_imm_arg(ofs):
+        #    mc.STR_ri(source.value, base.value, imm=ofs)
+        #else:
+        #    mc.gen_load_int(r.ip1, ofs)
+        #    mc.STR_rr(source.value, base.value, r.ip1)
 
 
 def not_implemented(msg):
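
The arithmetic above is compact, so here is a plain-Python sketch of it (an
illustration, not part of the commit): the gc table occupies offset 0 of the
machine-code buffer, padded to a multiple of 16, and load_from_gc_table turns
a table index into the PC-relative offset that LDR (literal) needs, 4-aligned
and within one MiB in either direction.

WORD = 8   # aarch64 word size, as in rpython/jit/backend/aarch64/arch.py

def aligned_table_size(n_gcrefs):
    # round the raw table size up to a multiple of 16
    return (n_gcrefs * WORD + 15) & ~15

def literal_offset(index, current_pos):
    # distance from the LDR instruction back to table entry `index`
    offset = index * WORD - current_pos
    assert offset & 0x3 == 0                  # LDR literal is 4-aligned
    assert -(1 << 20) <= offset < (1 << 20)   # one-MiB reach either way
    return offset

assert aligned_table_size(3) == 32     # 24 bytes, padded up to 32
assert literal_offset(2, 64) == -48    # entry 2 seen from code at byte 64
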
diff --git a/rpython/jit/backend/aarch64/codebuilder.py b/rpython/jit/backend/aarch64/codebuilder.py
--- a/rpython/jit/backend/aarch64/codebuilder.py
+++ b/rpython/jit/backend/aarch64/codebuilder.py
@@ -18,6 +18,13 @@
     def RET_r(self, arg):
         self.write32((0b1101011001011111 << 16) | (arg << 5))
 
+    def STR_ri(self, rt, rn, offset):
+        base = 0b1111100100
+        assert offset & 0x7 == 0
+        assert 0 <= offset < 32768
+        self.write32((base << 22) | ((offset >> 3) << 10) |
+                     (rn << 5) | rt)
+
     def STP_rr_preindex(self, reg1, reg2, rn, offset):
         base = 0b1010100110
         assert -512 <= offset < 512
@@ -32,9 +39,6 @@
         self.write32((base << 22) | ((0x7F & (offset >> 3)) << 15) |
                      (reg2 << 10) | (rn << 5) | reg1)
 
-    def MOV_r_u16(self, rd, immed, shift):     # u16 is an unsigned 16-bit
-        self.MOVK_r_u16(rd, immed, shift)
-
     def MOV_rr(self, rd, rn):
         self.ORR_rr(rd, r.xzr.value, rn)
 
@@ -49,6 +53,12 @@
         assert shift in (0, 16, 32, 48)
         self.write32((base << 23) | (shift >> 4 << 21) | (immed << 5) | rd) 
 
+    def MOVZ_r_u16(self, rd, immed, shift):
+        base = 0b110100101
+        assert 0 <= immed < 1 << 16
+        assert shift in (0, 16, 32, 48)
+        self.write32((base << 23) | (shift >> 4 << 21) | (immed << 5) | rd) 
+
     def MOVN_r_u16(self, rd, immed):
         base = 0b10010010100
         assert 0 <= immed < 1 << 16
@@ -64,6 +74,7 @@
         base = 0b1010100101
         assert -512 <= offset < 512
         assert offset & 0x7 == 0
+        assert reg1 != reg2
         self.write32((base << 22) | ((0x7F & (offset >> 3)) << 15) |
                      (reg2 << 10) | (rn << 5) | reg1)
 
@@ -71,6 +82,9 @@
         base = 0b1010100011
         assert -512 <= offset < 512
         assert offset & 0x7 == 0
+        assert reg1 != reg2
+        assert rn != reg1
+        assert rn != reg2
         self.write32((base << 22) | ((0x7F & (offset >> 3)) << 15) |
                      (reg2 << 10) | (rn << 5) | reg1)
 
@@ -78,8 +92,13 @@
         base = 0b1111100101
         assert 0 <= immed <= 1<<15
         assert immed & 0x7 == 0
-        immed >>= 3
-        self.write32((base << 22) | (immed << 10) | (rn << 5) | rt)
+        self.write32((base << 22) | (immed >> 3 << 10) | (rn << 5) | rt)
+
+    def LDR_r_literal(self, rt, offset):
+        base = 0b01011000
+        assert -(1 << 20) <= offset < (1 << 20)
+        assert offset & 0x3 == 0
+        self.write32((base << 24) | ((0x7ffff & (offset >> 2)) << 5) | rt)
 
     def ADD_rr(self, rd, rn, rm):
         base = 0b10001011000
@@ -91,18 +110,11 @@
     def gen_load_int(self, r, value):
         """r is the register number, value is the value to be loaded to the
         register"""
-        shift = 0
-        if value < 0:
-            value = ~value
-            nxt = intmask(value & 0xFFFF)
-            self.MOVN_r_u16(r, nxt)
-            value >>= 16
-            shift += 16
-        while value:
-            nxt = intmask(value & 0xFFFF)
-            self.MOV_r_u16(r, nxt, shift)
-            value >>= 16
-            shift += 16
+        # XXX optimize!
+        self.MOVZ_r_u16(r, value & 0xFFFF, 0)
+        self.MOVK_r_u16(r, (value >> 16) & 0xFFFF, 16)
+        self.MOVK_r_u16(r, (value >> 32) & 0xFFFF, 32)
+        self.MOVK_r_u16(r, (value >> 48) & 0xFFFF, 48)
 
 
 class InstrBuilder(BlockBuilderMixin, AbstractAarch64Builder):
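
For reference, a self-contained sketch (plain Python, not part of the commit)
of the new gen_load_int strategy: one MOVZ followed by three MOVKs always
materializes a full 64-bit constant. The "# XXX optimize!" note is about
skipping MOVKs whose 16-bit chunk is zero. The encodings mirror the bit
layouts above.

def encode_movz(rd, imm16, shift):
    return (0b110100101 << 23) | ((shift >> 4) << 21) | (imm16 << 5) | rd

def encode_movk(rd, imm16, shift):
    return (0b111100101 << 23) | ((shift >> 4) << 21) | (imm16 << 5) | rd

def load_int_words(rd, value):
    value &= (1 << 64) - 1    # view the constant as unsigned 64-bit
    words = [encode_movz(rd, value & 0xFFFF, 0)]
    for shift in (16, 32, 48):
        words.append(encode_movk(rd, (value >> shift) & 0xFFFF, shift))
    return words

# Cross-checked against an assembler: "movz x0, #1" is 0xd2800020 and
# "movk x0, #0, lsl #16" is 0xf2a00000.
assert load_int_words(0, 1)[0] == 0xd2800020
assert load_int_words(0, 1)[1] == 0xf2a00000
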
diff --git a/rpython/jit/backend/aarch64/locations.py b/rpython/jit/backend/aarch64/locations.py
--- a/rpython/jit/backend/aarch64/locations.py
+++ b/rpython/jit/backend/aarch64/locations.py
@@ -1,4 +1,5 @@
 
+from rpython.rlib.rarithmetic import r_int32
 from rpython.jit.backend.aarch64.arch import WORD, JITFRAME_FIXED_SIZE
 from rpython.jit.metainterp.history import INT, FLOAT
 
@@ -73,6 +74,7 @@
     _immutable_ = True
 
     def __init__(self, value):
+        assert not isinstance(value, r_int32)
         self.value = value
 
     def getint(self):
diff --git a/rpython/jit/backend/aarch64/opassembler.py b/rpython/jit/backend/aarch64/opassembler.py
--- a/rpython/jit/backend/aarch64/opassembler.py
+++ b/rpython/jit/backend/aarch64/opassembler.py
@@ -1,4 +1,6 @@
 
+from rpython.rlib.rarithmetic import r_uint
+from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt,
+                                            INT, FLOAT, REF)
 from rpython.jit.backend.aarch64 import registers as r
 from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler
 
@@ -30,6 +32,35 @@
         self.mc.STR_ri(value_loc.value, base_loc.value, 0)
 
     def emit_op_finish(self, op, arglocs):
+        base_ofs = self.cpu.get_baseofs_of_frame_field()
+        if len(arglocs) > 0:
+            [return_val] = arglocs
+            self.store_reg(self.mc, return_val, r.fp, base_ofs)
+        ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
+
+        faildescrindex = self.get_gcref_from_faildescr(op.getdescr())
+        self.load_from_gc_table(r.ip0.value, faildescrindex)
+        # XXX self.mov(fail_descr_loc, RawStackLoc(ofs))
+        self.store_reg(self.mc, r.ip0, r.fp, ofs)
+        if op.numargs() > 0 and op.getarg(0).type == REF:
+            if self._finish_gcmap:
+                # we're returning with a guard_not_forced_2, and
+                # additionally we need to say that r0 contains
+                # a reference too:
+                self._finish_gcmap[0] |= r_uint(1)
+                gcmap = self._finish_gcmap
+            else:
+                gcmap = self.gcmap_for_finish
+            self.push_gcmap(self.mc, gcmap, store=True)
+        elif self._finish_gcmap:
+            # we're returning with a guard_not_forced_2
+            gcmap = self._finish_gcmap
+            self.push_gcmap(self.mc, gcmap, store=True)
+        else:
+            # note that the 0 here is redundant, but I would rather
+            # keep that one and kill all the others
+            ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap')
+            self.store_reg(self.mc, r.xzr, r.fp, ofs)
         self.mc.MOV_rr(r.x0.value, r.fp.value)
         # exit function
         self.gen_func_epilog()
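
The three-way gcmap branch in emit_op_finish is easier to see lifted out as a
pure function. This sketch (not from the sources; r_uint reduced to a plain
int) mirrors the decision:

def select_gcmap(finish_gcmap, returns_ref, gcmap_for_finish):
    # returns the gcmap to push, or None when jf_gcmap is simply zeroed
    if returns_ref:
        if finish_gcmap:
            # guard_not_forced_2: also record that x0 holds a reference
            finish_gcmap[0] |= 1
            return finish_gcmap
        return gcmap_for_finish
    if finish_gcmap:
        return finish_gcmap    # guard_not_forced_2, no reference in x0
    return None                # emit_op_finish stores xzr into jf_gcmap

assert select_gcmap([0], True, [7]) == [1]
assert select_gcmap(None, True, [7]) == [7]
assert select_gcmap(None, False, [7]) is None
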
diff --git a/rpython/jit/backend/aarch64/regalloc.py b/rpython/jit/backend/aarch64/regalloc.py
--- a/rpython/jit/backend/aarch64/regalloc.py
+++ b/rpython/jit/backend/aarch64/regalloc.py
@@ -120,7 +120,7 @@
 
     def convert_to_imm(self, c):
         if isinstance(c, ConstInt):
-            val = rffi.cast(rffi.INT, c.value)
+            val = rffi.cast(lltype.Signed, c.value)
             return locations.ImmLocation(val)
         else:
             assert isinstance(c, ConstPtr)
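
The regalloc change above pairs with the new assert in locations.py:
rffi.cast(rffi.INT, ...) yields a 32-bit r_int32, so a 64-bit ConstInt would
have been silently truncated before reaching ImmLocation. A plain-Python
illustration of that failure mode (hypothetical helper, not from the
sources):

def cast_to_int32(value):
    # the numeric effect of forcing a value through a signed 32-bit type
    value &= 0xFFFFFFFF
    if value >= 0x80000000:
        value -= 0x100000000
    return value

assert cast_to_int32(-1) == -1
assert cast_to_int32(0x100000002) == 2    # high 32 bits silently dropped
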
diff --git a/rpython/jit/backend/aarch64/test/test_instr_builder.py b/rpython/jit/backend/aarch64/test/test_instr_builder.py
--- a/rpython/jit/backend/aarch64/test/test_instr_builder.py
+++ b/rpython/jit/backend/aarch64/test/test_instr_builder.py
@@ -1,4 +1,4 @@
-from hypothesis import given, settings, strategies as st
+from hypothesis import given, settings, strategies as st, assume
 from rpython.jit.backend.aarch64 import registers as r
 from rpython.jit.backend.aarch64 import codebuilder
 from rpython.jit.backend.aarch64.test.gen import assemble
@@ -19,7 +19,7 @@
 
     @settings(max_examples=20)
     @given(r1=st.sampled_from(r.registers))
-    def test_ret(self, r1):
+    def test_RET_r(self, r1):
         cb = CodeBuilder()
         cb.RET_r(r1.value)
         res = cb.hexdump()
@@ -48,6 +48,24 @@
 
     @settings(max_examples=20)
     @given(r1=st.sampled_from(r.registers),
+           immed=st.integers(min_value=0, max_value=(1<<16) - 1),
+           shift=st.integers(min_value=0, max_value=3))
+    def test_MOVK(self, r1, immed, shift):
+        cb = CodeBuilder()
+        cb.MOVK_r_u16(r1.value, immed, shift * 16)
+        assert cb.hexdump() == assemble("MOVK %r, %d, lsl %d" % (r1, immed, 
shift * 16))
+
+    @settings(max_examples=20)
+    @given(r1=st.sampled_from(r.registers),
+           immed=st.integers(min_value=0, max_value=(1<<16) - 1),
+           shift=st.integers(min_value=0, max_value=3))
+    def test_MOVZ(self, r1, immed, shift):
+        cb = CodeBuilder()
+        cb.MOVZ_r_u16(r1.value, immed, shift * 16)
+        assert cb.hexdump() == assemble("MOVZ %r, %d, lsl %d" % (r1, immed, 
shift * 16))
+
+    @settings(max_examples=20)
+    @given(r1=st.sampled_from(r.registers),
            immed=st.integers(min_value=0, max_value=(1<<16) - 1))
     def test_MOVN(self, r1, immed):
         cb = CodeBuilder()
@@ -55,14 +73,58 @@
         assert cb.hexdump() == assemble("MOV %r, %d" % (r1, ~immed))
 
     @settings(max_examples=20)
-    @given(r1=st.sampled_from(r.registers),
-           immed=st.integers(min_value=0, max_value=(1<<16) - 1),
-           shift=st.sampled_from([0, 16, 32, 48]))
-    def test_MOV_r_u16(self, r1, immed, shift):
+    @given(rt=st.sampled_from(r.registers),
+           rn=st.sampled_from(r.registers),
+           offset=st.integers(min_value=0, max_value=(1<<12)-1))
+    def test_STR_ri(self, rt, rn, offset):
         cb = CodeBuilder()
-        cb.MOV_r_u16(r1.value, immed, shift)
-        if shift == 0:
-            assert cb.hexdump() == assemble("MOVK %r, %d" % (r1, immed))
-        else:
-            assert cb.hexdump() == assemble("MOVK %r, %d, lsl %d" % (r1, 
immed, shift))
+        cb.STR_ri(rt.value, rn.value, offset * 8)
+        assert cb.hexdump() == assemble("STR %r, [%r, %d]" % (rt, rn, offset * 
8))
 
+    @settings(max_examples=20)
+    @given(reg1=st.sampled_from(r.registers),
+           reg2=st.sampled_from(r.registers),
+           rn=st.sampled_from(r.registers),
+           offset=st.integers(min_value=-64, max_value=63))
+    def test_LDP_rr(self, reg1, reg2, rn, offset):
+        assume(reg1.value != reg2.value)
+        cb = CodeBuilder()
+        cb.LDP_rri(reg1.value, reg2.value, rn.value, offset * 8)
+        assert cb.hexdump() == assemble("LDP %r, %r, [%r, %d]" % (reg1, reg2, 
rn, offset * 8))
+        #
+        assume(rn.value != reg1.value)
+        assume(rn.value != reg2.value)
+        cb = CodeBuilder()
+        cb.LDP_rr_postindex(reg1.value, reg2.value, rn.value, offset * 8)
+        assert cb.hexdump() == assemble("LDP %r, %r, [%r], %d" % (reg1, reg2, 
rn, offset * 8))
+
+    @settings(max_examples=20)
+    @given(rt=st.sampled_from(r.registers),
+           rn=st.sampled_from(r.registers),
+           offset=st.integers(min_value=0, max_value=(1<<12)-1))
+    def test_LDR_ri(self, rt, rn, offset):
+        cb = CodeBuilder()
+        cb.LDR_ri(rt.value, rn.value, offset * 8)
+        assert cb.hexdump() == assemble("LDR %r, [%r, %d]" % (rt, rn, offset * 
8))
+
+    @settings(max_examples=20)
+    @given(rt=st.sampled_from(r.registers),
+           offset=st.integers(min_value=-(1<<18), max_value=(1<<18)-1))
+    def test_LDR_r_literal(self, rt, offset):
+        cb = CodeBuilder()
+        cb.LDR_r_literal(rt.value, offset * 4)
+        assert cb.hexdump() == assemble("LDR %r, %d" % (rt, offset * 4))
+
+    @settings(max_examples=20)
+    @given(rd=st.sampled_from(r.registers),
+           rn=st.sampled_from(r.registers),
+           rm=st.sampled_from(r.registers))
+    def test_ADD_rr(self, rd, rn, rm):
+        cb = CodeBuilder()
+        cb.ADD_rr(rd.value, rn.value, rm.value)
+        assert cb.hexdump() == assemble("ADD %r, %r, %r" % (rd, rn, rm))
+
+    def test_BRK(self):
+        cb = CodeBuilder()
+        cb.BRK()
+        assert cb.hexdump() == assemble("BRK 0")
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit
