Author: Armin Rigo <[email protected]>
Branch: 
Changeset: r63495:0ea3b7331c89
Date: 2013-04-19 00:16 +0200
http://bitbucket.org/pypy/pypy/changeset/0ea3b7331c89/

Log:    Test and fix for cffi issue #78. Consolidate in rx86.py the 'abs_'
        and the 'immediate()' that follows it into a single 'abs_()'. This
        avoids the absolute address suddenly being truncated to 16 bits
        when the same instruction also takes a 16-bit immediate afterwards.

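The problem, illustrated as a standalone sketch (the helper below is not the
real rx86.py code; only the flag name '_use_16_bit_immediate' and the idea of
a width flag set by regloc.py come from the change below):

    import struct

    _use_16_bit_immediate = True       # set by regloc.py for 16-bit stores

    def write_immediate(value):
        # old behaviour: the flag chose the width of *every* immediate of
        # the instruction, including the one holding the absolute address
        if _use_16_bit_immediate:
            return struct.pack('<H', value & 0xffff)      # address truncated!
        return struct.pack('<I', value & 0xffffffff)

    address = 0xf7e9a2b0               # absolute address of a raw array
    assert len(write_immediate(address)) == 2   # old path: only 16 bits emitted
    assert len(struct.pack('<I', address)) == 4 # new abs_(): always 32 bits
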
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py
--- a/rpython/jit/backend/test/runner_test.py
+++ b/rpython/jit/backend/test/runner_test.py
@@ -3935,3 +3935,19 @@
         descr = self.cpu.get_latest_descr(frame)
         assert descr.identifier == 42
         assert not self.cpu.grab_exc_value(frame)
+
+    def test_setarrayitem_raw_short(self):
+        # setarrayitem_raw(140737353744432, 0, 30583, descr=<ArrayS 2>)
+        A = rffi.CArray(rffi.SHORT)
+        arraydescr = self.cpu.arraydescrof(A)
+        a = lltype.malloc(A, 2, flavor='raw')
+        a[0] = rffi.cast(rffi.SHORT, 666)
+        a[1] = rffi.cast(rffi.SHORT, 777)
+        a_int = rffi.cast(lltype.Signed, a)
+        print 'a_int:', a_int
+        self.execute_operation(rop.SETARRAYITEM_RAW,
+                               [ConstInt(a_int), ConstInt(0), ConstInt(-7654)],
+                               'void', descr=arraydescr)
+        assert rffi.cast(lltype.Signed, a[0]) == -7654
+        assert rffi.cast(lltype.Signed, a[1]) == 777
+        lltype.free(a, flavor='raw')
diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py
--- a/rpython/jit/backend/x86/rx86.py
+++ b/rpython/jit/backend/x86/rx86.py
@@ -126,6 +126,8 @@
         mc.writeimm64(immediate)
     else:
         if mc._use_16_bit_immediate:
+            # special case for the default width='i' for immediate(),
+            # to support 16-bit if the flag is set by regloc.py
             mc.writeimm16(immediate)
         else:
             mc.writeimm32(immediate)
@@ -282,16 +284,20 @@
 # (the immediate address itself must be explicitely encoded as well,
 # with immediate(argnum)).
 
-def encode_abs(mc, _1, _2, orbyte):
+@specialize.arg(2)
+def encode_abs(mc, immediate, _, orbyte):
     # expands to either '\x05' on 32-bit, or '\x04\x25' on 64-bit
     if mc.WORD == 8:
         mc.writechar(chr(0x04 | orbyte))
         mc.writechar(chr(0x25))
     else:
         mc.writechar(chr(0x05 | orbyte))
+    # followed by an immediate, always 32 bits
+    mc.writeimm32(immediate)
     return 0
 
-abs_ = encode_abs, 0, None, None
+def abs_(argnum):
+    return encode_abs, argnum, None, None
 
 # ____________________________________________________________
 # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes
@@ -374,9 +380,8 @@
     INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1))
     INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2))
     INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2))
-    INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2))
-    INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1),
-                    immediate(2,'b'))
+    INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_(2))
+    INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_(1), immediate(2,'b'))
     INSN_mi8 = insn(rex_w, '\x83', orbyte(base), mem_reg_plus_const(1),
                     immediate(2,'b'))
     INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b'))
@@ -489,12 +494,10 @@
     CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32)
     CMP_mr = insn(rex_w, '\x39', register(2, 8), mem_reg_plus_const(1))
 
-    CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_,
-                   immediate(1), immediate(2, 'b'))
-    CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_,
-                    immediate(1), immediate(2))
+    CMP_ji8 = insn(rex_w, '\x83', orbyte(7<<3), abs_(1), immediate(2, 'b'))
+    CMP_ji32 = insn(rex_w, '\x81', orbyte(7<<3), abs_(1), immediate(2))
     CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32)
-    CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_, immediate(1))
+    CMP_jr = insn(rex_w, '\x39', register(2, 8), abs_(1))
 
     CMP32_mi = insn(rex_nw, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2))
 
@@ -505,7 +508,7 @@
     OR8_rr = insn(rex_fw, '\x08', byte_register(1), byte_register(2,8), '\xC0')
     OR8_mi = insn(rex_nw, '\x80', orbyte(1<<3), mem_reg_plus_const(1),
                   immediate(2, 'b'))
-    OR8_ji = insn(rex_nw, '\x80', orbyte(1<<3), abs_, immediate(1),
+    OR8_ji = insn(rex_nw, '\x80', orbyte(1<<3), abs_(1),
                   immediate(2, 'b'))
 
     NEG_r = insn(rex_w, '\xF7', register(1), '\xD8')
@@ -556,7 +559,7 @@
     LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True))
     LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2))
     LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2))
-    LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_, immediate(2))
+    LEA_rj = insn(rex_w, '\x8D', register(1, 8), abs_(2))
 
     CALL_l = insn('\xE8', relative(1))
     CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3)))
@@ -583,11 +586,11 @@
 
     TEST8_mi = insn(rex_nw, '\xF6', orbyte(0<<3), mem_reg_plus_const(1), immediate(2, 'b'))
     TEST8_bi = insn(rex_nw, '\xF6', orbyte(0<<3), stack_bp(1), immediate(2, 'b'))
-    TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_, immediate(1), immediate(2, 'b'))
+    TEST8_ji = insn(rex_nw, '\xF6', orbyte(0<<3), abs_(1), immediate(2, 'b'))
     TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0')
 
     BTS_mr = insn(rex_w, '\x0F\xAB', register(2,8), mem_reg_plus_const(1))
-    BTS_jr = insn(rex_w, '\x0F\xAB', register(2,8), abs_, immediate(1))
+    BTS_jr = insn(rex_w, '\x0F\xAB', register(2,8), abs_(1))
 
     # x87 instructions
     FSTPL_b = insn('\xDD', orbyte(3<<3), stack_bp(1)) # rffi.DOUBLE ('as' wants L??)
@@ -718,7 +721,7 @@
     add_insn('s', stack_sp(modrm_argnum))
     add_insn('m', mem_reg_plus_const(modrm_argnum))
     add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum))
-    add_insn('j', abs_, immediate(modrm_argnum))
+    add_insn('j', abs_(modrm_argnum))
 
 # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a
 # register
@@ -766,7 +769,7 @@
     #
     assert insnname_template.count('*') == 1
     add_insn('x', register(2), '\xC0')
-    add_insn('j', abs_, immediate(2))
+    add_insn('j', abs_(2))
     add_insn('m', mem_reg_plus_const(2))
 
 define_pxmm_insn('PADDQ_x*',     '\xD4')
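
For reference, here is a rough standalone model of what the consolidated
'abs_()' step does; 'FakeBuilder' is invented for the example and is not the
real machine-code builder, but 'encode_abs' follows the new code above:

    import struct

    class FakeBuilder(object):
        WORD = 8                            # pretend we are on x86-64
        def __init__(self):
            self.pieces = []
        def writechar(self, c):
            self.pieces.append(c)                                   # 1 byte
        def writeimm16(self, imm):
            self.pieces.append(struct.pack('<H', imm & 0xffff))     # 2 bytes
        def writeimm32(self, imm):
            self.pieces.append(struct.pack('<I', imm & 0xffffffff)) # 4 bytes

    def encode_abs(mc, immediate, _, orbyte):
        # ModRM for an absolute address, then the address itself: the
        # address is always written as 32 bits, independently of how wide
        # the data immediate of the instruction is
        if mc.WORD == 8:
            mc.writechar(chr(0x04 | orbyte))
            mc.writechar(chr(0x25))
        else:
            mc.writechar(chr(0x05 | orbyte))
        mc.writeimm32(immediate)
        return 0

    mc = FakeBuilder()
    encode_abs(mc, 0xf7e9a2b0, None, 0)     # '\x04\x25' + 32-bit address
    mc.writeimm16(-7654 & 0xffff)           # the 16-bit data immediate after it
    assert sum(len(p) for p in mc.pieces) == 2 + 4 + 2

The new test should exercise the same situation through the real backend;
presumably something like

    py.test rpython/jit/backend/x86/test/test_runner.py -k test_setarrayitem_raw_short

selects it, assuming the usual x86 backend test layout.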