Signed-off-by: Richard Henderson <r...@twiddle.net>
---
 tcg/s390/tcg-target.c |   94 +++++++++++++++++++++++++++++++++++++++++-------
 tcg/s390/tcg-target.h |   20 +++++-----
 2 files changed, 90 insertions(+), 24 deletions(-)
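
Reviewer note (commentary below the ---, not part of the commit message): the six new
tgen_ext* helpers each emit one RRE-format extension instruction. As a reference for
the semantics those instructions are relied on for here, a minimal plain-C sketch is
given below; the sem_ext* names are invented for illustration only and the narrow
casts assume the usual two's-complement truncation.

    #include <stdint.h>

    /* 64-bit result of each z/Architecture extension op used in this patch. */
    static inline int64_t  sem_ext8s (uint64_t r) { return (int8_t)r;   }  /* LGBR  */
    static inline uint64_t sem_ext8u (uint64_t r) { return (uint8_t)r;  }  /* LLGCR */
    static inline int64_t  sem_ext16s(uint64_t r) { return (int16_t)r;  }  /* LGHR  */
    static inline uint64_t sem_ext16u(uint64_t r) { return (uint16_t)r; }  /* LLGHR */
    static inline int64_t  sem_ext32s(uint64_t r) { return (int32_t)r;  }  /* LGFR  */
    static inline uint64_t sem_ext32u(uint64_t r) { return (uint32_t)r; }  /* LLGFR */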

diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index fe83415..3f7d08d 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -70,10 +70,14 @@ typedef enum S390Opcode {
     RRE_DLR     = 0xb997,
     RRE_DSGFR   = 0xb91d,
     RRE_DSGR    = 0xb90d,
+    RRE_LGBR    = 0xb906,
     RRE_LCGR    = 0xb903,
     RRE_LGFR    = 0xb914,
+    RRE_LGHR    = 0xb907,
     RRE_LGR     = 0xb904,
+    RRE_LLGCR   = 0xb984,
     RRE_LLGFR   = 0xb916,
+    RRE_LLGHR   = 0xb985,
     RRE_MSGR    = 0xb90c,
     RRE_MSR     = 0xb252,
     RRE_NGR     = 0xb980,
@@ -491,6 +495,36 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
     }
 }
 
+static inline void tgen_ext8s(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_insn(s, RRE, LGBR, dest, src);
+}
+
+static inline void tgen_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_insn(s, RRE, LLGCR, dest, src);
+}
+
+static inline void tgen_ext16s(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_insn(s, RRE, LGHR, dest, src);
+}
+
+static inline void tgen_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_insn(s, RRE, LLGHR, dest, src);
+}
+
+static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_insn(s, RRE, LGFR, dest, src);
+}
+
+static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_insn(s, RRE, LLGFR, dest, src);
+}
+
 static void tgen32_cmp(TCGContext *s, TCGCond c, TCGReg r1, TCGReg r2)
 {
     if (c > TCG_COND_GT) {
@@ -581,8 +615,8 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
     }
 
 #if TARGET_LONG_BITS == 32
-    tcg_out_insn(s, RRE, LLGFR, arg1, addr_reg);
-    tcg_out_insn(s, RRE, LLGFR, arg0, addr_reg);
+    tgen_ext32u(s, arg1, addr_reg);
+    tgen_ext32u(s, arg0, addr_reg);
 #else
     tcg_out_mov(s, arg1, addr_reg);
     tcg_out_mov(s, arg0, addr_reg);
@@ -619,7 +653,7 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
 
     /* call load/store helper */
 #if TARGET_LONG_BITS == 32
-    tcg_out_insn(s, RRE, LLGFR, arg0, addr_reg);
+    tgen_ext32u(s, arg0, addr_reg);
 #else
     tcg_out_mov(s, arg0, addr_reg);
 #endif
@@ -635,15 +669,13 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int data_reg, int addr_reg,
         /* sign extension */
         switch (opc) {
         case LD_INT8:
-            tcg_out_insn(s, RSY, SLLG, data_reg, arg0, SH64_REG_NONE, 56);
-            tcg_out_insn(s, RSY, SRAG, data_reg, data_reg, SH64_REG_NONE, 56);
+            tgen_ext8s(s, data_reg, arg0);
             break;
         case LD_INT16:
-            tcg_out_insn(s, RSY, SLLG, data_reg, arg0, SH64_REG_NONE, 48);
-            tcg_out_insn(s, RSY, SRAG, data_reg, data_reg, SH64_REG_NONE, 48);
+            tgen_ext16s(s, data_reg, arg0);
             break;
         case LD_INT32:
-            tcg_out_insn(s, RRE, LGFR, data_reg, arg0);
+            tgen_ext32s(s, data_reg, arg0);
             break;
         default:
             /* unsigned -> just copy */
@@ -741,8 +773,7 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
 #else
         /* swapped unsigned halfword load with upper bits zeroed */
         tcg_out_insn(s, RXY, LRVH, data_reg, arg0, 0, 0);
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, 0xffffL);
-        tcg_out_insn(s, RRE, NGR, data_reg, 13);
+        tgen_ext16u(s, data_reg, data_reg);
 #endif
         break;
     case LD_INT16:
@@ -751,8 +782,7 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
 #else
         /* swapped sign-extended halfword load */
         tcg_out_insn(s, RXY, LRVH, data_reg, arg0, 0, 0);
-        tcg_out_insn(s, RSY, SLLG, data_reg, data_reg, SH64_REG_NONE, 48);
-        tcg_out_insn(s, RSY, SRAG, data_reg, data_reg, SH64_REG_NONE, 48);
+        tgen_ext16s(s, data_reg, data_reg);
 #endif
         break;
     case LD_UINT32:
@@ -761,7 +791,7 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
 #else
         /* swapped unsigned int load with upper bits zeroed */
         tcg_out_insn(s, RXY, LRV, data_reg, arg0, 0, 0);
-        tcg_out_insn(s, RRE, LLGFR, data_reg, data_reg);
+        tgen_ext32u(s, data_reg, data_reg);
 #endif
         break;
     case LD_INT32:
@@ -770,7 +800,7 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
 #else
         /* swapped sign-extended int load */
         tcg_out_insn(s, RXY, LRV, data_reg, arg0, 0, 0);
-        tcg_out_insn(s, RRE, LGFR, data_reg, data_reg);
+        tgen_ext32s(s, data_reg, data_reg);
 #endif
         break;
     case LD_UINT64:
@@ -1063,6 +1093,30 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         op = RSY_SRAG;
         goto do_shift64;
 
+    case INDEX_op_ext8s_i32:
+    case INDEX_op_ext8s_i64:
+        tgen_ext8s(s, args[0], args[1]);
+        break;
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
+        tgen_ext16s(s, args[0], args[1]);
+        break;
+    case INDEX_op_ext32s_i64:
+        tgen_ext32s(s, args[0], args[1]);
+        break;
+
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
+        tgen_ext8u(s, args[0], args[1]);
+        break;
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
+        tgen_ext16u(s, args[0], args[1]);
+        break;
+    case INDEX_op_ext32u_i64:
+        tgen_ext32u(s, args[0], args[1]);
+        break;
+
     case INDEX_op_br:
         tgen_branch(s, S390_CC_ALWAYS, args[0]);
         break;
@@ -1170,6 +1224,11 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_shr_i32, { "r", "0", "Ri" } },
     { INDEX_op_sar_i32, { "r", "0", "Ri" } },
 
+    { INDEX_op_ext8s_i32, { "r", "r" } },
+    { INDEX_op_ext8u_i32, { "r", "r" } },
+    { INDEX_op_ext16s_i32, { "r", "r" } },
+    { INDEX_op_ext16u_i32, { "r", "r" } },
+
     { INDEX_op_brcond_i32, { "r", "r" } },
     { INDEX_op_setcond_i32, { "r", "r", "r" } },
 
@@ -1220,6 +1279,13 @@ static const TCGTargetOpDef s390_op_defs[] = {
     { INDEX_op_shr_i64, { "r", "r", "Ri" } },
     { INDEX_op_sar_i64, { "r", "r", "Ri" } },
 
+    { INDEX_op_ext8s_i64, { "r", "r" } },
+    { INDEX_op_ext8u_i64, { "r", "r" } },
+    { INDEX_op_ext16s_i64, { "r", "r" } },
+    { INDEX_op_ext16u_i64, { "r", "r" } },
+    { INDEX_op_ext32s_i64, { "r", "r" } },
+    { INDEX_op_ext32u_i64, { "r", "r" } },
+
     { INDEX_op_brcond_i64, { "r", "r" } },
     { INDEX_op_setcond_i64, { "r", "r", "r" } },
 #endif
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index b987a7e..76a13fc 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -50,10 +50,10 @@ typedef enum TCGReg {
 /* optional instructions */
 #define TCG_TARGET_HAS_div2_i32
 // #define TCG_TARGET_HAS_rot_i32
-// #define TCG_TARGET_HAS_ext8s_i32
-// #define TCG_TARGET_HAS_ext16s_i32
-// #define TCG_TARGET_HAS_ext8u_i32
-// #define TCG_TARGET_HAS_ext16u_i32
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_ext8u_i32
+#define TCG_TARGET_HAS_ext16u_i32
 // #define TCG_TARGET_HAS_bswap16_i32
 // #define TCG_TARGET_HAS_bswap32_i32
 // #define TCG_TARGET_HAS_not_i32
@@ -66,12 +66,12 @@ typedef enum TCGReg {
 
 #define TCG_TARGET_HAS_div2_i64
 // #define TCG_TARGET_HAS_rot_i64
-// #define TCG_TARGET_HAS_ext8s_i64
-// #define TCG_TARGET_HAS_ext16s_i64
-// #define TCG_TARGET_HAS_ext32s_i64
-// #define TCG_TARGET_HAS_ext8u_i64
-// #define TCG_TARGET_HAS_ext16u_i64
-// #define TCG_TARGET_HAS_ext32u_i64
+#define TCG_TARGET_HAS_ext8s_i64
+#define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+#define TCG_TARGET_HAS_ext8u_i64
+#define TCG_TARGET_HAS_ext16u_i64
+#define TCG_TARGET_HAS_ext32u_i64
 // #define TCG_TARGET_HAS_bswap16_i64
 // #define TCG_TARGET_HAS_bswap32_i64
 // #define TCG_TARGET_HAS_bswap64_i64
-- 
1.7.0.1
