On 5/2/25 05:03, Richard Henderson wrote:
Since 64-on-32 is now unsupported, guest addresses always
fit in one host register.  Drop the replication of opcodes.

Signed-off-by: Richard Henderson <[email protected]>
---
  include/tcg/tcg-opc.h            |  28 ++------
  tcg/optimize.c                   |  21 ++----
  tcg/tcg-op-ldst.c                |  82 +++++----------------
  tcg/tcg.c                        |  42 ++++-------
  tcg/tci.c                        | 119 ++++++-------------------------
  tcg/aarch64/tcg-target.c.inc     |  36 ++++------
  tcg/arm/tcg-target.c.inc         |  40 +++--------
  tcg/i386/tcg-target.c.inc        |  69 ++++--------------
  tcg/loongarch64/tcg-target.c.inc |  36 ++++------
  tcg/mips/tcg-target.c.inc        |  51 +++----------
  tcg/ppc/tcg-target.c.inc         |  68 ++++--------------
  tcg/riscv/tcg-target.c.inc       |  24 +++----
  tcg/s390x/tcg-target.c.inc       |  36 ++++------
  tcg/sparc64/tcg-target.c.inc     |  24 +++----
  tcg/tci/tcg-target.c.inc         |  60 ++++------------
  15 files changed, 177 insertions(+), 559 deletions(-)


diff --git a/tcg/tcg.c b/tcg/tcg.c
index 43b6712286..295004b74f 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c


-            case INDEX_op_qemu_ld_a32_i32:
-            case INDEX_op_qemu_ld_a64_i32:
-            case INDEX_op_qemu_st_a32_i32:
-            case INDEX_op_qemu_st_a64_i32:
-            case INDEX_op_qemu_st8_a32_i32:
-            case INDEX_op_qemu_st8_a64_i32:
-            case INDEX_op_qemu_ld_a32_i64:
-            case INDEX_op_qemu_ld_a64_i64:
-            case INDEX_op_qemu_st_a32_i64:
-            case INDEX_op_qemu_st_a64_i64:
-            case INDEX_op_qemu_ld_a32_i128:
-            case INDEX_op_qemu_ld_a64_i128:
-            case INDEX_op_qemu_st_a32_i128:
-            case INDEX_op_qemu_st_a64_i128:
+            case INDEX_op_qemu_ld_i32:
+            case INDEX_op_qemu_st_i32:
+            case INDEX_op_qemu_st8_i32:
+            case INDEX_op_qemu_ld_i64:
+            case INDEX_op_qemu_st_i64:
+            case INDEX_op_qemu_ld_i128:
+            case INDEX_op_qemu_st_i128:

Nice :)
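
For readers following along, a minimal self-contained sketch of what the consolidation means for opcode selection — plain C with made-up enumerator and helper names mirroring the real INDEX_op_* ones, not the actual tcg-op-ldst.c code:

```c
/* Demo only: before the patch the opcode effectively encoded both the
 * guest address width and the data width; afterwards only the data
 * width remains. */
typedef enum {
    OP_QEMU_LD_A32_I32,   /* before: 32-bit guest address */
    OP_QEMU_LD_A64_I32,   /* before: 64-bit guest address */
    OP_QEMU_LD_I32,       /* after: one opcode, any address width */
} DemoOpcode;

typedef enum { ADDR_I32, ADDR_I64 } DemoAddrType;

/* How selection used to look: one opcode per (address, data) pair. */
static DemoOpcode ld_i32_opc_before(DemoAddrType addr)
{
    return addr == ADDR_I32 ? OP_QEMU_LD_A32_I32 : OP_QEMU_LD_A64_I32;
}

/* And now: the address always fits one host register, so no dependence
 * on the address type at all. */
static DemoOpcode ld_i32_opc_after(void)
{
    return OP_QEMU_LD_I32;
}
```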

diff --git a/tcg/tci.c b/tcg/tci.c
index 8c1c53424d..d223258efe 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -154,16 +154,6 @@ static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
      *i4 = extract32(insn, 26, 6);
  }
-static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
-                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
-{
-    *r0 = extract32(insn, 8, 4);
-    *r1 = extract32(insn, 12, 4);
-    *r2 = extract32(insn, 16, 4);
-    *r3 = extract32(insn, 20, 4);
-    *r4 = extract32(insn, 24, 4);
-}
-
  static void tci_args_rrrr(uint32_t insn,
                            TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
  {
@@ -912,43 +902,21 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
              tb_ptr = ptr;
              break;
-        case INDEX_op_qemu_ld_a32_i32:
+        case INDEX_op_qemu_ld_i32:
              tci_args_rrm(insn, &r0, &r1, &oi);
-            taddr = (uint32_t)regs[r1];
-            goto do_ld_i32;
-        case INDEX_op_qemu_ld_a64_i32:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = tci_uint64(regs[r2], regs[r1]);
-                oi = regs[r3];
-            }
-        do_ld_i32:
+            taddr = regs[r1];
              regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
              break;
-        case INDEX_op_qemu_ld_a32_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = (uint32_t)regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = (uint32_t)regs[r2];
-                oi = regs[r3];
-            }
-            goto do_ld_i64;
-        case INDEX_op_qemu_ld_a64_i64:
+        case INDEX_op_qemu_ld_i64:
              if (TCG_TARGET_REG_BITS == 64) {
                  tci_args_rrm(insn, &r0, &r1, &oi);
                  taddr = regs[r1];
              } else {
-                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
-                taddr = tci_uint64(regs[r3], regs[r2]);
-                oi = regs[r4];
+                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+                taddr = regs[r2];
+                oi = regs[r3];
              }
-        do_ld_i64:
              tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
              if (TCG_TARGET_REG_BITS == 32) {
                  tci_write_reg64(regs, r1, r0, tmp64);
@@ -957,47 +925,23 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
              }
              break;
-        case INDEX_op_qemu_st_a32_i32:
+        case INDEX_op_qemu_st_i32:
              tci_args_rrm(insn, &r0, &r1, &oi);
-            taddr = (uint32_t)regs[r1];
-            goto do_st_i32;
-        case INDEX_op_qemu_st_a64_i32:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = tci_uint64(regs[r2], regs[r1]);
-                oi = regs[r3];
-            }
-        do_st_i32:
+            taddr = regs[r1];
              tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
              break;
-        case INDEX_op_qemu_st_a32_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                tmp64 = regs[r0];
-                taddr = (uint32_t)regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                tmp64 = tci_uint64(regs[r1], regs[r0]);
-                taddr = (uint32_t)regs[r2];
-                oi = regs[r3];
-            }
-            goto do_st_i64;
-        case INDEX_op_qemu_st_a64_i64:
+        case INDEX_op_qemu_st_i64:
              if (TCG_TARGET_REG_BITS == 64) {
                  tci_args_rrm(insn, &r0, &r1, &oi);
                  tmp64 = regs[r0];
                  taddr = regs[r1];
              } else {
-                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                  tmp64 = tci_uint64(regs[r1], regs[r0]);
-                taddr = tci_uint64(regs[r3], regs[r2]);
-                oi = regs[r4];
+                taddr = regs[r2];
+                oi = regs[r3];
              }

My tci is rusty, but this LGTM.
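
For the 32-bit-host path that remains, the i64 value still travels as a register pair; here is a minimal sketch of the pairing helper, assuming the usual high/low packing (the real tci_uint64 lives earlier in tcg/tci.c and is not shown in this hunk):

```c
#include <stdint.h>

/* Sketch of the register-pair packing used above: tci_uint64(regs[r1], regs[r0])
 * combines the high half from r1 with the low half from r0 into one 64-bit
 * value.  Assumed to match the real helper in tcg/tci.c. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) | low;
}
```

The key point of the hunk is that the guest address now needs only regs[r2] instead of a regs[r3]:regs[r2] pair, which is what lets the five-register tci_args_rrrrr decoder go away.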

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 6e711cd53f..801cb6f3cb 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -3308,17 +3308,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
          tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
          break;
-    case INDEX_op_qemu_ld_a64_i32:
-        if (TCG_TARGET_REG_BITS == 32) {
-            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
-                            args[3], TCG_TYPE_I32);
-            break;
-        }
-        /* fall through */
-    case INDEX_op_qemu_ld_a32_i32:
+    case INDEX_op_qemu_ld_i32:
          tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
          break;
-    case INDEX_op_qemu_ld_a32_i64:
+    case INDEX_op_qemu_ld_i64:
          if (TCG_TARGET_REG_BITS == 64) {
              tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
                              args[2], TCG_TYPE_I64);
@@ -3327,32 +3320,15 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                              args[3], TCG_TYPE_I64);
          }
          break;
-    case INDEX_op_qemu_ld_a64_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
-                            args[2], TCG_TYPE_I64);
-        } else {
-            tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
-                            args[4], TCG_TYPE_I64);
-        }
-        break;
-    case INDEX_op_qemu_ld_a32_i128:
-    case INDEX_op_qemu_ld_a64_i128:
+    case INDEX_op_qemu_ld_i128:
          tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
          tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
          break;
-    case INDEX_op_qemu_st_a64_i32:
-        if (TCG_TARGET_REG_BITS == 32) {
-            tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
-                            args[3], TCG_TYPE_I32);
-            break;
-        }
-        /* fall through */
-    case INDEX_op_qemu_st_a32_i32:
+    case INDEX_op_qemu_st_i32:
          tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
          break;
-    case INDEX_op_qemu_st_a32_i64:
+    case INDEX_op_qemu_st_i64:
          if (TCG_TARGET_REG_BITS == 64) {
              tcg_out_qemu_st(s, args[0], -1, args[1], -1,
                              args[2], TCG_TYPE_I64);
@@ -3361,17 +3337,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                              args[3], TCG_TYPE_I64);
          }
          break;
-    case INDEX_op_qemu_st_a64_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
-                            args[2], TCG_TYPE_I64);
-        } else {
-            tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
-                            args[4], TCG_TYPE_I64);
-        }
-        break;

Diff context isn't sufficient to review, but after applying and
looking at the result, PPC LGTM.


diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
@@ -833,29 +811,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
          tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
          break;
-    case INDEX_op_qemu_ld_a32_i32:
-    case INDEX_op_qemu_st_a32_i32:
-        tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
-        break;
-    case INDEX_op_qemu_ld_a64_i32:
-    case INDEX_op_qemu_st_a64_i32:
-    case INDEX_op_qemu_ld_a32_i64:
-    case INDEX_op_qemu_st_a32_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
-        } else {
+    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_st_i64:
+        if (TCG_TARGET_REG_BITS == 32) {
              tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
              tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
+            break;
          }
-        break;
-    case INDEX_op_qemu_ld_a64_i64:
-    case INDEX_op_qemu_st_a64_i64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
+        /* fall through */
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_st_i32:
+        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
+            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
+            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
          } else {
-            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]);
-            tcg_out_op_rrrrr(s, opc, args[0], args[1],
-                             args[2], args[3], TCG_REG_TMP);
+            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
          }
          break;

Also LGTM looking at applied changes, so I dare to:

Reviewed-by: Philippe Mathieu-Daudé <[email protected]>

(thankfully 64-bit hosts were trivial)
