Similar to the existing set_jmp_reset_offset. Move any assert for
TCG_TARGET_HAS_direct_jump into the new function (which now cannot be
a build-time assert). The helper will be unused if
TCG_TARGET_HAS_direct_jump is constant 0, but we can't test for a
constant in the preprocessor, so just mark it G_GNUC_UNUSED.
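
As background on the G_GNUC_UNUSED choice: the helper lives in common
code (tcg/tcg.c), while all calls come from the per-target
tcg-target.c.inc. A backend that defines TCG_TARGET_HAS_direct_jump
as 0 never references the helper at all, and because the macro may
also expand to a runtime expression, the definition cannot be hidden
behind an #if. A minimal standalone sketch of the pattern follows;
the names (HAVE_FEATURE, emit_feature_offset) are illustrative, not
QEMU code:

    /* sketch.c -- illustrative only.  HAVE_FEATURE stands in for
     * TCG_TARGET_HAS_direct_jump, which a backend may define as 0,
     * 1, or a runtime expression, so the definition cannot be
     * guarded with "#if HAVE_FEATURE".
     */
    #include <glib.h>

    #define HAVE_FEATURE 0   /* pretend this backend lacks the feature */

    /*
     * A backend with HAVE_FEATURE == 0 never calls this helper, and
     * -Wunused-function (fatal with -Werror) would otherwise reject
     * the build; G_GNUC_UNUSED suppresses exactly that warning.  The
     * assert must be a runtime one, since the condition need not be
     * a compile-time constant.
     */
    static void G_GNUC_UNUSED emit_feature_offset(void)
    {
        g_assert(HAVE_FEATURE);
    }

    int main(void)
    {
        /* A backend with the feature would call emit_feature_offset()
         * here; this one does not, so the attribute is what keeps the
         * build quiet.
         */
        return 0;
    }

Compiling the sketch with -Wall -Werror and the attribute removed
reproduces the failure the marking is guarding against.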
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 tcg/tcg.c                        | 10 ++++++++++
 tcg/aarch64/tcg-target.c.inc     |  3 +--
 tcg/i386/tcg-target.c.inc        |  3 +--
 tcg/loongarch64/tcg-target.c.inc |  3 +--
 tcg/ppc/tcg-target.c.inc         |  7 +++----
 tcg/s390x/tcg-target.c.inc       |  2 +-
 tcg/sparc64/tcg-target.c.inc     |  5 ++---
 7 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index b53961baf7..df5a6cedf0 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -320,6 +320,16 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
     s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
+{
+    /*
+     * We will check for overflow at the end of the opcode loop in
+     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
+     */
+    tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
+    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
+}
+
 /* Signal overflow, starting over with fewer guest insns. */
 static G_NORETURN
 void tcg_raise_tb_overflow(TCGContext *s)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 8e97da3a39..3aa89d6060 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1917,7 +1917,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
          * write can be used to patch the target address.
@@ -1925,7 +1924,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index f3a40fc428..3d4cf71552 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2382,7 +2382,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         {
             /*
              * Jump displacement must be aligned for atomic patching;
@@ -2393,7 +2392,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 tcg_out_nopn(s, gap - 1);
             }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, 0);
         }
         set_jmp_reset_offset(s, a0);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 78398684cd..dd4934d6d5 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1089,7 +1089,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that patch area is 8-byte aligned so that an
          * atomic write can be used to patch the target address.
@@ -1097,7 +1096,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out_nop(s);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 3f9ee4b39a..86684e1c84 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2630,20 +2630,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (TCG_TARGET_REG_BITS == 64) {
             /* Ensure the next insns are 8 or 16-byte aligned. */
             while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
                 tcg_out32(s, NOP);
             }
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
             tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
         } else {
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, B);
-            s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_reset_offset(s, args[0]);
             break;
         }
         tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 9498694564..ad6433fcf2 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -2105,7 +2105,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             }
             tcg_debug_assert(!USE_REG_TB);
             tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             s->code_ptr += 2;
         } else {
             /* load address stored at s->tb_jmp_target_addr + a0 */
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index f035bf7dd2..f0c6332ef1 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1451,20 +1451,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (USE_REG_TB) {
             /* make sure the patch is 8-byte aligned.  */
             if ((intptr_t)s->code_ptr & 4) {
                 tcg_out_nop(s);
             }
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out_sethi(s, TCG_REG_T1, 0);
             tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
             tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
             tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
         } else {
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, CALL);
             tcg_out_nop(s);
         }
-- 
2.34.1