The LWL/LDL instructions mask the GPR with a mask depending on the address alignment. It is currently computed by doing: mask = 0x7fffffffffffffffull >> (t1 ^ 63) It's simpler to generate it by doing: mask = (1 << t1) - 1 It uses the same number of TCG instructions, but it avoids loading a 32/64-bit constant, which can take a few instructions on RISC hosts. Cc: Leon Alrae <leon.al...@imgtec.com> Tested-by: Hervé Poussineau <hpous...@reactos.org> Signed-off-by: Aurelien Jarno <aurel...@aurel32.net> --- target-mips/translate.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/target-mips/translate.c b/target-mips/translate.c index 0ac3bd8..9891209 100644 --- a/target-mips/translate.c +++ b/target-mips/translate.c @@ -2153,9 +2153,9 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, tcg_gen_andi_tl(t0, t0, ~7); tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ); tcg_gen_shl_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 63); - t2 = tcg_const_tl(0x7fffffffffffffffull); - tcg_gen_shr_tl(t2, t2, t1); + t2 = tcg_const_tl(1); + tcg_gen_shl_tl(t2, t2, t1); + tcg_gen_subi_tl(t2, t2, 1); gen_load_gpr(t1, rt); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); @@ -2246,9 +2246,9 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); tcg_gen_shl_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 31); - t2 = tcg_const_tl(0x7fffffffull); - tcg_gen_shr_tl(t2, t2, t1); + t2 = tcg_const_tl(1); + tcg_gen_shl_tl(t2, t2, t1); + tcg_gen_subi_tl(t2, t2, 1); gen_load_gpr(t1, rt); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); -- 2.1.4
mask = 0x7fffffffffffffffull >> (t1 ^ 63) It's simpler to generate it by doing: mask = (1 << t1) - 1 It uses the same number of TCG instructions, but it avoids a 32/64-bit constant loading which can take a few instructions on RISC hosts. Cc: Leon Alrae <leon.al...@imgtec.com> Tested-by: Hervé Poussineau <hpous...@reactos.org> Signed-off-by: Aurelien Jarno <aurel...@aurel32.net> --- target-mips/translate.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/target-mips/translate.c b/target-mips/translate.c index 0ac3bd8..9891209 100644 --- a/target-mips/translate.c +++ b/target-mips/translate.c @@ -2153,9 +2153,9 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, tcg_gen_andi_tl(t0, t0, ~7); tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ); tcg_gen_shl_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 63); - t2 = tcg_const_tl(0x7fffffffffffffffull); - tcg_gen_shr_tl(t2, t2, t1); + t2 = tcg_const_tl(1); + tcg_gen_shl_tl(t2, t2, t1); + tcg_gen_subi_tl(t2, t2, 1); gen_load_gpr(t1, rt); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); @@ -2246,9 +2246,9 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL); tcg_gen_shl_tl(t0, t0, t1); - tcg_gen_xori_tl(t1, t1, 31); - t2 = tcg_const_tl(0x7fffffffull); - tcg_gen_shr_tl(t2, t2, t1); + t2 = tcg_const_tl(1); + tcg_gen_shl_tl(t2, t2, t1); + tcg_gen_subi_tl(t2, t2, 1); gen_load_gpr(t1, rt); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); -- 2.1.4