In preparation for removing the cpu_ld*_mmuidx_ra() and cpu_st*_mmuidx_ra() calls, inline them.
Signed-off-by: Philippe Mathieu-Daudé <[email protected]> --- target/ppc/mem_helper.c | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index 636ad769a93..90546967593 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -88,8 +88,11 @@ void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg) } } else { /* Slow path -- at least some of the operation requires i/o. */ + MemOp op = MO_TE | MO_UL | MO_UNALN; + MemOpIdx oi = make_memop_idx(op, mmu_idx); + for (; reg < 32; reg++) { - env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr); + env->gpr[reg] = cpu_ldl_mmu(env, addr, oi, raddr); addr = addr_add(env, addr, 4); } } @@ -111,7 +114,10 @@ void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg) } else { /* Slow path -- at least some of the operation requires i/o. */ for (; reg < 32; reg++) { - cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr); + MemOp op = MO_TE | MO_UL | MO_UNALN; + MemOpIdx oi = make_memop_idx(op, mmu_idx); + + cpu_stl_mmu(env, addr, env->gpr[reg], oi, raddr); addr = addr_add(env, addr, 4); } } @@ -152,9 +158,12 @@ static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, break; } } else { + MemOp op = MO_TE | MO_UL | MO_UNALN; + MemOpIdx oi = make_memop_idx(op, mmu_idx); + /* Slow path -- at least some of the operation requires i/o. 
*/ for (; nb > 3; nb -= 4) { - env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr); + env->gpr[reg] = cpu_ldl_mmu(env, addr, oi, raddr); reg = (reg + 1) % 32; addr = addr_add(env, addr, 4); } @@ -165,10 +174,14 @@ static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24; break; case 2: - val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16; + op = MO_TE | MO_UW | MO_UNALN; + oi = make_memop_idx(op, mmu_idx); + val = cpu_ldw_mmu(env, addr, oi, raddr) << 16; break; case 3: - val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16; + op = MO_TE | MO_UW | MO_UNALN; + oi = make_memop_idx(op, mmu_idx); + val = cpu_ldw_mmu(env, addr, oi, raddr) << 16; addr = addr_add(env, addr, 2); val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8; break; @@ -241,8 +254,11 @@ void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb, break; } } else { + MemOp op = MO_TE | MO_UL | MO_UNALN; + MemOpIdx oi = make_memop_idx(op, mmu_idx); + for (; nb > 3; nb -= 4) { - cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr); + cpu_stl_mmu(env, addr, env->gpr[reg], oi, raddr); reg = (reg + 1) % 32; addr = addr_add(env, addr, 4); } @@ -252,10 +268,14 @@ void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb, cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr); break; case 2: - cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr); + op = MO_TE | MO_UW | MO_UNALN; + oi = make_memop_idx(op, mmu_idx); + cpu_stw_mmu(env, addr, val >> 16, oi, raddr); break; case 3: - cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr); + op = MO_TE | MO_UW | MO_UNALN; + oi = make_memop_idx(op, mmu_idx); + cpu_stw_mmu(env, addr, val >> 16, oi, raddr); addr = addr_add(env, addr, 2); cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr); break; @@ -284,8 +304,11 @@ static void dcbz_common(CPUPPCState *env, target_ulong addr, haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr); if 
(unlikely(!haddr)) { /* Slow path */ + MemOp op = MO_TE | MO_UQ | MO_UNALN; + MemOpIdx oi = make_memop_idx(op, mmu_idx); + for (int i = 0; i < dcbz_size; i += 8) { - cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr); + cpu_stq_mmu(env, addr + i, 0, oi, retaddr); } return; } -- 2.52.0
