For user-only emulation, the Linux kernel fixes up unaligned accesses, so emulate that by simply allowing them to succeed.
Reviewed-by: Edgar E. Iglesias <edgar.igles...@xilinx.com>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/microblaze/translate.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index a14ffed784..ef44bca2fd 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -727,6 +727,7 @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
 }
 #endif
 
+#ifndef CONFIG_USER_ONLY
 static void record_unaligned_ess(DisasContext *dc, int rd,
                                  MemOp size, bool store)
 {
@@ -739,6 +740,7 @@ static void record_unaligned_ess(DisasContext *dc, int rd,
 
     tcg_set_insn_start_param(dc->insn_start, 1, iflags);
 }
+#endif
 
 static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
@@ -760,12 +762,19 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
         }
     }
 
+    /*
+     * For system mode, enforce alignment if the cpu configuration
+     * requires it.  For user-mode, the Linux kernel will have fixed up
+     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+     */
+#ifndef CONFIG_USER_ONLY
     if (size > MO_8 &&
         (dc->tb_flags & MSR_EE) &&
         dc->cfg->unaligned_exceptions) {
         record_unaligned_ess(dc, rd, size, false);
         mop |= MO_ALIGN;
     }
+#endif
 
     tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
 
@@ -906,12 +915,19 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
         }
     }
 
+    /*
+     * For system mode, enforce alignment if the cpu configuration
+     * requires it.  For user-mode, the Linux kernel will have fixed up
+     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+     */
+#ifndef CONFIG_USER_ONLY
     if (size > MO_8 &&
         (dc->tb_flags & MSR_EE) &&
         dc->cfg->unaligned_exceptions) {
         record_unaligned_ess(dc, rd, size, true);
         mop |= MO_ALIGN;
     }
+#endif
 
     tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
 
-- 
2.25.1
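
As a rough illustration of the guest-visible effect (not part of the patch; the file name is made up and the result depends on the compiler emitting a single word load): under qemu-microblaze linux-user, a deliberately misaligned load like the one below is now allowed to complete, matching a kernel that fixes up unaligned accesses, while a system-mode guest configured with unaligned_exceptions still sees MO_ALIGN enforced.

/* unaligned-demo.c (hypothetical test program, not part of this patch) */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    /*
     * Deliberately misaligned 32-bit load through buf + 1.  With this
     * change, user-mode emulation lets the access succeed, as a
     * fixing-up kernel would; in system mode with unaligned_exceptions
     * the unaligned-access exception is still raised.
     */
    volatile uint32_t *p = (volatile uint32_t *)(buf + 1);
    uint32_t v = *p;

    printf("unaligned load: 0x%08x\n", v);
    return 0;
}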