Now that do_unaligned_access has been implemented for 68k CPUs, pass the required alignment into the TCG memory load/store routines. This allows the TCG memory core to generate an Address Error exception for unaligned memory accesses if required.
Suggested-by: Laurent Vivier <laur...@vivier.eu>
Signed-off-by: Mark Cave-Ayland <mark.cave-ayl...@ilande.co.uk>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2165
---
 target/m68k/translate.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 445966fb6a..661a7b4def 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -303,13 +303,18 @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                             int sign, int index)
 {
     TCGv tmp = tcg_temp_new_i32();
+    MemOp memop = opsize | (sign ? MO_SIGN : 0) | MO_TE;
 
     switch (opsize) {
     case OS_BYTE:
+        tcg_gen_qemu_ld_tl(tmp, addr, index, memop);
+        break;
     case OS_WORD:
     case OS_LONG:
-        tcg_gen_qemu_ld_tl(tmp, addr, index,
-                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
+        if (!m68k_feature(s->env, M68K_FEATURE_UNALIGNED_DATA)) {
+            memop |= MO_ALIGN_2;
+        }
+        tcg_gen_qemu_ld_tl(tmp, addr, index, memop);
         break;
     default:
         g_assert_not_reached();
@@ -321,11 +326,18 @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                              int index)
 {
+    MemOp memop = opsize | MO_TE;
+
     switch (opsize) {
     case OS_BYTE:
+        tcg_gen_qemu_st_tl(val, addr, index, memop);
+        break;
     case OS_WORD:
     case OS_LONG:
-        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
+        if (!m68k_feature(s->env, M68K_FEATURE_UNALIGNED_DATA)) {
+            memop |= MO_ALIGN_2;
+        }
+        tcg_gen_qemu_st_tl(val, addr, index, memop);
         break;
     default:
         g_assert_not_reached();
-- 
2.39.2