From: Richard Henderson <richard.hender...@linaro.org> This is less tricky than for loads, because we always fall back to single-byte stores to implement unaligned stores.
Signed-off-by: Richard Henderson <richard.hender...@linaro.org> Signed-off-by: Alex Bennée <alex.ben...@linaro.org> --- accel/tcg/cputlb.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index e4d0c94301..a083324768 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1413,9 +1413,9 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, * Store Helpers */ -static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr, size_t size, - bool big_endian) +static inline void __attribute__((always_inline)) +store_helper(CPUArchState *env, target_ulong addr, uint64_t val, + TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian) { uintptr_t mmu_idx = get_mmuidx(oi); uintptr_t index = tlb_index(env, mmu_idx, addr); @@ -1514,7 +1514,7 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val, /* Little-endian extract. */ val8 = val >> (i * 8); } - store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian); + helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); } return; } -- 2.20.1