MMU helper functions are called from generated code and from other helper
functions. In both cases they try to obtain the return address of the call
and use it when restoring the virtual CPU state.
When an MMU helper is called from another helper function (such as
helper_maskmov_xmm) through a cpu_st* function, the return address points
into that helper. As a result, the CPU state cannot be restored when an
MMU fault occurs. This patch introduces several inline helpers that load
the return address at the right place.

Signed-off-by: Pavel Dovgaluk <pavel.dovga...@gmail.com>
---
 include/exec/exec-all.h         | 27 +++++++++++++++++++++++++++
 include/exec/softmmu_header.h   | 32 ++++++++++++++++++++++++++++----
 include/exec/softmmu_template.h | 18 ++++++++++++++++++
 3 files changed, 73 insertions(+), 4 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index ea90b64..010c9ba 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -338,6 +338,33 @@ uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 
+uint8_t helper_call_ldb_cmmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+uint16_t helper_call_ldw_cmmu(CPUArchState *env, target_ulong addr,
+                              int mmu_idx, uintptr_t retaddr);
+uint32_t helper_call_ldl_cmmu(CPUArchState *env, target_ulong addr,
+                              int mmu_idx, uintptr_t retaddr);
+uint64_t helper_call_ldq_cmmu(CPUArchState *env, target_ulong addr,
+                              int mmu_idx, uintptr_t retaddr);
+
+uint8_t helper_call_ldb_mmu(CPUArchState *env, target_ulong addr,
+                            int mmu_idx, uintptr_t retaddr);
+uint16_t helper_call_ldw_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+uint32_t helper_call_ldl_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+uint64_t helper_call_ldq_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
+
+void helper_call_stb_mmu(CPUArchState *env, target_ulong addr,
+                         uint8_t val, int mmu_idx, uintptr_t retaddr);
+void helper_call_stw_mmu(CPUArchState *env, target_ulong addr,
+                         uint16_t val, int mmu_idx, uintptr_t retaddr);
+void helper_call_stl_mmu(CPUArchState *env, target_ulong addr,
+                         uint32_t val, int mmu_idx, uintptr_t retaddr);
+void helper_call_stq_mmu(CPUArchState *env, target_ulong addr,
+                         uint64_t val, int mmu_idx, uintptr_t retaddr);
+
 #define ACCESS_TYPE (NB_MMU_MODES + 1)
 #define MEMSUFFIX _code
 
diff --git a/include/exec/softmmu_header.h b/include/exec/softmmu_header.h
index d8d9c81..954b79e 100644
--- a/include/exec/softmmu_header.h
+++ b/include/exec/softmmu_header.h
@@ -78,6 +78,17 @@
 #define ADDR_READ addr_read
 #endif
 
+/* inline helper ld function */
+
+static inline DATA_TYPE
+glue(glue(helper_inline_ld, SUFFIX), MEMSUFFIX)(CPUArchState *env,
+                                                target_ulong addr,
+                                                int mmu_idx)
+{
+    return glue(glue(helper_call_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
+                                                         GETRA());
+}
+
 /* generic load/store macros */
 
 static inline RES_TYPE
@@ -93,7 +104,8 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+        res = glue(glue(helper_inline_ld, SUFFIX),
+                   MEMSUFFIX)(env, addr, mmu_idx);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
@@ -114,8 +126,8 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
-                               MMUSUFFIX)(env, addr, mmu_idx);
+        res = (DATA_STYPE)glue(glue(helper_inline_ld, SUFFIX),
+                               MEMSUFFIX)(env, addr, mmu_idx);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
@@ -126,6 +138,18 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
 
 #if ACCESS_TYPE != (NB_MMU_MODES + 1)
 
+/* inline helper st function */
+
+static inline void
+glue(glue(helper_inline_st, SUFFIX), MEMSUFFIX)(CPUArchState *env,
+                                                target_ulong addr,
+                                                DATA_TYPE val,
+                                                int mmu_idx)
+{
+    glue(glue(helper_call_st, SUFFIX), MMUSUFFIX)(env, addr, val,
+                                                  mmu_idx, GETRA());
+}
+
 /* generic store macro */
 
 static inline void
@@ -141,7 +165,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+        glue(glue(helper_inline_st, SUFFIX), MEMSUFFIX)(env, addr, v, mmu_idx);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         glue(glue(st, SUFFIX), _raw)(hostaddr, v);
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index c6a5440..5ea6611 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -298,6 +298,15 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
     return helper_te_ld_name (env, addr, mmu_idx, GETRA());
 }
 
+DATA_TYPE
+glue(glue(helper_call_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr,
+                                              int mmu_idx,
+                                              uintptr_t retaddr)
+{
+    return helper_te_ld_name(env, addr, mmu_idx, retaddr);
+}
+
 #ifndef SOFTMMU_CODE_ACCESS
 
 /* Provide signed versions of the load routines as well. We can of course
@@ -491,6 +500,15 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
     helper_te_st_name(env, addr, val, mmu_idx, GETRA());
 }
 
+void
+glue(glue(helper_call_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
+                                              target_ulong addr,
+                                              DATA_TYPE val, int mmu_idx,
+                                              uintptr_t retaddr)
+{
+    helper_te_st_name(env, addr, val, mmu_idx, retaddr);
+}
+
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
 
 #undef READ_ACCESS_TYPE
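
For reviewers who have not stared at GETPC()/GETRA() before: the point of
routing cpu_ld*/cpu_st* through the new static inline helper_inline_ld/st
wrappers is that GETRA() is then evaluated in the frame of the target helper
that called cpu_ld*/cpu_st*, so the retaddr passed down to helper_call_*_mmu
is that helper's own return address, i.e. an address inside the translated
block that the state-restoring code can resolve. The sketch below is only an
illustration of the compiler behaviour this relies on; it is not QEMU code,
all function names in it are made up, and the GETRA() definition merely
mirrors the idea of QEMU's macro (assuming GCC/Clang semantics of
__builtin_return_address(0) under inlining).

/*
 * Standalone sketch: __builtin_return_address(0) evaluated inside a
 * function that gets inlined resolves in the frame it was inlined into,
 * so the captured address points at that function's caller (in QEMU,
 * the translated block) rather than at an out-of-line C helper.
 */
#include <stdio.h>
#include <stdint.h>

#define GETRA() ((uintptr_t)__builtin_return_address(0))

/* Old scheme: retaddr taken inside the out-of-line MMU helper itself.
 * When the caller is a C helper, this yields an address inside that C
 * helper, which is useless for finding a TB and restoring CPU state. */
static uintptr_t __attribute__((noinline)) old_style_mmu_helper(void)
{
    return GETRA();
}

/* New scheme: the wrapper is forcibly inlined, so GETRA() is evaluated
 * in the caller's frame and captures the caller's own return address. */
static inline __attribute__((always_inline)) uintptr_t new_style_wrapper(void)
{
    return GETRA();
}

/* Stands in for a target helper (e.g. helper_maskmov_xmm) doing cpu_st*. */
static void __attribute__((noinline)) target_helper(void)
{
    printf("old scheme: %p (points inside target_helper)\n",
           (void *)old_style_mmu_helper());
    printf("new scheme: %p (points inside target_helper's caller)\n",
           (void *)new_style_wrapper());
}

int main(void)
{
    /* main() plays the role of the generated code that calls the helper. */
    target_helper();
    return 0;
}

Built with gcc (any optimisation level, thanks to always_inline), the first
printf reports an address inside target_helper() while the second reports one
inside main(); that is exactly the difference between letting the out-of-line
helper_ld/st*_mmu compute GETRA() itself and the new helper_call_*_mmu path
that receives retaddr explicitly from the inline wrapper.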