MIPS SIMD Architecture (MSA) vector loads and stores require misalignment support. An MSA memory access should behave as an atomic operation; therefore, when an access spans two pages, the validity of all addresses involved must be checked before any part of the access is performed.
Introduced misaligned flag to indicate MSA ld/st is ongoing, is used to allow misaligned accesses in the mips_cpu_do_unaligned_access() callback. This is crucial to support MSA misaligned accesses in Release 5 cores. Further clean up to use mmu_idx from cpu_mmu_index() instead of calculating it from hflag. Signed-off-by: Yongbok Kim <yongbok....@imgtec.com> --- target-mips/cpu.h | 5 +++ target-mips/helper.c | 33 ++++++++++++++++++++++ target-mips/op_helper.c | 69 ++++++++++++++++++++++++++++++++++------------ target-mips/translate.c | 4 +++ 4 files changed, 93 insertions(+), 18 deletions(-) diff --git a/target-mips/cpu.h b/target-mips/cpu.h index f9d2b4c..1bd1229 100644 --- a/target-mips/cpu.h +++ b/target-mips/cpu.h @@ -211,6 +211,9 @@ struct TCState { MSACSR_FS_MASK) float_status msa_fp_status; +#if !defined(CONFIG_USER_ONLY) + bool misaligned; /* indicates misaligned access is allowed */ +#endif }; typedef struct CPUMIPSState CPUMIPSState; @@ -760,6 +763,8 @@ int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra); hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address, int rw); +bool cpu_mips_validate_msa_block_access(CPUMIPSState *env, target_ulong addr, + int rw, int mmu_idx); #endif target_ulong exception_resume_pc (CPUMIPSState *env); diff --git a/target-mips/helper.c b/target-mips/helper.c index 8e3204a..951aea8 100644 --- a/target-mips/helper.c +++ b/target-mips/helper.c @@ -391,6 +391,37 @@ hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int r } } +bool cpu_mips_validate_msa_block_access(CPUMIPSState *env, target_ulong addr, + int rw, int mmu_idx) +{ + target_ulong vaddr = addr & TARGET_PAGE_MASK; + target_ulong badvaddr = addr; + + CPUState *cs = CPU(mips_env_get_cpu(env)); + int ret; + + ret = mips_cpu_handle_mmu_fault(cs, vaddr, rw, mmu_idx); + if (ret != TLBRET_MATCH) { + /* calling raise_mmu_exeception again to correct 
badvaddr */ + raise_mmu_exception(env, badvaddr, rw, ret); + return false; + } + if (unlikely(((addr & ~TARGET_PAGE_MASK) + MSA_WRLEN - 1) + >= TARGET_PAGE_SIZE)) { + vaddr += TARGET_PAGE_SIZE; + ret = mips_cpu_handle_mmu_fault(cs, vaddr, rw, mmu_idx); + if (ret != TLBRET_MATCH) { + if (ret != TLBRET_BADADDR) { + badvaddr = vaddr; + } + /* calling raise_mmu_exeception again to correct badvaddr */ + raise_mmu_exception(env, badvaddr, rw, ret); + return false; + } + } + return true; +} + static const char * const excp_names[EXCP_LAST + 1] = { [EXCP_RESET] = "reset", [EXCP_SRESET] = "soft reset", @@ -497,6 +528,8 @@ void mips_cpu_do_interrupt(CPUState *cs) qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n", __func__, env->active_tc.PC, env->CP0_EPC, name); } + + env->active_tc.misaligned = false; if (cs->exception_index == EXCP_EXT_INTERRUPT && (env->hflags & MIPS_HFLAG_DM)) { cs->exception_index = EXCP_DINT; diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c index 58f02cf..b3b8d52 100644 --- a/target-mips/op_helper.c +++ b/target-mips/op_helper.c @@ -47,7 +47,9 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, /* now we have a real cpu fault */ cpu_restore_state(cs, pc); } - +#if !defined(CONFIG_USER_ONLY) + env->active_tc.misaligned = false; +#endif cpu_loop_exit(cs); } @@ -2215,9 +2217,12 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, int error_code = 0; int excp; - if (env->insn_flags & ISA_MIPS32R6) { + if ((env->insn_flags & ISA_MIPS32R6) || (env->active_tc.misaligned)) { /* Release 6 provides support for misaligned memory access for * all ordinary memory reference instructions + * + * MIPS SIMD Architecture vector loads and stores support misalignment + * memory access * */ return; } @@ -3571,33 +3576,47 @@ void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong addr = env->active_tc.gpr[rs] + (s10 
<< df); int i; + int mmu_idx = cpu_mmu_index(env); + +#if !defined(CONFIG_USER_ONLY) + if (unlikely(((addr & ~TARGET_PAGE_MASK) + MSA_WRLEN - 1) + >= TARGET_PAGE_SIZE)) { + if (!cpu_mips_validate_msa_block_access(env, addr, MMU_DATA_LOAD, + mmu_idx)) { + CPUState *cs = CPU(mips_env_get_cpu(env)); + helper_raise_exception_err(env, cs->exception_index, + env->error_code); + return; + } + } + env->active_tc.misaligned = true; +#endif switch (df) { case DF_BYTE: for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - pwd->b[i] = do_lbu(env, addr + (i << DF_BYTE), - env->hflags & MIPS_HFLAG_KSU); + pwd->b[i] = do_lbu(env, addr + (i << DF_BYTE), mmu_idx); } break; case DF_HALF: for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - pwd->h[i] = do_lhu(env, addr + (i << DF_HALF), - env->hflags & MIPS_HFLAG_KSU); + pwd->h[i] = do_lhu(env, addr + (i << DF_HALF), mmu_idx); } break; case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - pwd->w[i] = do_lw(env, addr + (i << DF_WORD), - env->hflags & MIPS_HFLAG_KSU); + pwd->w[i] = do_lw(env, addr + (i << DF_WORD), mmu_idx); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - pwd->d[i] = do_ld(env, addr + (i << DF_DOUBLE), - env->hflags & MIPS_HFLAG_KSU); + pwd->d[i] = do_ld(env, addr + (i << DF_DOUBLE), mmu_idx); } break; } +#if !defined(CONFIG_USER_ONLY) + env->active_tc.misaligned = false; +#endif } void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, @@ -3606,31 +3625,45 @@ void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, wr_t *pwd = &(env->active_fpu.fpr[wd].wr); target_ulong addr = env->active_tc.gpr[rs] + (s10 << df); int i; + int mmu_idx = cpu_mmu_index(env); + +#if !defined(CONFIG_USER_ONLY) + if (unlikely(((addr & ~TARGET_PAGE_MASK) + MSA_WRLEN - 1) + >= TARGET_PAGE_SIZE)) { + if (!cpu_mips_validate_msa_block_access(env, addr, MMU_DATA_STORE, + mmu_idx)) { + CPUState *cs = CPU(mips_env_get_cpu(env)); + helper_raise_exception_err(env, 
cs->exception_index, + env->error_code); + return; + } + } + env->active_tc.misaligned = true; +#endif switch (df) { case DF_BYTE: for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { - do_sb(env, addr + (i << DF_BYTE), pwd->b[i], - env->hflags & MIPS_HFLAG_KSU); + do_sb(env, addr + (i << DF_BYTE), pwd->b[i], mmu_idx); } break; case DF_HALF: for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { - do_sh(env, addr + (i << DF_HALF), pwd->h[i], - env->hflags & MIPS_HFLAG_KSU); + do_sh(env, addr + (i << DF_HALF), pwd->h[i], mmu_idx); } break; case DF_WORD: for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { - do_sw(env, addr + (i << DF_WORD), pwd->w[i], - env->hflags & MIPS_HFLAG_KSU); + do_sw(env, addr + (i << DF_WORD), pwd->w[i], mmu_idx); } break; case DF_DOUBLE: for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { - do_sd(env, addr + (i << DF_DOUBLE), pwd->d[i], - env->hflags & MIPS_HFLAG_KSU); + do_sd(env, addr + (i << DF_DOUBLE), pwd->d[i], mmu_idx); } break; } +#if !defined(CONFIG_USER_ONLY) + env->active_tc.misaligned = false; +#endif } diff --git a/target-mips/translate.c b/target-mips/translate.c index fd063a2..674b1cb 100644 --- a/target-mips/translate.c +++ b/target-mips/translate.c @@ -19641,6 +19641,10 @@ void cpu_state_reset(CPUMIPSState *env) restore_rounding_mode(env); restore_flush_mode(env); cs->exception_index = EXCP_NONE; + +#ifndef CONFIG_USER_ONLY + env->active_tc.misaligned = false; +#endif } void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos) -- 1.7.5.4