When TBI is enabled in a given regime, 56 bits of the address are significant and we need to clear out any other matching virtual addresses with differing tags.
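As a rough standalone sketch (an illustration added here, not part of the
patch itself): VA bit 55 selects between the TBI0 and TBI1 controls, and
when the selected bit is set only the low 56 bits of the address take part
in TLB matching. The helper name below is hypothetical, and a two-bit tbi
value like the one aa64_va_parameter_tbi() returns is assumed:

    /* Hypothetical sketch only: 'tbi' holds TBI0 in bit 0 and TBI1 in
     * bit 1; VA bit 55 picks which control applies to this address. */
    #include <stdint.h>

    static int significant_va_bits(unsigned tbi, uint64_t addr)
    {
        int select = (addr >> 55) & 1;          /* 0: TTBR0 half, 1: TTBR1 half */
        return ((tbi >> select) & 1) ? 56 : 64; /* tag byte ignored when TBI set */
    }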
The other uses of tlb_flush_page (without mmuidx) in this file
are only used by aarch32 mode.

Fixes: 38d931687fa1
Reported-by: Jordan Frank <jordanfr...@fb.com>
Reviewed-by: Peter Maydell <peter.mayd...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/helper.c | 46 +++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 39 insertions(+), 7 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index cd0779ff5f..f49b045d36 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -50,6 +50,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
 #endif
 
 static void switch_mode(CPUARMState *env, int mode);
+static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
 
 static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
 {
@@ -4457,6 +4458,33 @@ static int vae1_tlbmask(CPUARMState *env)
     }
 }
 
+/* Return 56 if TBI is enabled, 64 otherwise. */
+static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
+                              uint64_t addr)
+{
+    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+    int select = extract64(addr, 55, 1);
+
+    return (tbi >> select) & 1 ? 56 : 64;
+}
+
+static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
+{
+    ARMMMUIdx mmu_idx;
+
+    /* Only the regime of the mmu_idx below is significant. */
+    if (arm_is_secure_below_el3(env)) {
+        mmu_idx = ARMMMUIdx_SE10_0;
+    } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
+               == (HCR_E2H | HCR_TGE)) {
+        mmu_idx = ARMMMUIdx_E20_0;
+    } else {
+        mmu_idx = ARMMMUIdx_E10_0;
+    }
+    return tlbbits_for_regime(env, mmu_idx, addr);
+}
+
 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
@@ -4593,8 +4621,9 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = env_cpu(env);
     int mask = vae1_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
+    int bits = vae1_tlbbits(env, pageaddr);
 
-    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
 }
 
 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4608,11 +4637,12 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
     CPUState *cs = env_cpu(env);
     int mask = vae1_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
+    int bits = vae1_tlbbits(env, pageaddr);
 
     if (tlb_force_broadcast(env)) {
-        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
+        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
     } else {
-        tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
+        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
     }
 }
 
@@ -4621,9 +4651,10 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
+    int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
 
-    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                             ARMMMUIdxBit_E2);
+    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                  ARMMMUIdxBit_E2, bits);
 }
 
 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4631,9 +4662,10 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 {
     CPUState *cs = env_cpu(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
+    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
 
-    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                             ARMMMUIdxBit_SE3);
+    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+                                                  ARMMMUIdxBit_SE3, bits);
 }
 
 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
-- 
2.25.1