From: Zong Li <zong...@sifive.com>

The minimum granularity of PMP is 4 bytes, which is smaller than the 4KB
page size, so the PMP check could be bypassed by a cached translation if
a PMP region does not start at a page-aligned address. This patch examines
the PMP entries and installs a smaller page size into the TLB when a PMP
entry covers only part of the page.
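For illustration only (this is not part of the patch; PAGE_SIZE and
partial_overlap_size are simplified, made-up names), the overlap
computation that motivates the smaller TLB size can be sketched as a
standalone C program:

/*
 * Standalone sketch: given one PMP region [pmp_sa, pmp_ea] and the 4KB
 * page starting at tlb_sa, return the number of bytes the TLB entry may
 * cover, or 0 if the PMP region either misses the page or covers it
 * completely (in which case a full-page TLB entry is fine).
 */
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ull

static uint64_t partial_overlap_size(uint64_t pmp_sa, uint64_t pmp_ea,
                                     uint64_t tlb_sa)
{
    uint64_t tlb_ea = tlb_sa + PAGE_SIZE - 1;

    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
        return pmp_ea - pmp_sa + 1;   /* PMP region lies inside the page */
    }
    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
        return tlb_ea - pmp_sa + 1;   /* PMP region starts inside the page */
    }
    if (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea && pmp_sa <= tlb_sa) {
        return pmp_ea - tlb_sa + 1;   /* PMP region ends inside the page */
    }
    return 0;                         /* no partial overlap with this page */
}

int main(void)
{
    /* A 256-byte PMP region at the start of the page at 0x80001000 */
    uint64_t size = partial_overlap_size(0x80001000, 0x800010ff, 0x80001000);
    printf("reduced tlb size = 0x%" PRIx64 "\n", size);  /* prints 0x100 */
    return 0;
}

In the patch, pmp_is_range_in_tlb() performs this check against every PMP
entry and picks the smallest non-zero result as the TLB entry size.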
Signed-off-by: Zong Li <zong...@sifive.com>
Reviewed-by: Alistair Francis <alistair.fran...@wdc.com>
Message-Id: <6b0bf48662ef26ab4c15381a08e78a74ebd7ca79.1595924470.git.zong...@sifive.com>
Signed-off-by: Alistair Francis <alistair.fran...@wdc.com>
---
 target/riscv/pmp.h        |  2 ++
 target/riscv/cpu_helper.c | 10 ++++++--
 target/riscv/pmp.c        | 52 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index 8e19793132..6a8f072871 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -60,5 +60,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
 bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
     target_ulong size, pmp_priv_t priv, target_ulong mode);
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+                         target_ulong *tlb_size);
 
 #endif
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 2f337e418c..fd1d373b6f 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -693,6 +693,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     bool first_stage_error = true;
     int ret = TRANSLATE_FAIL;
     int mode = mmu_idx;
+    target_ulong tlb_size = 0;
 
     env->guest_phys_fault_addr = 0;
 
@@ -784,8 +785,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }
 
     if (ret == TRANSLATE_SUCCESS) {
-        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
-                     prot, mmu_idx, TARGET_PAGE_SIZE);
+        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
+            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
+                         prot, mmu_idx, tlb_size);
+        } else {
+            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
+                         prot, mmu_idx, TARGET_PAGE_SIZE);
+        }
         return true;
     } else if (probe) {
         return false;
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index b14feeb7da..c394e867f8 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -383,3 +383,55 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
 
     return val;
 }
+
+/*
+ * Calculate the TLB size if the start address or the end address of
+ * the PMP entry is present in this TLB page.
+ */
+static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
+                                     target_ulong tlb_sa, target_ulong tlb_ea)
+{
+    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
+    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
+
+    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
+        return pmp_ea - pmp_sa + 1;
+    }
+
+    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
+        return tlb_ea - pmp_sa + 1;
+    }
+
+    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
+        return pmp_ea - tlb_sa + 1;
+    }
+
+    return 0;
+}
+
+/*
+ * Check whether there is a PMP entry whose range covers this page. If so,
+ * try to find the minimum granularity for the TLB size.
+ */
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+                         target_ulong *tlb_size)
+{
+    int i;
+    target_ulong val;
+    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
+
+    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
+        if (val) {
+            if (*tlb_size == 0 || *tlb_size > val) {
+                *tlb_size = val;
+            }
+        }
+    }
+
+    if (*tlb_size != 0) {
+        return true;
+    }
+
+    return false;
+}
-- 
2.28.0