Simplify the implementation of get_page_addr_code_hostp by reusing the existing probe_access infrastructure.
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/cputlb.c | 76 ++++++++++++++++------------------------------
 1 file changed, 26 insertions(+), 50 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 80a3eb4f1c..2dc2affa12 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1482,56 +1482,6 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                    (ADDR) & TARGET_PAGE_MASK)
 
-/*
- * Return a ram_addr_t for the virtual address for execution.
- *
- * Return -1 if we can't translate and execute from an entire page
- * of RAM.  This will force us to execute by loading and translating
- * one insn at a time, without caching.
- *
- * NOTE: This function will trigger an exception if the page is
- * not executable.
- */
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
-                                        void **hostp)
-{
-    uintptr_t mmu_idx = cpu_mmu_index(env, true);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    void *p;
-
-    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
-        if (!VICTIM_TLB_HIT(addr_code, addr)) {
-            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
-            index = tlb_index(env, mmu_idx, addr);
-            entry = tlb_entry(env, mmu_idx, addr);
-
-            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
-                /*
-                 * The MMU protection covers a smaller range than a target
-                 * page, so we must redo the MMU check for every insn.
-                 */
-                return -1;
-            }
-        }
-        assert(tlb_hit(entry->addr_code, addr));
-    }
-
-    if (unlikely(entry->addr_code & TLB_MMIO)) {
-        /* The region is not backed by RAM.  */
-        if (hostp) {
-            *hostp = NULL;
-        }
-        return -1;
-    }
-
-    p = (void *)((uintptr_t)addr + entry->addend);
-    if (hostp) {
-        *hostp = p;
-    }
-    return qemu_ram_addr_from_host_nofail(p);
-}
-
 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                            CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
 {
@@ -1687,6 +1637,32 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
     return flags ? NULL : host;
 }
 
+/*
+ * Return a ram_addr_t for the virtual address for execution.
+ *
+ * Return -1 if we can't translate and execute from an entire page
+ * of RAM.  This will force us to execute by loading and translating
+ * one insn at a time, without caching.
+ *
+ * NOTE: This function will trigger an exception if the page is
+ * not executable.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+                                        void **hostp)
+{
+    void *p;
+
+    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
+                                cpu_mmu_index(env, true), false, &p, 0);
+    if (p == NULL) {
+        return -1;
+    }
+    if (hostp) {
+        *hostp = p;
+    }
+    return qemu_ram_addr_from_host_nofail(p);
+}
+
 #ifdef CONFIG_PLUGIN
 /*
  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
-- 
2.34.1
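
[Editor's note: probe_access_internal itself is not shown in this diff. For readers without the tree handy, its declaration is presumably along the lines of the sketch below, inferred from the call site above rather than quoted from cputlb.c; parameter names are guesses.]

/*
 * Presumed shape of the helper reused by this patch (a sketch, not
 * the actual declaration): look up addr in the TLB for the given
 * access_type and mmu_idx, filling the TLB if necessary; store the
 * host address for addr in *phost, or NULL if the page is not backed
 * by plain RAM; return the TLB flags for the page.  With
 * nonfault == false, a failed translation raises the guest exception,
 * which is what preserves the behavior promised by the NOTE in the
 * comment above.
 */
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, uintptr_t retaddr);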