From: Richard Henderson <richard.hender...@linaro.org>

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
Message-Id: <20210508201640.1045808-1-richard.hender...@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4...@amsat.org>
---
 include/exec/exec-all.h | 12 ++++++++++++
 accel/tcg/cputlb.c      | 27 ++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 9a3dbb7ec08..8021adf38f4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -281,6 +281,11 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                         target_ulong len, uint16_t idxmap,
                                         unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits);
 
 /**
  * tlb_set_page_with_attrs:
@@ -397,6 +402,13 @@ static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                       unsigned bits)
 {
 }
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                             target_ulong addr,
+                                                             target_long len,
+                                                             uint16_t idxmap,
+                                                             unsigned bits)
+{
+}
 #endif
 /**
  * probe_access:
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 5314349ef9d..bc4370f4e21 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -924,16 +924,20 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        idxmap, bits);
 }
 
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                                   target_ulong addr,
-                                                   uint16_t idxmap,
-                                                   unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits)
 {
     TLBFlushRangeData d;
     run_on_cpu_data runon;
 
-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
@@ -945,7 +949,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -972,6 +976,15 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     }
 }
 
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                                   target_ulong addr,
+                                                   uint16_t idxmap,
+                                                   unsigned bits)
+{
+    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+                                              idxmap, bits);
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
-- 
2.26.3
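
As an illustration of the new entry point (not part of the patch above), here is a
minimal sketch of how target code might invalidate a multi-page block in the TLBs of
every vCPU. The helper name demo_flush_block_all_cpus, its parameters, and the
surrounding plumbing are assumptions made for the example; only the
tlb_flush_*_by_mmuidx_* functions and the TARGET_* macros come from QEMU.

/*
 * Illustrative sketch only: a hypothetical helper showing the calling
 * convention of the new API.  It is not part of this patch.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"

static void demo_flush_block_all_cpus(CPUState *cs, target_ulong base,
                                      unsigned block_pages, uint16_t idxmap)
{
    /* Size of the region to invalidate, in bytes. */
    target_ulong len = (target_ulong)block_pages << TARGET_PAGE_BITS;

    /*
     * Flush [base, base + len) from the MMU indexes in idxmap on all
     * vCPUs, using the '_synced' variant of the API.  Passing
     * TARGET_LONG_BITS for 'bits' makes every address bit significant,
     * so a request of at most one page devolves to
     * tlb_flush_page_by_mmuidx_all_cpus_synced(), as in the patch's
     * early-return path.
     */
    tlb_flush_range_by_mmuidx_all_cpus_synced(cs, base & TARGET_PAGE_MASK,
                                              len, idxmap, TARGET_LONG_BITS);
}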