Add a circular buffer to store the hw addresses used in the last EXCLUSIVE_HISTORY_LEN exclusive accesses.
When an address is popped from the buffer, its page will be set as not
exclusive. In this way, we avoid:
- frequent set/unset of a page (causing frequent flushes as well)
- the possibility of forgetting a page with its EXCL bit still set.

Suggested-by: Jani Kokkonen <jani.kokko...@huawei.com>
Suggested-by: Claudio Fontana <claudio.font...@huawei.com>
Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
---
 cputlb.c                | 32 ++++++++++++++++++++++----------
 include/qom/cpu.h       |  3 +++
 softmmu_llsc_template.h |  2 ++
 3 files changed, 27 insertions(+), 10 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 70b6404..372877e 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -394,16 +394,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     env->tlb_v_table[mmu_idx][vidx] = *te;
     env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
 
-    if (unlikely(!(te->addr_write & TLB_MMIO) && (te->addr_write & TLB_EXCL))) {
-        /* We are removing an exclusive entry, set the page to dirty. This
-         * is not be necessary if the vCPU has performed both SC and LL. */
-        hwaddr hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) +
-                                          (te->addr_write & TARGET_PAGE_MASK);
-        if (!cpu->ll_sc_context) {
-            cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
-        }
-    }
-
     /* refill the tlb */
     env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
     env->iotlb[mmu_idx][index].attrs = attrs;
@@ -507,6 +497,28 @@ static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
     }
 }
 
+static inline void excl_history_put_addr(CPUState *cpu, hwaddr addr)
+{
+    /* Avoid some overhead if the address we are about to put is equal to
+     * the last one */
+    if (cpu->excl_protected_addr[cpu->excl_protected_last] !=
+        (addr & TARGET_PAGE_MASK)) {
+        cpu->excl_protected_last = (cpu->excl_protected_last + 1) %
+                                   EXCLUSIVE_HISTORY_LEN;
+        /* Unset EXCL bit of the oldest entry */
+        if (cpu->excl_protected_addr[cpu->excl_protected_last] !=
+            EXCLUSIVE_RESET_ADDR) {
+            cpu_physical_memory_unset_excl(
+                cpu->excl_protected_addr[cpu->excl_protected_last],
+                cpu->cpu_index);
+        }
+
+        /* Add a new address, overwriting the oldest one */
+        cpu->excl_protected_addr[cpu->excl_protected_last] =
+            addr & TARGET_PAGE_MASK;
+    }
+}
+
 #define MMUSUFFIX _mmu
 
 /* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 9e409ce..5f65ebf 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -217,6 +217,7 @@ struct kvm_run;
 
 /* Atomic insn translation TLB support. */
 #define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+#define EXCLUSIVE_HISTORY_LEN 8
 
 /**
  * CPUState:
@@ -343,6 +344,8 @@ struct CPUState {
      * The address is set to EXCLUSIVE_RESET_ADDR if the vCPU is not.
      * in the middle of a LL/SC. */
     struct Range excl_protected_range;
+    hwaddr excl_protected_addr[EXCLUSIVE_HISTORY_LEN];
+    int excl_protected_last;
     /* Used to carry the SC result but also to flag a normal (legacy)
      * store access made by a stcond (see softmmu_template.h). */
     int excl_succeeded;
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index 586bb2e..becb90b 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -72,6 +72,7 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
     hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) + addr;
     cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
+    excl_history_put_addr(this, hw_addr);
     /* If all the vCPUs have the EXCL bit set for this page there is no need
      * to request any flush. */
     if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
@@ -80,6 +81,7 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
         if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
             cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
             tlb_flush(cpu, 1);
+            excl_history_put_addr(cpu, hw_addr);
         }
     }
 }
-- 
2.6.4
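
For reference, the eviction policy above can be exercised in isolation. The
sketch below is a minimal standalone model of the ring buffer, assuming 4 KiB
pages; the names (ExclHistory, history_put, unset_excl_stub) are illustrative
only and are not part of the QEMU code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HISTORY_LEN 8
#define RESET_ADDR  UINT64_MAX
#define PAGE_MASK64 (~(uint64_t)0xfff)   /* assume 4 KiB pages */

typedef struct ExclHistory {
    uint64_t addr[HISTORY_LEN];  /* page addresses of recent LL accesses */
    int last;                    /* index of the most recent entry */
} ExclHistory;

static void history_init(ExclHistory *h)
{
    for (int i = 0; i < HISTORY_LEN; i++) {
        h->addr[i] = RESET_ADDR;
    }
    h->last = 0;
}

/* Record the page of a new exclusive access, evicting the oldest entry.
 * The evicted page loses its EXCL protection via the callback. */
static void history_put(ExclHistory *h, uint64_t addr,
                        void (*unset_excl)(uint64_t))
{
    addr &= PAGE_MASK64;
    if (h->addr[h->last] == addr) {
        return;                          /* same page as last time */
    }
    h->last = (h->last + 1) % HISTORY_LEN;
    if (h->addr[h->last] != RESET_ADDR) {
        unset_excl(h->addr[h->last]);    /* oldest entry is evicted */
    }
    h->addr[h->last] = addr;             /* overwrite the oldest slot */
}

static void unset_excl_stub(uint64_t addr)
{
    printf("EXCL bit cleared for page 0x%" PRIx64 "\n", addr);
}

int main(void)
{
    ExclHistory h;
    history_init(&h);
    /* Nine distinct pages: the ninth put evicts the first page. */
    for (uint64_t a = 0; a < 9; a++) {
        history_put(&h, a << 12, unset_excl_stub);
    }
    return 0;
}

Running it prints a single eviction (page 0x0), mirroring how the patch clears
the EXCL bit only when an address actually falls off the end of the history
rather than on every TLB refill.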