As in the RAM case, the MMIO exclusive ranges also have to be protected from other CPUs' accesses. To do that, we flag the accessed MemoryRegion to mark that an exclusive access has been performed and has not yet concluded.
This flag will force the other CPUs to invalidate the exclusive range in
case of collision.

Suggested-by: Jani Kokkonen <jani.kokko...@huawei.com>
Suggested-by: Claudio Fontana <claudio.font...@huawei.com>
Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
---
 cputlb.c                | 20 +++++++++++++-------
 include/exec/memory.h   |  1 +
 softmmu_llsc_template.h | 11 +++++++----
 softmmu_template.h      | 22 ++++++++++++++++++++++
 4 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 87d09c8..06ce2da 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -496,19 +496,25 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 /* For every vCPU compare the exclusive address and reset it in case of a
  * match. Since only one vCPU is running at once, no lock has to be held to
  * guard this operation. */
-static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
+static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
 {
     CPUState *cpu;
+    bool ret = false;
 
     CPU_FOREACH(cpu) {
-        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
-            ranges_overlap(cpu->excl_protected_range.begin,
-                           cpu->excl_protected_range.end -
-                           cpu->excl_protected_range.begin,
-                           addr, size)) {
-            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+        if (current_cpu != cpu) {
+            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
+                ranges_overlap(cpu->excl_protected_range.begin,
+                               cpu->excl_protected_range.end -
+                               cpu->excl_protected_range.begin,
+                               addr, size)) {
+                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+                ret = true;
+            }
         }
     }
+
+    return ret;
 }
 
 #define MMUSUFFIX _mmu
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 71e0480..bacb3ad 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -171,6 +171,7 @@ struct MemoryRegion {
     bool rom_device;
     bool flush_coalesced_mmio;
     bool global_locking;
+    bool pending_excl_access; /* A vCPU issued an exclusive access */
     uint8_t dirty_log_mask;
     ram_addr_t ram_addr;
     Object *owner;
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index 101f5e8..b4712ba 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -81,15 +81,18 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
                 }
             }
         }
+        /* For this vCPU, just update the TLB entry, no need to flush. */
+        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
     } else {
-        hw_error("EXCL accesses to MMIO regions not supported yet.");
+        /* Set a pending exclusive access in the MemoryRegion */
+        MemoryRegion *mr = iotlb_to_region(this,
+                                           env->iotlb[mmu_idx][index].addr,
+                                           env->iotlb[mmu_idx][index].attrs);
+        mr->pending_excl_access = true;
     }
 
     cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
 
-    /* For this vCPU, just update the TLB entry, no need to flush. */
-    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
-
     /* From now on we are in LL/SC context */
     this->ll_sc_context = true;
 
diff --git a/softmmu_template.h b/softmmu_template.h
index c54bdc9..71c5152 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -360,6 +360,14 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+
+    /* Invalidate the exclusive range that overlaps this access */
+    if (mr->pending_excl_access) {
+        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {
+            mr->pending_excl_access = false;
+        }
+    }
+
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -504,6 +512,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
                                                      mmu_idx, index,
                                                      retaddr);
+            /* N.B.: Here excl_succeeded == true means that this access
+             * comes from an exclusive instruction. */
+            if (cpu->excl_succeeded) {
+                MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
+                                                   iotlbentry->attrs);
+                mr->pending_excl_access = false;
+            }
         } else {
             glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
                                                     mmu_idx, index,
@@ -655,6 +670,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
                                                      mmu_idx, index,
                                                      retaddr);
+            /* N.B.: Here excl_succeeded == true means that this access
+             * comes from an exclusive instruction. */
+            if (cpu->excl_succeeded) {
+                MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
+                                                   iotlbentry->attrs);
+                mr->pending_excl_access = false;
+            }
         } else {
             glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
                                                     mmu_idx, index,
-- 
2.7.0
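[Note for reviewers, not part of the patch: below is a minimal,
self-contained C sketch of the protocol the patch implements. An LL on an
MMIO page flags the region; a later write from another vCPU resets any
colliding exclusive range and clears the flag once no collision remains.
The ToyCPUState/ToyMemoryRegion types and the toy_io_write()/
lookup_and_reset() helpers are simplified stand-ins invented for this
sketch, not QEMU's real API.]

/* Toy model of the pending_excl_access protocol above.
 * Compile with: cc -std=c99 toy_excl.c
 * All names here are simplified stand-ins, not QEMU's real types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXCLUSIVE_RESET_ADDR UINT64_MAX
#define NR_CPUS 2

typedef struct {
    uint64_t begin;
    uint64_t end;
} ExclRange;

typedef struct {
    ExclRange excl_protected_range; /* set by the LL helper */
} ToyCPUState;

typedef struct {
    bool pending_excl_access; /* mirrors MemoryRegion.pending_excl_access */
} ToyMemoryRegion;

static ToyCPUState cpus[NR_CPUS];

static bool ranges_overlap(uint64_t b1, uint64_t l1, uint64_t b2, uint64_t l2)
{
    return b1 < b2 + l2 && b2 < b1 + l1;
}

/* Same idea as lookup_and_reset_cpus_ll_addr(): reset every other vCPU's
 * colliding exclusive range; report whether any collision was found. */
static bool lookup_and_reset(uint64_t addr, uint64_t size, int current)
{
    bool ret = false;
    for (int i = 0; i < NR_CPUS; i++) {
        ToyCPUState *cpu = &cpus[i];
        if (i != current &&
            cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
            ranges_overlap(cpu->excl_protected_range.begin,
                           cpu->excl_protected_range.end -
                           cpu->excl_protected_range.begin,
                           addr, size)) {
            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
            ret = true;
        }
    }
    return ret;
}

/* Same idea as the io_write() hook: the flag gates the costlier walk over
 * all vCPUs, so writes to unflagged regions pay one boolean test only. */
static void toy_io_write(ToyMemoryRegion *mr, uint64_t addr, uint64_t size,
                         int current)
{
    if (mr->pending_excl_access && lookup_and_reset(addr, size, current)) {
        mr->pending_excl_access = false;
    }
    /* ...the actual device write would happen here... */
}

int main(void)
{
    ToyMemoryRegion mr = { .pending_excl_access = false };

    /* vCPU 0 does an LL on [0x100, 0x104): protect the range, flag the MR. */
    cpus[0].excl_protected_range = (ExclRange){ 0x100, 0x104 };
    cpus[1].excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
    mr.pending_excl_access = true;

    /* vCPU 1 writes into the protected range: vCPU 0's SC must now fail. */
    toy_io_write(&mr, 0x100, 4, 1);

    printf("range reset: %d, flag cleared: %d\n",
           cpus[0].excl_protected_range.begin == EXCLUSIVE_RESET_ADDR,
           !mr.pending_excl_access);
    return 0;
}

[The point of the flag, as the diff shows, is that the common case of a
region with no LL/SC sequence in flight pays a single boolean test per
MMIO write instead of a CPU_FOREACH walk over every vCPU.]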