Add a new TLB entry flag that forces every access to the marked page to follow the slow path.
On a store to such a page, mark it as dirty to invalidate any pending LL/SC operation.

Suggested-by: Jani Kokkonen <jani.kokko...@huawei.com>
Suggested-by: Claudio Fontana <claudio.font...@huawei.com>
Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
---
 cputlb.c               |  7 ++++++-
 include/exec/cpu-all.h |  1 +
 softmmu_template.h     | 48 +++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 38f2151..3e4ccba 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -324,7 +324,12 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                                                    + xlat)) {
             te->addr_write = address | TLB_NOTDIRTY;
         } else {
-            te->addr_write = address;
+            if (!cpu_physical_memory_excl_is_dirty(section->mr->ram_addr
+                                                   + xlat)) {
+                te->addr_write = address | TLB_EXCL;
+            } else {
+                te->addr_write = address;
+            }
         }
     } else {
         te->addr_write = -1;
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index ac06c67..bd19a94 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -311,6 +311,7 @@ extern RAMList ram_list;
 #define TLB_NOTDIRTY    (1 << 4)
 /* Set if TLB entry is an IO callback.  */
 #define TLB_MMIO        (1 << 5)
+#define TLB_EXCL        (1 << 6)
 
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
diff --git a/softmmu_template.h b/softmmu_template.h
index 0e3dd35..1ac99da 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -242,6 +242,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
+
 #if DATA_SIZE == 1
     res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
 #else
@@ -262,7 +263,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address.  */
+    /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
     /* If the TLB entry is for a different page, reload and try again.  */
@@ -387,6 +388,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     uintptr_t haddr;
+    bool to_excl_mem = false;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -406,6 +408,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
 
+    if (unlikely(tlb_addr & TLB_EXCL &&
+        !(tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)))) {
+        /* The slow path has been forced since we are writing to a page
+         * used for a load-link operation. */
+        to_excl_mem = true;
+        goto skip_io;
+    }
+
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         hwaddr ioaddr;
@@ -445,6 +455,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
+skip_io:
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
@@ -453,6 +464,17 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 #endif
 
+    /* If we are performing a store to exclusive-protected memory, mark
+     * the page as dirty to invalidate any pending LL/SC operation. */
+    if (unlikely(to_excl_mem)) {
+        MemoryRegionSection *section;
+        hwaddr xlat, sz;
+
+        section = address_space_translate_for_iotlb(ENV_GET_CPU(env), tlb_addr,
+                                                    &xlat, &sz);
+        cpu_physical_memory_set_excl_dirty(section->mr->ram_addr + xlat);
+    }
+
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
     glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
@@ -468,6 +490,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     uintptr_t haddr;
+    bool to_excl_mem = false;
+
+    if (unlikely(tlb_addr & TLB_EXCL &&
+        !(tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)))) {
+        /* The slow path has been forced since we are writing to a page
+         * used for a load-link operation. */
+        to_excl_mem = true;
+    }
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -487,6 +517,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
 
+    if (to_excl_mem) {
+        goto skip_io;
+    }
+
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         hwaddr ioaddr;
@@ -526,6 +560,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
+skip_io:
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
@@ -534,6 +569,17 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 #endif
 
+    /* If we are performing a store to exclusive-protected memory, mark
+     * the page as dirty to invalidate any pending LL/SC operation. */
+    if (unlikely(to_excl_mem)) {
+        MemoryRegionSection *section;
+        hwaddr xlat, sz;
+
+        section = address_space_translate_for_iotlb(ENV_GET_CPU(env), tlb_addr,
+                                                    &xlat, &sz);
+        cpu_physical_memory_set_excl_dirty(section->mr->ram_addr + xlat);
+    }
+
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
-- 
2.4.0
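
Note for reviewers reading this patch in isolation: it relies on
cpu_physical_memory_excl_is_dirty() and cpu_physical_memory_set_excl_dirty(),
which are presumably introduced by an earlier patch in this series. The
standalone sketch below models how such an exclusive-dirty page bitmap could
behave; the bitmap layout, the 4 KiB page size, and the
cpu_physical_memory_clear_excl_dirty() helper are illustrative assumptions,
not the series' actual implementation. Every page starts dirty; a load-link
clears the bit of its page (so tlb_set_page() would install TLB_EXCL for it),
and a store through the forced slow path sets the bit again, invalidating the
reservation.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXCL_PAGE_BITS 12                      /* assumed 4 KiB pages */
#define EXCL_PAGE_SIZE (1ULL << EXCL_PAGE_BITS)
#define BITS_PER_LONG  (8 * sizeof(unsigned long))

static unsigned long *excl_bitmap;             /* one bit per RAM page */
static uint64_t excl_ram_pages;

/* Allocate the bitmap with every page initially dirty: no page is
 * exclusive-protected until a load-link clears its bit. */
static void excl_bitmap_init(uint64_t ram_size)
{
    size_t words;

    excl_ram_pages = ram_size >> EXCL_PAGE_BITS;
    words = (excl_ram_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
    excl_bitmap = malloc(words * sizeof(unsigned long));
    assert(excl_bitmap);
    memset(excl_bitmap, 0xff, words * sizeof(unsigned long));
}

/* What tlb_set_page() consults: a clean (clear) bit means the page is
 * under exclusive protection, so the entry gets TLB_EXCL. */
static bool cpu_physical_memory_excl_is_dirty(uint64_t ram_addr)
{
    uint64_t page = ram_addr >> EXCL_PAGE_BITS;

    assert(page < excl_ram_pages);
    return excl_bitmap[page / BITS_PER_LONG] & (1UL << (page % BITS_PER_LONG));
}

/* What the forced slow path in helper_le/be_st_name() calls: marking the
 * page dirty again invalidates any outstanding reservation on it. */
static void cpu_physical_memory_set_excl_dirty(uint64_t ram_addr)
{
    uint64_t page = ram_addr >> EXCL_PAGE_BITS;

    assert(page < excl_ram_pages);
    excl_bitmap[page / BITS_PER_LONG] |= 1UL << (page % BITS_PER_LONG);
}

/* Hypothetical helper a load-link would use to start protecting a page. */
static void cpu_physical_memory_clear_excl_dirty(uint64_t ram_addr)
{
    uint64_t page = ram_addr >> EXCL_PAGE_BITS;

    assert(page < excl_ram_pages);
    excl_bitmap[page / BITS_PER_LONG] &= ~(1UL << (page % BITS_PER_LONG));
}

int main(void)
{
    uint64_t addr = 3 * EXCL_PAGE_SIZE + 0x40; /* some guest-RAM address */

    excl_bitmap_init(16 * EXCL_PAGE_SIZE);     /* model 16 pages of RAM */

    /* Load-link: protect the page. */
    cpu_physical_memory_clear_excl_dirty(addr);

    /* A racing store through the slow path invalidates the reservation. */
    cpu_physical_memory_set_excl_dirty(addr);

    /* Store-conditional: succeeds only if the page is still clean. */
    printf("SC %s\n",
           cpu_physical_memory_excl_is_dirty(addr) ? "fails" : "succeeds");

    free(excl_bitmap);
    return 0;
}

Under this reading, "dirty = unprotected" keeps the common case (pages never
touched by LL/SC) out of the TLB_EXCL slow path, since tlb_set_page() only
sets TLB_EXCL when the bit is clear.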