Exploiting the tcg_excl_access_lock, port helper_{le,be}_st_name to work in real multithreading: the exclusive slow path now takes the lock before checking and updating the exclusive-protected range and releases it only once the store has completed or the SC has failed. In addition, the SC now fails unless the protected range covers exactly [hw_addr, hw_addr + DATA_SIZE), instead of checking only the range start.
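In short, the exclusive slow path of both helpers now follows the pattern sketched below. This is only a condensed outline of the hunks that follow, not the literal code: the actual store, the unaligned case and the MMIO case are omitted, and all names are the ones used in the diff.

    qemu_mutex_lock(&tcg_excl_access_lock);
    if (env->excl_succeeded) {
        /* Called from softmmu_llsc_template.h, i.e. this is a SC. */
        if (env->excl_protected_range.begin != hw_addr ||
            env->excl_protected_range.end != hw_addr + DATA_SIZE) {
            /* SC to an unprotected or partially protected range: fail it. */
            env->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
            env->excl_succeeded = 0;
            qemu_mutex_unlock(&tcg_excl_access_lock);
            return;
        }
        /* The access will complete: mark the address as dirty again. */
        cpu_physical_memory_set_excl_dirty(hw_addr, ENV_GET_CPU(env)->cpu_index);
    }
    /* ... perform the store to haddr ... */
    lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE); /* resets our own LL addr too */
    qemu_mutex_unlock(&tcg_excl_access_lock);
    return;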
Suggested-by: Jani Kokkonen <jani.kokko...@huawei.com>
Suggested-by: Claudio Fontana <claudio.font...@huawei.com>
Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
---
 softmmu_template.h | 36 ++++++++++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 6 deletions(-)

diff --git a/softmmu_template.h b/softmmu_template.h
index ad65d20..514aeb7 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -418,20 +418,29 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
          * exclusive-protected memory. */
         hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;

+        qemu_mutex_lock(&tcg_excl_access_lock);
+
         /* The function lookup_and_reset_cpus_ll_addr could have reset the
          * exclusive address. Fail the SC in this case.
          * N.B.: Here excl_succeeded == 0 means that helper_le_st_name has
          * not been called by a softmmu_llsc_template.h. */
         if(env->excl_succeeded) {
-            if (env->excl_protected_range.begin != hw_addr) {
-                /* The vCPU is SC-ing to an unprotected address. */
+            if (!((env->excl_protected_range.begin == hw_addr) &&
+                  env->excl_protected_range.end == (hw_addr + DATA_SIZE))) {
+                /* The vCPU is SC-ing to an unprotected address. This
+                 * can also happen when a vCPU stores to the address.
+                 * */
                 env->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
                 env->excl_succeeded = 0;
+
+                qemu_mutex_unlock(&tcg_excl_access_lock);
+
+                return;
             }

-            cpu_physical_memory_set_excl_dirty(hw_addr, ENV_GET_CPU(env)->cpu_index);
+            /* Now we are going for sure to complete the access. Set the
+             * bit to dirty. */
+            cpu_physical_memory_set_excl_dirty(hw_addr,
+                                               ENV_GET_CPU(env)->cpu_index);
         }

         haddr = addr + env->tlb_table[mmu_idx][index].addend;
@@ -441,8 +450,11 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
     #endif

+        /* This will reset the excl address also for the current vCPU. */
         lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);

+        qemu_mutex_unlock(&tcg_excl_access_lock);
+
         return;
     } else {
         if ((addr & (DATA_SIZE - 1)) != 0) {
@@ -532,20 +544,29 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
          * exclusive-protected memory. */
         hwaddr hw_addr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;

+        qemu_mutex_lock(&tcg_excl_access_lock);
+
         /* The function lookup_and_reset_cpus_ll_addr could have reset the
          * exclusive address. Fail the SC in this case.
          * N.B.: Here excl_succeeded == 0 means that helper_le_st_name has
          * not been called by a softmmu_llsc_template.h. */
         if(env->excl_succeeded) {
-            if (env->excl_protected_range.begin != hw_addr) {
-                /* The vCPU is SC-ing to an unprotected address. */
+            if (!((env->excl_protected_range.begin == hw_addr) &&
+                  env->excl_protected_range.end == (hw_addr + DATA_SIZE))) {
+                /* The vCPU is SC-ing to an unprotected address. This
+                 * can also happen when a vCPU stores to the address.
+                 * */
                 env->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
                 env->excl_succeeded = 0;
+
+                qemu_mutex_unlock(&tcg_excl_access_lock);
+
+                return;
             }

-            cpu_physical_memory_set_excl_dirty(hw_addr, ENV_GET_CPU(env)->cpu_index);
+            /* Now we are going for sure to complete the access. Set the
+             * bit to dirty. */
+            cpu_physical_memory_set_excl_dirty(hw_addr,
+                                               ENV_GET_CPU(env)->cpu_index);
         }

         haddr = addr + env->tlb_table[mmu_idx][index].addend;
@@ -555,8 +576,11 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
     #endif

+        /* This will reset the excl address also for the current vCPU. */
         lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);

+        qemu_mutex_unlock(&tcg_excl_access_lock);
+
         return;
     } else {
         if ((addr & (DATA_SIZE - 1)) != 0) {
--
2.5.0