As Marcelo pointed out, we need slots_lock to protect against slots changing under our nose during wall clock writing.
This patch addresses this issue. Signed-off-by: Glauber Costa <[EMAIL PROTECTED]> CC: Marcelo Tosatti <[EMAIL PROTECTED]> --- arch/x86/kvm/x86.c | 10 ++++++---- 1 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1ef56ad..9e4c6f2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -500,21 +500,21 @@ static void kvm_write_wall_clock(struct if (!wall_clock) return; - mutex_lock(&kvm->lock); - version++; + + down_read(&kvm->slots_lock); kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); wc_ts = current_kernel_time(); wc.wc_sec = wc_ts.tv_sec; wc.wc_nsec = wc_ts.tv_nsec; wc.wc_version = version; + kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); - - mutex_unlock(&kvm->lock); + up_read(&kvm->slots_lock); } static void kvm_write_guest_time(struct kvm_vcpu *v) @@ -608,8 +608,10 @@ int kvm_set_msr_common(struct kvm_vcpu * vcpu->arch.hv_clock.tsc_shift = 22; down_read(&current->mm->mmap_sem); + down_read(&vcpu->kvm->slots_lock); vcpu->arch.time_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + up_read(&vcpu->kvm->slots_lock); up_read(&current->mm->mmap_sem); if (is_error_page(vcpu->arch.time_page)) { -- 1.4.2 ------------------------------------------------------------------------- This SF.net email is sponsored by: Microsoft Defy all challenges. Microsoft(R) Visual Studio 2008. http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/ _______________________________________________ kvm-devel mailing list kvm-devel@lists.sourceforge.net https://lists.sourceforge.net/lists/listinfo/kvm-devel