On Sun, Dec 30, 2007 at 01:17:19PM +0200, Avi Kivity wrote:
> From: Marcelo Tosatti <[EMAIL PROTECTED]>
> 
> Convert the synchronization of the shadow handling to a separate mmu_lock
> spinlock.
> 
> Also guard fetch() by mmap_sem in read-mode to protect against alias
> and memslot changes.
> 
> Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>
> Signed-off-by: Avi Kivity <[EMAIL PROTECTED]>
> ---
>  arch/x86/kvm/mmu.c         |   46 ++++++++++++++++++++++---------------------
>  arch/x86/kvm/paging_tmpl.h |   10 +++++---
>  arch/x86/kvm/vmx.c         |    2 -
>  include/linux/kvm_host.h   |    3 +-
>  virt/kvm/kvm_main.c        |    3 +-
>  5 files changed, 33 insertions(+), 31 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 1b68f07..356e361 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -971,16 +971,12 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
>  {
>  }
>  
> -static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
> +static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
> +                        gfn_t gfn, struct page *page)
>  {
>       int level = PT32E_ROOT_LEVEL;
>       hpa_t table_addr = vcpu->arch.mmu.root_hpa;
>       int pt_write = 0;
> -     struct page *page;
> -
> -     down_read(&current->mm->mmap_sem);
> -     page = gfn_to_page(vcpu->kvm, gfn);
> -     up_read(&current->mm->mmap_sem);
>  
>       for (; ; level--) {
>               u32 index = PT64_INDEX(v, level);
> @@ -1022,9 +1018,15 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
>  {
>       int r;
>  
> -     mutex_lock(&vcpu->kvm->lock);
> -     r = __nonpaging_map(vcpu, v, write, gfn);
> -     mutex_unlock(&vcpu->kvm->lock);
> +     struct page *page;
> +
> +     down_read(&current->mm->mmap_sem);
> +     page = gfn_to_page(vcpu->kvm, gfn);
> +     up_read(&current->mm->mmap_sem);
> +
> +     spin_lock(&vcpu->kvm->mmu_lock);
> +     r = __nonpaging_map(vcpu, v, write, gfn, page);
> +     spin_unlock(&vcpu->kvm->mmu_lock);
>       return r;
>  }

And it's better to hold mmap_sem across __nonpaging_map(), as the paging
fault path does, to protect against memslot and alias changes.

--- mmu.c.orig  2007-12-31 09:34:50.000000000 -0500
+++ mmu.c       2007-12-31 09:59:46.000000000 -0500
@@ -1018,12 +1018,12 @@ static int nonpaging_map(struct kvm_vcpu

        down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, gfn);
-       up_read(&current->mm->mmap_sem);

        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
        r = __nonpaging_map(vcpu, v, write, gfn, page);
        spin_unlock(&vcpu->kvm->mmu_lock);
+       up_read(&current->mm->mmap_sem);
        return r;
 }
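
The resulting lock nesting is worth spelling out: mmap_sem (taken in read
mode) is the outer lock and mmu_lock the inner one, with gfn_to_page()
called in between, since it can sleep and therefore must not run under the
spinlock. A minimal sketch of the ordering, using the names from the patch
above:

	down_read(&current->mm->mmap_sem);	/* outer: blocks memslot/alias changes */
	page = gfn_to_page(vcpu->kvm, gfn);	/* may sleep, so before mmu_lock */

	spin_lock(&vcpu->kvm->mmu_lock);	/* inner: serializes shadow page tables */
	kvm_mmu_free_some_pages(vcpu);
	r = __nonpaging_map(vcpu, v, write, gfn, page);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&current->mm->mmap_sem);	/* drop the outer lock last */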


