Convert the synchronization of the shadow page handling to a separate
mmu_lock spinlock.

Also guard fetch() with mmap_sem held in read mode, to protect against
alias and memslot changes.
Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>
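---
A minimal sketch of the lock ordering this patch establishes for the
shadow page fault path (illustrative only; the helper name and body are
assumptions, the real code is FNAME(page_fault) in paging_tmpl.h).
mmap_sem is taken for read before the guest page table walk and held
across fetch(), while the new mmu_lock spinlock covers only the shadow
page table manipulation:

	static int sketch_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
	{
		int r = 0;

		down_read(&current->mm->mmap_sem); /* blocks alias/memslot changes */

		/* walk_addr(): guest page table walk, may fault and sleep */

		spin_lock(&vcpu->kvm->mmu_lock);   /* shadow page tables */
		/* fetch(): instantiate shadow ptes; must not sleep here */
		spin_unlock(&vcpu->kvm->mmu_lock);

		up_read(&current->mm->mmap_sem);
		return r;
	}

The spinlock keeps the shadow MMU paths non-sleeping, which is why the
potentially faulting guest accesses sit outside mmu_lock, under
mmap_sem only.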
Index: kvm.quilt/arch/x86/kvm/mmu.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/mmu.c
+++ kvm.quilt/arch/x86/kvm/mmu.c
@@ -1031,7 +1031,7 @@ static void mmu_free_roots(struct kvm_vc
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -1039,7 +1039,7 @@ static void mmu_free_roots(struct kvm_vc
sp = page_header(root);
--sp->root_count;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
return;
}
#endif
@@ -1053,7 +1053,7 @@ static void mmu_free_roots(struct kvm_vc
}
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
}
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
@@ -1256,9 +1256,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
r = mmu_topup_memory_caches(vcpu);
if (r)
goto out;
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
mmu_alloc_roots(vcpu);
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
kvm_mmu_flush_tlb(vcpu);
out:
@@ -1435,15 +1435,15 @@ int kvm_mmu_unprotect_page_virt(struct k
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&current->mm->mmap_sem);
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
return r;
}
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
struct kvm_mmu_page *sp;
@@ -1452,7 +1452,7 @@ void __kvm_mmu_free_some_pages(struct kv
kvm_mmu_zap_page(vcpu->kvm, sp);
++vcpu->kvm->stat.mmu_recycled;
}
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -1584,10 +1584,10 @@ void kvm_mmu_zap_all(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
- mutex_lock(&kvm->lock);
+ spin_lock(&kvm->mmu_lock);
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
kvm_mmu_zap_page(kvm, sp);
- mutex_unlock(&kvm->lock);
+ spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
}
Index: kvm.quilt/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm.quilt/arch/x86/kvm/paging_tmpl.h
@@ -217,9 +217,9 @@ walk:
goto walk;
}
pte |= PT_DIRTY_MASK;
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
walker->ptes[walker->level - 1] = pte;
}
@@ -386,7 +386,6 @@ static int FNAME(page_fault)(struct kvm_
*/
r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
fetch_fault);
-	up_read(&current->mm->mmap_sem);
/*
* The page is not mapped by the guest. Let the guest handle it.
@@ -395,10 +394,11 @@ static int FNAME(page_fault)(struct kvm_
pgprintk("%s: guest page fault\n", __FUNCTION__);
inject_page_fault(vcpu, addr, walker.error_code);
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+	up_read(&current->mm->mmap_sem);
return 0;
}
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
&write_pt);
pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
@@ -411,13 +411,15 @@ static int FNAME(page_fault)(struct kvm_
* mmio: emulate if accessible, otherwise its a guest fault.
*/
if (shadow_pte && is_io_pte(*shadow_pte)) {
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
return 1;
}
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
return write_pt;
}
Index: kvm.quilt/arch/x86/kvm/x86.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/x86.c
+++ kvm.quilt/arch/x86/kvm/x86.c
@@ -1609,9 +1609,9 @@ static int emulator_write_phys(struct kv
	up_read(&current->mm->mmap_sem);
return 0;
}
- mutex_lock(&vcpu->kvm->lock);
+ spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
- mutex_unlock(&vcpu->kvm->lock);
+ spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);
return 1;
}
Index: kvm.quilt/include/linux/kvm_host.h
===================================================================
--- kvm.quilt.orig/include/linux/kvm_host.h
+++ kvm.quilt/include/linux/kvm_host.h
@@ -104,7 +104,8 @@ struct kvm_memory_slot {
};
struct kvm {
- struct mutex lock; /* protects everything except vcpus */
+ struct mutex lock; /* protects the vcpus array and APIC accesses */
+ spinlock_t mmu_lock;
struct mm_struct *mm; /* userspace tied to this vm */
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
Index: kvm.quilt/virt/kvm/kvm_main.c
===================================================================
--- kvm.quilt.orig/virt/kvm/kvm_main.c
+++ kvm.quilt/virt/kvm/kvm_main.c
@@ -165,6 +165,7 @@ static struct kvm *kvm_create_vm(void)
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
+ spin_lock_init(&kvm->mmu_lock);
kvm_io_bus_init(&kvm->pio_bus);
mutex_init(&kvm->lock);
kvm_io_bus_init(&kvm->mmio_bus);
@@ -571,9 +572,7 @@ int kvm_read_guest_atomic(struct kvm *kv
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
- pagefault_disable();
r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
- pagefault_enable();
if (r)
return -EFAULT;
return 0;
Index: kvm.quilt/arch/x86/kvm/vmx.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/vmx.c
+++ kvm.quilt/arch/x86/kvm/vmx.c
@@ -1476,7 +1476,6 @@ static int alloc_apic_access_page(struct
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
- mutex_lock(&kvm->lock);
	down_write(&current->mm->mmap_sem);
if (kvm->arch.apic_access_page)
goto out;
@@ -1490,7 +1489,6 @@ static int alloc_apic_access_page(struct
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
	up_write(&current->mm->mmap_sem);
- mutex_unlock(&kvm->lock);
return r;
}