Do not hold the kvm->lock mutex across the entire page fault path;
acquire it only where it is actually needed: around the mmu hash list,
the active list, rmap and parent pte handling.

Allow concurrent guest page table walkers by switching walk_addr() to
take mmap_sem in read mode.
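
For reference, the resulting locking order in the shadow page fault path
looks roughly as follows (a condensed sketch of the paging_tmpl.h hunk
below; error handling and the mmio case are omitted):

	down_read(&current->mm->mmap_sem);
	/* guest page table walk; may fault host pages in, walkers can run concurrently */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);
	up_read(&current->mm->mmap_sem);

	/* kvm->lock now only covers shadow mmu state (hash list, rmap, ...) */
	mutex_lock(&vcpu->kvm->lock);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	mutex_unlock(&vcpu->kvm->lock);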

And get rid of the separate lockless __gfn_to_page(); gfn_to_page() now
requires the caller to hold mmap_sem.

Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>

Index: kvm.quilt/arch/x86/kvm/mmu.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/mmu.c
+++ kvm.quilt/arch/x86/kvm/mmu.c
@@ -1026,6 +1026,7 @@ static void mmu_free_roots(struct kvm_vc
 
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
+       mutex_lock(&vcpu->kvm->lock);
 #ifdef CONFIG_X86_64
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -1033,6 +1034,7 @@ static void mmu_free_roots(struct kvm_vc
                sp = page_header(root);
                --sp->root_count;
                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+               mutex_unlock(&vcpu->kvm->lock);
                return;
        }
 #endif
@@ -1046,6 +1048,7 @@ static void mmu_free_roots(struct kvm_vc
                }
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
+       mutex_unlock(&vcpu->kvm->lock);
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
@@ -1245,15 +1248,15 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
        int r;
 
-       mutex_lock(&vcpu->kvm->lock);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;
+       mutex_lock(&vcpu->kvm->lock);
        mmu_alloc_roots(vcpu);
+       mutex_unlock(&vcpu->kvm->lock);
        kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
-       mutex_unlock(&vcpu->kvm->lock);
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_load);
@@ -1420,13 +1423,22 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa;
+       int r;
+
+       down_read(&current->mm->mmap_sem);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+       up_read(&current->mm->mmap_sem);
 
-       return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       mutex_lock(&vcpu->kvm->lock);
+       r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       mutex_unlock(&vcpu->kvm->lock);
+       return r;
 }
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
+       mutex_lock(&vcpu->kvm->lock);
        while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *sp;
 
@@ -1435,6 +1447,7 @@ void __kvm_mmu_free_some_pages(struct kv
                kvm_mmu_zap_page(vcpu->kvm, sp);
                ++vcpu->kvm->stat.mmu_recycled;
        }
+       mutex_unlock(&vcpu->kvm->lock);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -1442,7 +1455,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
        int r;
        enum emulation_result er;
 
-       mutex_lock(&vcpu->kvm->lock);
        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
        if (r < 0)
                goto out;
@@ -1457,7 +1469,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
                goto out;
 
        er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
-       mutex_unlock(&vcpu->kvm->lock);
 
        switch (er) {
        case EMULATE_DONE:
@@ -1472,7 +1483,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *
                BUG();
        }
 out:
-       mutex_unlock(&vcpu->kvm->lock);
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
@@ -1569,8 +1579,10 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
 
+       mutex_lock(&kvm->lock);
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
                kvm_mmu_zap_page(kvm, sp);
+       mutex_unlock(&kvm->lock);
 
        kvm_flush_remote_tlbs(kvm);
 }
Index: kvm.quilt/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm.quilt/arch/x86/kvm/paging_tmpl.h
@@ -212,7 +212,9 @@ walk:
                if (ret)
                        goto walk;
                pte |= PT_DIRTY_MASK;
+               mutex_lock(&vcpu->kvm->lock);
                kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
+               mutex_unlock(&vcpu->kvm->lock);
                walker->ptes[walker->level - 1] = pte;
        }
 
@@ -368,11 +370,13 @@ static int FNAME(page_fault)(struct kvm_
        if (r)
                return r;
 
+       down_read(&current->mm->mmap_sem);
        /*
         * Look up the shadow pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);
+       up_read(&current->mm->mmap_sem);
 
        /*
         * The page is not mapped by the guest.  Let the guest handle it.
@@ -384,6 +388,7 @@ static int FNAME(page_fault)(struct kvm_
                return 0;
        }
 
+       mutex_lock(&vcpu->kvm->lock);
        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                                  &write_pt);
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
@@ -395,11 +400,14 @@ static int FNAME(page_fault)(struct kvm_
        /*
         * mmio: emulate if accessible, otherwise its a guest fault.
         */
-       if (shadow_pte && is_io_pte(*shadow_pte))
+       if (shadow_pte && is_io_pte(*shadow_pte)) {
+               mutex_unlock(&vcpu->kvm->lock);
                return 1;
+       }
 
        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
+       mutex_unlock(&vcpu->kvm->lock);
 
        return write_pt;
 }
Index: kvm.quilt/arch/x86/kvm/vmx.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/vmx.c
+++ kvm.quilt/arch/x86/kvm/vmx.c
@@ -1431,27 +1431,34 @@ static int init_rmode_tss(struct kvm *kv
 {
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        u16 data = 0;
+       int ret = 0;
        int r;
 
+       down_read(&current->mm->mmap_sem);
        r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
        if (r < 0)
-               return 0;
+               goto out;
        data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
        r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16));
        if (r < 0)
-               return 0;
+               goto out;
        r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
        if (r < 0)
-               return 0;
+               goto out;
        r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
        if (r < 0)
-               return 0;
+               goto out;
        data = ~0;
-       r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
-                       sizeof(u8));
+       r = kvm_write_guest_page(kvm, fn, &data,
+                                RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
+                                sizeof(u8));
        if (r < 0)
-               return 0;
-       return 1;
+               goto out;
+
+       ret = 1;
+out:
+       up_read(&current->mm->mmap_sem);
+       return ret;
 }
 
 static void seg_setup(int seg)
@@ -1470,6 +1477,7 @@ static int alloc_apic_access_page(struct
        int r = 0;
 
        mutex_lock(&kvm->lock);
+       down_write(&current->mm->mmap_sem);
        if (kvm->arch.apic_access_page)
                goto out;
        kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -1481,6 +1489,7 @@ static int alloc_apic_access_page(struct
                goto out;
        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
+       up_write(&current->mm->mmap_sem);
        mutex_unlock(&kvm->lock);
        return r;
 }
Index: kvm.quilt/arch/x86/kvm/x86.c
===================================================================
--- kvm.quilt.orig/arch/x86/kvm/x86.c
+++ kvm.quilt/arch/x86/kvm/x86.c
@@ -180,7 +180,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, u
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-       mutex_lock(&vcpu->kvm->lock);
+       down_read(&current->mm->mmap_sem);
        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
@@ -197,7 +197,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, u
 
        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-       mutex_unlock(&vcpu->kvm->lock);
+       up_read(&current->mm->mmap_sem);
 
        return ret;
 }
@@ -211,13 +211,13 @@ static bool pdptrs_changed(struct kvm_vc
        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;
 
-       mutex_lock(&vcpu->kvm->lock);
+       down_read(&current->mm->mmap_sem);
-       r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-       mutex_unlock(&vcpu->kvm->lock);
+       up_read(&current->mm->mmap_sem);
 
        return changed;
 }
@@ -277,9 +277,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsi
        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;
 
-       mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
-       mutex_unlock(&vcpu->kvm->lock);
        return;
 }
 EXPORT_SYMBOL_GPL(set_cr0);
@@ -319,9 +317,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsi
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
-       mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
-       mutex_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr4);
 
@@ -359,7 +355,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsi
                 */
        }
 
-       mutex_lock(&vcpu->kvm->lock);
+       down_read(&current->mm->mmap_sem);
        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
@@ -375,7 +371,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsi
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
-       mutex_unlock(&vcpu->kvm->lock);
+       up_read(&current->mm->mmap_sem);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -1170,12 +1166,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       down_write(&current->mm->mmap_sem);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-       mutex_unlock(&kvm->lock);
+       up_write(&current->mm->mmap_sem);
        return 0;
 }
 
@@ -1224,7 +1220,7 @@ static int kvm_vm_ioctl_set_memory_alias
            < alias->target_phys_addr)
                goto out;
 
-       mutex_lock(&kvm->lock);
+       down_write(&current->mm->mmap_sem);
 
        p = &kvm->arch.aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1238,7 +1234,7 @@ static int kvm_vm_ioctl_set_memory_alias
 
        kvm_mmu_zap_all(kvm);
 
-       mutex_unlock(&kvm->lock);
+       up_write(&current->mm->mmap_sem);
 
        return 0;
 
@@ -1314,7 +1310,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
-       mutex_lock(&kvm->lock);
+       down_write(&current->mm->mmap_sem);
 
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
@@ -1330,7 +1326,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
        }
        r = 0;
 out:
-       mutex_unlock(&kvm->lock);
+       up_write(&current->mm->mmap_sem);
        return r;
 }
 
@@ -1524,25 +1520,32 @@ int emulator_read_std(unsigned long addr
                             struct kvm_vcpu *vcpu)
 {
        void *data = val;
+       int r = X86EMUL_CONTINUE;
 
+       down_read(&current->mm->mmap_sem);
        while (bytes) {
                gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA)
-                       return X86EMUL_PROPAGATE_FAULT;
+               if (gpa == UNMAPPED_GVA) {
+                       r = X86EMUL_PROPAGATE_FAULT;
+                       goto out;
+               }
                ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
-               if (ret < 0)
-                       return X86EMUL_UNHANDLEABLE;
+               if (ret < 0) {
+                       r = X86EMUL_UNHANDLEABLE;
+                       goto out;
+               }
 
                bytes -= tocopy;
                data += tocopy;
                addr += tocopy;
        }
-
-       return X86EMUL_CONTINUE;
+out:
+       up_read(&current->mm->mmap_sem);
+       return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
 
@@ -1560,7 +1563,9 @@ static int emulator_read_emulated(unsign
                return X86EMUL_CONTINUE;
        }
 
+       down_read(&current->mm->mmap_sem);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       up_read(&current->mm->mmap_sem);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1576,11 +1581,14 @@ mmio:
        /*
         * Is this MMIO handled locally?
         */
+       mutex_lock(&vcpu->kvm->lock);
        mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
        if (mmio_dev) {
                kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+               mutex_unlock(&vcpu->kvm->lock);
                return X86EMUL_CONTINUE;
        }
+       mutex_unlock(&vcpu->kvm->lock);
 
        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
@@ -1595,10 +1603,16 @@ static int emulator_write_phys(struct kv
 {
        int ret;
 
+       down_read(&current->mm->mmap_sem);
        ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
-       if (ret < 0)
+       if (ret < 0) {
+               up_read(&current->mm->mmap_sem);
                return 0;
+       }
+       mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+       mutex_unlock(&vcpu->kvm->lock);
+       up_read(&current->mm->mmap_sem);
        return 1;
 }
 
@@ -1608,7 +1622,11 @@ static int emulator_write_emulated_onepa
                                           struct kvm_vcpu *vcpu)
 {
        struct kvm_io_device *mmio_dev;
-       gpa_t                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       gpa_t                 gpa;
+
+       down_read(&current->mm->mmap_sem);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       up_read(&current->mm->mmap_sem);
 
        if (gpa == UNMAPPED_GVA) {
                kvm_inject_page_fault(vcpu, addr, 2);
@@ -1626,11 +1644,14 @@ mmio:
        /*
         * Is this MMIO handled locally?
         */
+       mutex_lock(&vcpu->kvm->lock);
        mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
        if (mmio_dev) {
                kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+               mutex_unlock(&vcpu->kvm->lock);
                return X86EMUL_CONTINUE;
        }
+       mutex_unlock(&vcpu->kvm->lock);
 
        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
@@ -1677,11 +1698,14 @@ static int emulator_cmpxchg_emulated(uns
 #ifndef CONFIG_X86_64
        /* guests cmpxchg8b have to be emulated atomically */
        if (bytes == 8) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa;
                struct page *page;
                char *addr;
                u64 *val;
 
+               down_read(&current->mm->mmap_sem);
+               gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+
                if (gpa == UNMAPPED_GVA ||
                   (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                        goto emul_write;
@@ -1697,6 +1721,7 @@ static int emulator_cmpxchg_emulated(uns
                kvm_release_page_dirty(page);
        }
 emul_write:
+       up_read(&current->mm->mmap_sem);
 #endif
 
        return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -2077,10 +2102,10 @@ int kvm_emulate_pio_string(struct kvm_vc
                kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        for (i = 0; i < nr_pages; ++i) {
-               mutex_lock(&vcpu->kvm->lock);
+               down_read(&current->mm->mmap_sem);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                vcpu->arch.pio.guest_pages[i] = page;
-               mutex_unlock(&vcpu->kvm->lock);
+               up_read(&current->mm->mmap_sem);
                if (!page) {
                        kvm_inject_gp(vcpu, 0);
                        free_pio_guest_pages(vcpu);
@@ -2203,7 +2228,6 @@ int kvm_fix_hypercall(struct kvm_vcpu *v
        char instruction[3];
        int ret = 0;
 
-       mutex_lock(&vcpu->kvm->lock);
 
        /*
         * Blow out the MMU to ensure that no other VCPU has an active mapping
@@ -2218,8 +2242,6 @@ int kvm_fix_hypercall(struct kvm_vcpu *v
            != X86EMUL_CONTINUE)
                ret = -EFAULT;
 
-       mutex_unlock(&vcpu->kvm->lock);
-
        return ret;
 }
 
@@ -2827,13 +2849,13 @@ int kvm_arch_vcpu_ioctl_translate(struct
        gpa_t gpa;
 
        vcpu_load(vcpu);
-       mutex_lock(&vcpu->kvm->lock);
+       down_read(&current->mm->mmap_sem);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+       up_read(&current->mm->mmap_sem);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
-       mutex_unlock(&vcpu->kvm->lock);
        vcpu_put(vcpu);
 
        return 0;
@@ -3102,13 +3124,11 @@ int kvm_arch_set_memory_region(struct kv
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
-                       down_write(&current->mm->mmap_sem);
                        memslot->userspace_addr = do_mmap(NULL, 0,
                                                     npages * PAGE_SIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED | MAP_ANONYMOUS,
                                                     0);
-                       up_write(&current->mm->mmap_sem);
 
                        if (IS_ERR((void *)memslot->userspace_addr))
                                return PTR_ERR((void *)memslot->userspace_addr);
@@ -3116,10 +3136,8 @@ int kvm_arch_set_memory_region(struct kv
                        if (!old.user_alloc && old.rmap) {
                                int ret;
 
-                               down_write(&current->mm->mmap_sem);
                                ret = do_munmap(current->mm, old.userspace_addr,
                                                old.npages * PAGE_SIZE);
-                               up_write(&current->mm->mmap_sem);
                                if (ret < 0)
                                        printk(KERN_WARNING
                                       "kvm_vm_ioctl_set_memory_region: "
Index: kvm.quilt/virt/kvm/kvm_main.c
===================================================================
--- kvm.quilt.orig/virt/kvm/kvm_main.c
+++ kvm.quilt/virt/kvm/kvm_main.c
@@ -227,7 +227,7 @@ static int kvm_vm_release(struct inode *
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
  *
- * Must be called holding kvm->lock.
+ * Must be called holding mmap_sem for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
@@ -338,9 +338,9 @@ int kvm_set_memory_region(struct kvm *kv
 {
        int r;
 
-       mutex_lock(&kvm->lock);
+       down_write(&current->mm->mmap_sem);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
-       mutex_unlock(&kvm->lock);
+       up_write(&current->mm->mmap_sem);
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -456,7 +456,7 @@ static unsigned long gfn_to_hva(struct k
 /*
  * Requires current->mm->mmap_sem to be held
  */
-static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
        struct page *page[1];
        unsigned long addr;
@@ -481,17 +481,6 @@ static struct page *__gfn_to_page(struct
        return page[0];
 }
 
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
-{
-       struct page *page;
-
-       down_read(&current->mm->mmap_sem);
-       page = __gfn_to_page(kvm, gfn);
-       up_read(&current->mm->mmap_sem);
-
-       return page;
-}
-
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
@@ -977,8 +966,7 @@ static int kvm_vm_fault(struct vm_area_s
 
        if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
                return VM_FAULT_SIGBUS;
-       /* current->mm->mmap_sem is already held so call lockless version */
-       page = __gfn_to_page(kvm, vmf->pgoff);
+       page = gfn_to_page(kvm, vmf->pgoff);
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
                return VM_FAULT_SIGBUS;
