This patch allows VMAs that contain no backing page to be used for guest
memory.  This is a drop-in replacement for the first patch in Ben-Ami's
direct mmio series.  Here, we continue to allow mmio pages to be
represented in the rmap.
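
For context (not part of the patch), here is a minimal, hypothetical
userspace sketch of the case this enables: mmap()ing a PCI BAR through
sysfs yields a VM_PFNMAP vma with no backing struct pages, and
registering that mapping as a memslot is exactly what used to make
gfn_to_pfn() hand back bad_page.  The device path, slot number, and
guest physical address below are made-up placeholders.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int map_bar_as_guest_memory(int vm_fd)
{
	/* mmap of a BAR resource file produces a VM_PFNMAP mapping */
	int bar_fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0",
			  O_RDWR);
	size_t size = 4096;		/* assumed BAR size */
	struct kvm_userspace_memory_region region;
	void *bar;

	if (bar_fd < 0)
		return -1;

	bar = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   bar_fd, 0);
	if (bar == MAP_FAILED)
		return -1;

	/*
	 * get_user_pages() fails on this range; with this patch,
	 * gfn_to_pfn() falls back to the vma's pfn instead of
	 * returning bad_page.
	 */
	memset(&region, 0, sizeof(region));
	region.slot = 1;			/* arbitrary free slot */
	region.guest_phys_addr = 0xc0000000;	/* example GPA */
	region.memory_size = size;
	region.userspace_addr = (unsigned long)bar;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}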

Since v1, I've taken into account Andrea's suggestion to use VM_PFNMAP
instead of VM_IO, and changed the BUG_ON to a return of bad_page.

Since v2, I've incorporated comments from Avi about returning bad_page instead
of NULL and fixed a typo spotted by Muli.

Signed-off-by: Anthony Liguori <[EMAIL PROTECTED]>

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1d7991a..9e3e4ee 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -532,6 +532,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
        struct page *page[1];
        unsigned long addr;
        int npages;
+       pfn_t pfn;
 
        might_sleep();
 
@@ -544,19 +545,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);
 
-       if (npages != 1) {
-               get_page(bad_page);
-               return page_to_pfn(bad_page);
-       }
+       if (unlikely(npages != 1)) {
+               struct vm_area_struct *vma;
 
-       return page_to_pfn(page[0]);
+               vma = find_vma(current->mm, addr);
+               if (vma == NULL || addr < vma->vm_start ||
+                   !(vma->vm_flags & VM_PFNMAP)) {
+                       get_page(bad_page);
+                       return page_to_pfn(bad_page);
+               }
+
+               pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               BUG_ON(pfn_valid(pfn));
+       } else
+               pfn = page_to_pfn(page[0]);
+
+       return pfn;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
-       return pfn_to_page(gfn_to_pfn(kvm, gfn));
+       pfn_t pfn;
+
+       pfn = gfn_to_pfn(kvm, gfn);
+       if (pfn_valid(pfn))
+               return pfn_to_page(pfn);
+
+       WARN_ON(!pfn_valid(pfn));
+
+       get_page(bad_page);
+       return bad_page;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
@@ -569,7 +589,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       put_page(pfn_to_page(pfn));
+       if (pfn_valid(pfn))
+               put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
 
@@ -594,21 +615,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       struct page *page = pfn_to_page(pfn);
-       if (!PageReserved(page))
-               SetPageDirty(page);
+       if (pfn_valid(pfn)) {
+               struct page *page = pfn_to_page(pfn);
+               if (!PageReserved(page))
+                       SetPageDirty(page);
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       mark_page_accessed(pfn_to_page(pfn));
+       if (pfn_valid(pfn))
+               mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       get_page(pfn_to_page(pfn));
+       if (pfn_valid(pfn))
+               get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
