All PTEs in KVM_MEM_ALLONES slots point to the same read-only page
in KVM, so instead of mapping each page individually upon first access
we can map the whole slot aggressively on the first read fault.

Suggested-by: Michael S. Tsirkin <m...@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
---
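Note (not part of the commit): the idea being exploited here is just
"many pages of a region backed by one shared read-only page, populated
eagerly instead of one fault at a time". A rough userspace sketch of
that same pattern follows, using memfd_create()/mmap() as a stand-in
for the MMU changes below -- the names, page count and lack of error
checking are illustrative only, this is not KVM API usage:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	int npages = 16;

	/* One backing page filled with 0xff ("allones"). */
	int fd = memfd_create("allones", 0);
	ftruncate(fd, psize);
	unsigned char *ones = mmap(NULL, psize, PROT_READ | PROT_WRITE,
				   MAP_SHARED, fd, 0);
	memset(ones, 0xff, psize);

	/*
	 * Reserve the region, then eagerly point every one of its pages
	 * at the same read-only backing page instead of populating them
	 * on demand -- the analogue of the loops added below.
	 */
	unsigned char *region = mmap(NULL, npages * psize, PROT_NONE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	for (int i = 0; i < npages; i++)
		mmap(region + i * psize, psize, PROT_READ,
		     MAP_SHARED | MAP_FIXED, fd, 0);

	printf("page 0: 0x%02x, page %d: 0x%02x\n",
	       region[0], npages - 1, region[(npages - 1) * psize]);
	return 0;
}
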
 arch/x86/kvm/mmu/mmu.c         | 20 ++++++++++++++++++--
 arch/x86/kvm/mmu/paging_tmpl.h | 23 +++++++++++++++++++++--
 2 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3db499df2dfc..e92ca9ed3ff5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4154,8 +4154,24 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                goto out_unlock;
        if (make_mmu_pages_available(vcpu) < 0)
                goto out_unlock;
-       r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
-                        prefault, is_tdp && lpage_disallowed);
+
+       if (likely(!(slot->flags & KVM_MEM_ALLONES) || write)) {
+               r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
+                                prefault, is_tdp && lpage_disallowed);
+       } else {
+               /*
+                * KVM_MEM_ALLONES slots are 4k-only and fully backed by the
+                * same read-only 'allones' page, so map all PTEs aggressively.
+                */
+               for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages;
+                    gfn++) {
+                       r = __direct_map(vcpu, gfn << PAGE_SHIFT, write,
+                                        map_writable, max_level, pfn, prefault,
+                                        is_tdp && lpage_disallowed);
+                       if (r)
+                               break;
+               }
+       }
 
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 98e368788e8b..7bf0c48b858f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -789,6 +789,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
        bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
                                is_nx_huge_page_enabled();
        int max_level;
+       gfn_t gfn;
 
        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -873,8 +874,26 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        if (make_mmu_pages_available(vcpu) < 0)
                goto out_unlock;
-       r = FNAME(fetch)(vcpu, addr, &walker, write_fault, max_level, pfn,
-                        map_writable, prefault, lpage_disallowed);
+       if (likely(!(slot->flags & KVM_MEM_ALLONES) || write_fault)) {
+               r = FNAME(fetch)(vcpu, addr, &walker, write_fault, max_level,
+                                pfn, map_writable, prefault, lpage_disallowed);
+       } else {
+               /*
+                * KVM_MEM_ALLONES slots are 4k-only and fully backed by the
+                * same read-only 'allones' page, so map all PTEs aggressively.
+                */
+               for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages;
+                    gfn++) {
+                       walker.gfn = gfn;
+                       r = FNAME(fetch)(vcpu, gfn << PAGE_SHIFT, &walker,
+                                        write_fault, max_level, pfn,
+                                        map_writable, prefault,
+                                        lpage_disallowed);
+                       if (r)
+                               break;
+               }
+       }
+
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock:
-- 
2.25.4
