Follow the recommendations in APM2 sections 15.36.10 and 15.36.11 to
resolve the RMP violation encountered during the NPT table walk: walk the
TDP to find the pfn and mapping level for the faulting GPA, then either
psmash the large RMP entry, add a new private RMP entry, or make the page
shared, and finally zap the 2M range so that the nested page table is
rebuilt with the updated RMP page size.
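
Below is a minimal standalone sketch of the decision table from APM
section 15.36.11 that snp_handle_rmp_page_fault() implements. The enum,
function, and parameter names are illustrative only, not kernel API, and
the levels follow the PG_LEVEL_* numbering (1 = 4K, 2 = 2M):

  #include <stdbool.h>

  enum rmp_action { ACT_PSMASH, ACT_MAKE_PRIVATE, ACT_MAKE_SHARED, ACT_ZAP_ONLY };

  static enum rmp_action rmp_fault_action(int npt_level, int rmp_level,
                                          bool assigned, bool private,
                                          bool sizem)
  {
          /* A 4K NPT walk hit a 2M private RMP entry, or the error code
           * reported a size mismatch (SIZEM): split the large RMP entry. */
          if (sizem || (npt_level == 1 && rmp_level == 2 && private))
                  return ACT_PSMASH;

          /* Private (C=1) access to a page not assigned in the RMP table:
           * a new private RMP entry is needed. */
          if (!assigned && private)
                  return ACT_MAKE_PRIVATE;

          /* Shared (C=0) access to an assigned page: make it shared. */
          if (assigned && !private)
                  return ACT_MAKE_SHARED;

          /* Otherwise only the NPT mapping needs to be rebuilt. */
          return ACT_ZAP_ONLY;
  }

In every case the existing rmaps for the containing 2M range are then
zapped so the nested page table is rebuilt against the updated RMP state.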

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Joerg Roedel <jroe...@suse.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: "Peter Zijlstra (Intel)" <pet...@infradead.org>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Tom Lendacky <thomas.lenda...@amd.com>
Cc: David Rientjes <rient...@google.com>
Cc: Sean Christopherson <sea...@google.com>
Cc: Vitaly Kuznetsov <vkuzn...@redhat.com>
Cc: Wanpeng Li <wanpen...@tencent.com>
Cc: Jim Mattson <jmatt...@google.com>
Cc: x...@kernel.org
Cc: k...@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.si...@amd.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/mmu/mmu.c          | 20 ++++++++++++
 arch/x86/kvm/svm/sev.c          | 57 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c          |  1 +
 arch/x86/kvm/svm/svm.h          |  2 ++
 5 files changed, 82 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5ea584606885..79dec4f93808 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1311,6 +1311,8 @@ struct kvm_x86_ops {
        void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
        void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
        int (*get_tdp_max_page_level)(struct kvm_vcpu *vcpu, gpa_t gpa, int max_level);
+       int (*handle_rmp_page_fault)(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn,
+                       int level, u64 error_code);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1e057e046ca4..ec396169706f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5105,6 +5105,18 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
+static int handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
+{
+       kvm_pfn_t pfn;
+       int level;
+
+       if (unlikely(!kvm_mmu_get_tdp_walk(vcpu, gpa, &pfn, &level)))
+               return RET_PF_RETRY;
+
+       kvm_x86_ops.handle_rmp_page_fault(vcpu, gpa, pfn, level, error_code);
+       return RET_PF_RETRY;
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len)
 {
@@ -5121,6 +5133,14 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                        goto emulate;
        }
 
+       if (unlikely(error_code & PFERR_GUEST_RMP_MASK)) {
+               r = handle_rmp_page_fault(vcpu, cr2_or_gpa, error_code);
+               if (r == RET_PF_RETRY)
+                       return 1;
+               else
+                       return r;
+       }
+
        if (r == RET_PF_INVALID) {
                r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
                                          lower_32_bits(error_code), false);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 35e7a7bbf878..dbb4f15de9ba 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2924,3 +2924,60 @@ int sev_get_tdp_max_page_level(struct kvm_vcpu *vcpu, gpa_t gpa, int max_level)
 
        return min_t(uint32_t, level, max_level);
 }
+
+int snp_handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn,
+                             int level, u64 error_code)
+{
+       int rlevel, rc = 0;
+       rmpentry_t *e;
+       bool private;
+       gfn_t gfn;
+
+       e = lookup_page_in_rmptable(pfn_to_page(pfn), &rlevel);
+       if (!e)
+               return 1;
+
+       private = !!(error_code & PFERR_GUEST_ENC_MASK);
+
+       /*
+        * See APM section 15.36.11 on how to handle the RMP fault for large pages.
+        *
+        *  npt      rmp    access      action
+        *  --------------------------------------------------
+        *  4k       2M     C=1       psmash
+        *  x        x      C=1       if page is not private then add a new RMP entry
+        *  x        x      C=0       if page is private then make it shared
+        *  2M       4k     C=x       zap
+        */
+       if ((error_code & PFERR_GUEST_SIZEM_MASK) ||
+           ((level == PG_LEVEL_4K) && (rlevel == PG_LEVEL_2M) && private)) {
+               rc = snp_rmptable_psmash(vcpu, pfn);
+               goto zap_gfn;
+       }
+
+       /*
+        * If it's a private access, and the page is not assigned in the RMP table, create a
+        * new private RMP entry.
+        */
+       if (!rmpentry_assigned(e) && private) {
+               rc = snp_make_page_private(vcpu, gpa, pfn, PG_LEVEL_4K);
+               goto zap_gfn;
+       }
+
+       /*
+        * If it's a shared access, then make the page shared in the RMP table.
+        */
+       if (rmpentry_assigned(e) && !private)
+               rc = snp_make_page_shared(vcpu, gpa, pfn, PG_LEVEL_4K);
+
+zap_gfn:
+       /*
+        * Now that we have updated the RMP pagesize, zap the existing rmaps for
+        * large entry ranges so that the nested page table gets rebuilt with the updated RMP
+        * pagesize.
+        */
+       gfn = gpa_to_gfn(gpa) & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
+       kvm_zap_gfn_range(vcpu->kvm, gfn, gfn + 512);
+
+       return 0;
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 73259a3564eb..ab30c3e3956f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4564,6 +4564,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .alloc_apic_backing_page = svm_alloc_apic_backing_page,
        .get_tdp_max_page_level = sev_get_tdp_max_page_level,
+       .handle_rmp_page_fault = snp_handle_rmp_page_fault,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 9fcfceb4d71e..eacae54de9b5 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -637,6 +637,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 void sev_snp_init_vmcb(struct vcpu_svm *svm);
 int sev_get_tdp_max_page_level(struct kvm_vcpu *vcpu, gpa_t gpa, int max_level);
+int snp_handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_pfn_t pfn,
+                             int level, u64 error_code);
 
 /* vmenter.S */
 
-- 
2.17.1
