Calling handle_mmio_page_fault() has been unnecessary since commit
e9ee956e311d ("KVM: x86: MMU: Move handle_mmio_page_fault() call to
kvm_mmu_page_fault()", 2016-02-22).

handle_mmio_page_fault() can now be made static.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
        v1->v2: make the function static.
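
A note on the vmx.c side of the change: the reason "if (ret >= 0)
return ret;" is enough there is that kvm_mmu_page_fault() already maps
the RET_MMIO_PF_* values onto its plain int convention before
returning. The standalone sketch below models that mapping
(illustrative only, not kernel code: the fake_* names are invented
here, and the EMULATE/INVALID outcomes are simplified to "return 1",
whereas the real result depends on the emulator and on
mmu.page_fault()):

#include <stdio.h>

/* Same values as the enum this patch moves into mmu.c. */
enum {
        RET_MMIO_PF_EMULATE = 1,
        RET_MMIO_PF_INVALID = 2,
        RET_MMIO_PF_RETRY = 0,
        RET_MMIO_PF_BUG = -1
};

/* Stand-in for the now-static MMIO fast path; just echoes a scenario. */
static int fake_handle_mmio_page_fault(int scenario)
{
        return scenario;
}

/*
 * Models the PFERR_RSVD_MASK branch of kvm_mmu_page_fault(): EMULATE
 * and RETRY both end up positive, BUG propagates a negative error, and
 * INVALID falls through to the real page fault path (simplified to
 * "return 1" here).
 */
static int fake_kvm_mmu_page_fault(int scenario)
{
        int r = fake_handle_mmio_page_fault(scenario);

        if (r == RET_MMIO_PF_EMULATE)
                return 1;       /* emulate, then re-enter the guest */
        if (r == RET_MMIO_PF_RETRY)
                return 1;       /* let the CPU fault again */
        if (r < 0)
                return r;       /* RET_MMIO_PF_BUG */
        /* Must be RET_MMIO_PF_INVALID: take the real page fault path. */
        return 1;
}

int main(void)
{
        printf("EMULATE: %d\n", fake_kvm_mmu_page_fault(RET_MMIO_PF_EMULATE));
        printf("RETRY:   %d\n", fake_kvm_mmu_page_fault(RET_MMIO_PF_RETRY));
        printf("INVALID: %d\n", fake_kvm_mmu_page_fault(RET_MMIO_PF_INVALID));
        printf("BUG:     %d\n", fake_kvm_mmu_page_fault(RET_MMIO_PF_BUG));
        return 0;
}

With that mapping, every MMIO outcome returns either 1 or a negative
error, so ret < 0 (the RET_MMIO_PF_BUG case) is the only path that
still reaches the WARN in handle_ept_misconfig().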

 arch/x86/kvm/mmu.c | 19 ++++++++++++++++++-
 arch/x86/kvm/mmu.h | 17 -----------------
 arch/x86/kvm/vmx.c | 13 +++----------
 3 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e721e10afda1..f7598883920a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3648,7 +3648,23 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
        return reserved;
 }
 
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+/*
+ * Return values of handle_mmio_page_fault:
+ * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
+ *                     directly.
+ * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
+ *                     fault path update the mmio spte.
+ * RET_MMIO_PF_RETRY: let CPU fault again on the address.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
+ */
+enum {
+       RET_MMIO_PF_EMULATE = 1,
+       RET_MMIO_PF_INVALID = 2,
+       RET_MMIO_PF_RETRY = 0,
+       RET_MMIO_PF_BUG = -1
+};
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
        u64 spte;
        bool reserved;
@@ -4837,6 +4853,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                        return 1;
                if (r < 0)
                        return r;
+               /* Must be RET_MMIO_PF_INVALID.  */
        }
 
        r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d7d248a000dd..3ed6192d93b1 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -56,23 +56,6 @@ static inline u64 rsvd_bits(int s, int e)
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- *                     directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- *                     fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
-       RET_MMIO_PF_EMULATE = 1,
-       RET_MMIO_PF_INVALID = 2,
-       RET_MMIO_PF_RETRY = 0,
-       RET_MMIO_PF_BUG = -1
-};
-
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index df8d2f127508..45fb0ea78ee8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6410,17 +6410,10 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                return kvm_skip_emulated_instruction(vcpu);
        }
 
-       ret = handle_mmio_page_fault(vcpu, gpa, true);
        vcpu->arch.gpa_available = true;
-       if (likely(ret == RET_MMIO_PF_EMULATE))
-               return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
-                                             EMULATE_DONE;
-
-       if (unlikely(ret == RET_MMIO_PF_INVALID))
-               return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
-
-       if (unlikely(ret == RET_MMIO_PF_RETRY))
-               return 1;
+       ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
+       if (ret >= 0)
+               return ret;
 
        /* It is the real ept misconfig */
        WARN_ON(1);
-- 
1.8.3.1

