From: Joerg Roedel <[EMAIL PROTECTED]>

This patch introduces a guest TLB flush on every NPF exit in KVM. This fixes
random segfaults and #UD exceptions in the guest seen under some workloads
(e.g. long-running compile workloads or tbench). A kernbench run with and
without the fix showed a slowdown of less than 0.5%.
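
For reference, svm_flush_tlb() on SVM does not execute a flush instruction
directly; it invalidates the vcpu's cached ASID so that a fresh ASID (and
with it a clean guest TLB context) is assigned before the next VMRUN. A
minimal sketch of that path, assuming the force_new_asid() helper as it
exists in svm.c of this era:

	/*
	 * Sketch, not part of this patch: decrementing the cached ASID
	 * generation makes it mismatch the per-CPU generation, so
	 * pre_svm_run() allocates a new ASID before the next VMRUN and
	 * the guest's old TLB entries are no longer used.
	 */
	static void force_new_asid(struct kvm_vcpu *vcpu)
	{
		to_svm(vcpu)->asid_generation--;
	}

	static void svm_flush_tlb(struct kvm_vcpu *vcpu)
	{
		force_new_asid(vcpu);
	}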

Cc: [EMAIL PROTECTED]
Signed-off-by: Joerg Roedel <[EMAIL PROTECTED]>
Signed-off-by: Alexander Graf <[EMAIL PROTECTED]>
---
 arch/x86/kvm/svm.c |   10 +++++++++-
 1 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2d5aed4..980f140 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -59,6 +59,7 @@ static int npt = 1;
 module_param(npt, int, S_IRUGO);
 
 static void kvm_reput_irq(struct vcpu_svm *svm);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
@@ -1004,10 +1005,17 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
                KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
                            (u32)fault_address, (u32)(fault_address >> 32),
                            handler);
-       else
+       else {
                KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
                            (u32)fault_address, (u32)(fault_address >> 32),
                            handler);
+               /*
+                * FIXME: This shouldn't be necessary here, but there is a flush
+                * missing in the MMU code. Until we find this bug, flush the
+                * complete TLB here on an NPF.
+                */
+               svm_flush_tlb(&svm->vcpu);
+       }
 
        if (event_injection)
                kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-- 
1.5.3.7

