Export a generic shadow page table walker, kvm_mmu_shadow_walk(), which invokes a caller-supplied callback on each shadow pte along the walk of an address. Required by the EPT misconfiguration handler.
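For illustration only, a minimal sketch of how a caller might use the new interface. The names below (struct ept_misconfig_walk, ept_misconfig_walk_fn, inspect_gpa) are hypothetical and not part of this patch; the actual EPT misconfiguration handler may differ.

	/*
	 * Hypothetical caller: record the last spte/level visited for a
	 * guest physical address by embedding mmu_shadow_walk in a
	 * private context and recovering it with container_of().
	 */
	struct ept_misconfig_walk {
		struct mmu_shadow_walk walk;
		u64 spte;
		int level;
	};

	static int ept_misconfig_walk_fn(struct kvm_vcpu *vcpu, u64 *sptep,
					 int level, struct mmu_shadow_walk *walk)
	{
		struct ept_misconfig_walk *w =
			container_of(walk, struct ept_misconfig_walk, walk);

		w->spte = *sptep;
		w->level = level;
		return 0;	/* returning non-zero stops the walk early */
	}

	static void inspect_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
	{
		struct ept_misconfig_walk w = {
			.walk.fn = ept_misconfig_walk_fn,
		};

		kvm_mmu_shadow_walk(vcpu, gpa, &w.walk);
		/* w.spte and w.level now hold the last entry visited */
	}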

Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>

Index: kvm/arch/x86/kvm/mmu.c
===================================================================
--- kvm.orig/arch/x86/kvm/mmu.c
+++ kvm/arch/x86/kvm/mmu.c
@@ -3013,6 +3013,26 @@ out:
        return r;
 }
 
+void kvm_mmu_shadow_walk(struct kvm_vcpu *vcpu, u64 addr,
+                        struct mmu_shadow_walk *walk)
+{
+       struct kvm_shadow_walk_iterator iterator;
+
+       spin_lock(&vcpu->kvm->mmu_lock);
+       for_each_shadow_entry(vcpu, addr, iterator) {
+               int err;
+
+               err = walk->fn(vcpu, iterator.sptep, iterator.level, walk);
+               if (err)
+                       break;
+
+               if (!is_shadow_present_pte(*iterator.sptep))
+                       break;
+       }
+       spin_unlock(&vcpu->kvm->mmu_lock);
+}
+EXPORT_SYMBOL(kvm_mmu_shadow_walk);
+
 #ifdef AUDIT
 
 static const char *audit_msg;
Index: kvm/arch/x86/kvm/mmu.h
===================================================================
--- kvm.orig/arch/x86/kvm/mmu.h
+++ kvm/arch/x86/kvm/mmu.h
@@ -37,6 +37,14 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
+struct mmu_shadow_walk {
+       int (*fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level,
+                  struct mmu_shadow_walk *walk);
+};
+
+void kvm_mmu_shadow_walk(struct kvm_vcpu *vcpu, u64 addr,
+                        struct mmu_shadow_walk *walk);
+
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
        if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))

