Introduce a function to walk all parent shadow pages of a given shadow page, invoking a handler for each one.
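
As an illustration of the intended use (the handler name below is made up
for this example and is not added by this patch), a caller supplies an
mmu_parent_walk_fn and lets mmu_parent_walk() invoke it for each parent
shadow page encountered on the way up toward the root:

        static int touch_parent_fn(struct kvm_vcpu *vcpu,
                                   struct kvm_mmu_page *sp)
        {
                /* per-ancestor work would go here */
                return 1;
        }

        ...
        mmu_parent_walk(vcpu, sp, touch_parent_fn);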

Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>


Index: kvm/arch/x86/kvm/mmu.c
===================================================================
--- kvm.orig/arch/x86/kvm/mmu.c
+++ kvm/arch/x86/kvm/mmu.c
@@ -147,6 +147,8 @@ struct kvm_shadow_walk {
                     u64 addr, u64 *spte, int level);
 };
 
+typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
@@ -862,6 +864,77 @@ static void mmu_page_remove_parent_pte(s
        BUG();
 }
 
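+/*
+ * Per-level cursor into a shadow page's list of parent pte chains, so
+ * the walk below can be resumed where it left off at each level.
+ */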
+struct mmu_parent_walk {
+       struct hlist_node *node;
+       int i;
+};
+
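+/*
+ * Return the next parent shadow page of @sp, resuming from the position
+ * recorded in @walk, or NULL once all parents have been visited.
+ */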
+static struct kvm_mmu_page *mmu_parent_next(struct kvm_mmu_page *sp,
+                                           struct mmu_parent_walk *walk)
+{
+       struct kvm_pte_chain *pte_chain;
+       struct hlist_head *h;
+
+       if (!walk->node) {
+               if (!sp || !sp->parent_pte)
+                       return NULL;
+               if (!sp->multimapped)
+                       return page_header(__pa(sp->parent_pte));
+               h = &sp->parent_ptes;
+               walk->node = h->first;
+               walk->i = 0;
+       }
+
+       while (walk->node) {
+               pte_chain = hlist_entry(walk->node, struct kvm_pte_chain, link);
+               while (walk->i < NR_PTE_CHAIN_ENTRIES) {
+                       int i = walk->i++;
+                       if (!pte_chain->parent_ptes[i])
+                               break;
+                       return page_header(__pa(pte_chain->parent_ptes[i]));
+               }
+               walk->node = walk->node->next;
+               walk->i = 0;
+       }
+
+       return NULL;
+}
+
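+/*
+ * Walk all parents of @sp up to the root level, invoking @fn for each
+ * parent shadow page found above @sp's level.
+ */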
+static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                           mmu_parent_walk_fn fn)
+{
+       int level, start_level;
+       struct mmu_parent_walk walk[PT64_ROOT_LEVEL];
+
+       memset(&walk, 0, sizeof(walk));
+       level = start_level = sp->role.level;
+
+       do {
+               sp = mmu_parent_next(sp, &walk[level-1]);
+               if (sp) {
+                       if (sp->role.level > start_level)
+                               fn(vcpu, sp);
+                       if (level != sp->role.level)
+                               ++level;
+                       WARN_ON(level > PT64_ROOT_LEVEL);
+                       continue;
+               }
+               --level;
+       } while (level > start_level-1);
+}
+
 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
 {
