Instead of branchy code that depends on the walk level, gpte.ps, and the mmu
configuration, precompute everything into a bitmap during mode changes and
look it up at runtime.

Signed-off-by: Avi Kivity <a...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  7 +++++++
 arch/x86/kvm/mmu.c              | 20 ++++++++++++++++++++
 arch/x86/kvm/mmu.h              |  3 ++-
 arch/x86/kvm/paging_tmpl.h      | 25 +++++--------------------
 4 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3318bde..78525f6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -298,6 +298,13 @@ struct kvm_mmu {
        u64 *lm_root;
        u64 rsvd_bits_mask[2][4];
 
+       /*
+        * Bitmap: bit set = last pte in walk
+        * index[0]: pte.ps
+        * index[1:2]: level - 1
+        */
+       u8 last_pte;
+
        bool nx;
 
        u64 pdptrs[4]; /* pae */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ce78408..30a574f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3548,6 +3548,22 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
        }
 }
 
+static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+       u8 map = 0;
+       unsigned level, root_level = mmu->root_level;
+
+       if (root_level == PT32E_ROOT_LEVEL)
+               --root_level;
+       map |= 3;  /* PT_PAGE_TABLE_LEVEL always terminates */
+       for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
+               if (level <= PT_PDPE_LEVEL
+                   && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
+                       map |= 2 << (2 * (level - 1));
+       }
+       mmu->last_pte = map;
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
                                        struct kvm_mmu *context,
                                        int level)
@@ -3557,6 +3573,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 
        reset_rsvds_bits_mask(vcpu, context);
        update_permission_bitmask(vcpu, context);
+       update_last_pte_bitmap(vcpu, context);
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
@@ -3586,6 +3603,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 
        reset_rsvds_bits_mask(vcpu, context);
        update_permission_bitmask(vcpu, context);
+       update_last_pte_bitmap(vcpu, context);
 
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
@@ -3647,6 +3665,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        }
 
        update_permission_bitmask(vcpu, context);
+       update_last_pte_bitmap(vcpu, context);
 
        return 0;
 }
@@ -3724,6 +3743,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
        }
 
        update_permission_bitmask(vcpu, g_context);
+       update_last_pte_bitmap(vcpu, g_context);
 
        return 0;
 }
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 143ee70..b08dd34 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -20,7 +20,8 @@
 #define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_SHIFT 6
 #define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
-#define PT_PAGE_SIZE_MASK (1ULL << 7)
+#define PT_PAGE_SIZE_SHIFT 7
+#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
 #define PT64_NX_SHIFT 63
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index eb4a668..dd89404 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,24 +101,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        return (ret != orig_pte);
 }
 
-static bool FNAME(is_last_gpte)(struct guest_walker *walker,
-                               struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                               pt_element_t gpte)
-{
-       if (walker->level == PT_PAGE_TABLE_LEVEL)
-               return true;
-
-       if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
-           (PTTYPE == 64 || is_pse(vcpu)))
-               return true;
-
-       if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
-           (mmu->root_level == PT64_ROOT_LEVEL))
-               return true;
-
-       return false;
-}
-
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -140,6 +122,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        gpa_t real_gpa;
        gfn_t gfn;
        u32 ac;
+       bool last;
 
        trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
@@ -220,8 +203,10 @@ retry_walk:
                        pte |= PT_ACCESSED_MASK;
                }
 
-               walker->ptes[walker->level - 1] = pte;
-       } while (!FNAME(is_last_gpte)(walker, vcpu, mmu, pte));
+               index = (walker->level - 1) << 1;
+               index |= (pte >> PT_PAGE_SIZE_SHIFT) & 1;
+               last = mmu->last_pte & (1 << index);
+       } while (!last);
 
        if (unlikely(eperm)) {
                errcode |= PFERR_PRESENT_MASK;
-- 
1.7.11.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to