The page table walk is coded as an infinite loop, with a special
case on the last pte.

Code it as an ordinary loop with a termination condition on the last
pte (large page or walk length exhausted), and put the last pte handling
code after the loop where it belongs.

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/kvm/paging_tmpl.h | 58 +++++++++++++++++++---------------------------
 1 file changed, 24 insertions(+), 34 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 59c4319..eb4a668 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -131,12 +131,15 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;
-       bool eperm, last_gpte;
+       bool eperm;
        int offset;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;
        u16 errcode = 0;
+       gpa_t real_gpa;
+       gfn_t gfn;
+       u32 ac;
 
        trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
@@ -156,12 +159,16 @@ retry_walk:
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
 
-       pt_access = ACC_ALL;
+       pt_access = pte_access = ACC_ALL;
+       ++walker->level;
 
-       for (;;) {
+       do {
                gfn_t real_gfn;
                unsigned long host_addr;
 
+               pt_access &= pte_access;
+               --walker->level;
+
                index = PT_INDEX(addr, walker->level);
 
                table_gfn = gpte_to_gfn(pte);
@@ -198,8 +205,6 @@ retry_walk:
                pte_access = pt_access & gpte_access(vcpu, pte);
                eperm |= (mmu->permissions[access >> 1] >> pte_access) & 1;
 
-               last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
-
                if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
                        int ret;
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
@@ -216,41 +221,26 @@ retry_walk:
                }
 
                walker->ptes[walker->level - 1] = pte;
+       } while (!FNAME(is_last_gpte)(walker, vcpu, mmu, pte));
 
-               if (last_gpte) {
-                       int lvl = walker->level;
-                       gpa_t real_gpa;
-                       gfn_t gfn;
-                       u32 ac;
-
-                       gfn = gpte_to_gfn_lvl(pte, lvl);
-                       gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
-
-                       if (PTTYPE == 32 &&
-                           walker->level == PT_DIRECTORY_LEVEL &&
-                           is_cpuid_PSE36())
-                               gfn += pse36_gfn_delta(pte);
-
-                       ac = write_fault | fetch_fault | user_fault;
+       if (unlikely(eperm)) {
+               errcode |= PFERR_PRESENT_MASK;
+               goto error;
+       }
 
-                       real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
-                                                     ac);
-                       if (real_gpa == UNMAPPED_GVA)
-                               return 0;
+       gfn = gpte_to_gfn_lvl(pte, walker->level);
+       gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
 
-                       walker->gfn = real_gpa >> PAGE_SHIFT;
+       if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
+               gfn += pse36_gfn_delta(pte);
 
-                       break;
-               }
+       ac = write_fault | fetch_fault | user_fault;
 
-               pt_access &= pte_access;
-               --walker->level;
-       }
+       real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), ac);
+       if (real_gpa == UNMAPPED_GVA)
+               return 0;
 
-       if (unlikely(eperm)) {
-               errcode |= PFERR_PRESENT_MASK;
-               goto error;
-       }
+       walker->gfn = real_gpa >> PAGE_SHIFT;
 
        if (!write_fault)
                protect_clean_gpte(&pte_access, pte);
-- 
1.7.11.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to