Upper-level shadow page table entries do not need the accessed/dirty status bits
to be tracked; it is safe to set them (including dirty) up front and let the CPU
happily prefetch the entries.

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   13 +++++--------
 1 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6913a16..a2d28aa 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -150,6 +150,9 @@ module_param(dbg, bool, 0644);
 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+#define SHADOW_PAGE_TABLE                                              \
+       (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask |        \
+        shadow_x_mask | shadow_accessed_mask | shadow_dirty_mask)

 static bool sp_is_unsync(struct kvm_mmu_page *sp)
 {
@@ -1808,9 +1811,7 @@ static void link_shadow_page(u64 *sptep, struct 
kvm_mmu_page *sp)
 {
        u64 spte;

-       spte = __pa(sp->spt)
-               | PT_PRESENT_MASK | PT_ACCESSED_MASK
-               | PT_WRITABLE_MASK | PT_USER_MASK;
+       spte = __pa(sp->spt) | SHADOW_PAGE_TABLE;
        mmu_spte_set(sptep, spte);
 }

@@ -2497,11 +2498,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, 
int write,
                                return -ENOMEM;
                        }

-                       mmu_spte_set(iterator.sptep,
-                                    __pa(sp->spt)
-                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
-                                    | shadow_user_mask | shadow_x_mask
-                                    | shadow_accessed_mask);
+                       link_shadow_page(iterator.sptep, sp);
                }
        }
        return emulate;
-- 
1.7.7.4

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to