Let the mmio spte only use bit 62 and bit 63 of the upper 32 bits, so that
bits 52 ~ 61 can be used for other purposes.

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
 arch/x86/kvm/vmx.c | 4 ++--
 arch/x86/kvm/x86.c | 8 +++++++-
 2 files changed, 9 insertions(+), 3 deletions(-)
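
A minimal illustration (not part of the patch) of how the two masks end up
being composed after this change; the MAXPHYADDR value of 40 below is only
an example:

        /* Sketch only: MMIO spte masks after this change. */
        #include <stdio.h>
        #include <inttypes.h>

        int main(void)
        {
                unsigned int maxphyaddr = 40;   /* example value */

                /* EPT case: magic bits 62-63 plus the write/execute bits. */
                uint64_t ept_mmio_mask = (0x3ull << 62) | 0x6ull;

                /* Shadow paging case: reserved PA bits, bits 62-63, present bit. */
                uint64_t mmio_mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;

                mmio_mask |= 0x3ull << 62;      /* always reserved on a 32bit host */
                mmio_mask |= 1ull;              /* present bit */

                /* Bits 52 ~ 61 stay clear in both masks and remain available. */
                printf("ept mmio mask:    0x%" PRIx64 "\n", ept_mmio_mask);
                printf("shadow mmio mask: 0x%" PRIx64 "\n", mmio_mask);
                return 0;
        }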

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 260a919..78ee123 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4176,10 +4176,10 @@ static void ept_set_mmio_spte_mask(void)
        /*
         * EPT Misconfigurations can be generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
-        * Also, magic bits (0xffull << 49) is set to quickly identify mmio
+        * Also, magic bits (0x3ull << 62) are set to quickly identify mmio
         * spte.
         */
-       kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
+       kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
 }
 
 /*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6402951..54059ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5263,7 +5263,13 @@ static void kvm_set_mmio_spte_mask(void)
         * Set the reserved bits and the present bit of an paging-structure
         * entry to generate page fault with PFER.RSV = 1.
         */
-       mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
+        /* Mask the reserved physical address bits. */
+       mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
+
+       /* Bit 62 is always reserved for 32bit host. */
+       mask |= 0x3ull << 62;
+
+       /* Set the present bit. */
        mask |= 1ull;
 
 #ifdef CONFIG_X86_64
-- 
1.8.1.4
