Clean up the x2APIC MSR bitmap interception code for L2, which is the
last holdout of open-coded bitmap manipulations.  Freshen up the SDM/PRM
comment, rename the function to make it abundantly clear that the funky
behavior is x2APIC-specific, and explain _why_ vmcs01's bitmap is
ignored (the previous comment was flat out wrong for x2APIC behavior).

No functional change intended.

Signed-off-by: Sean Christopherson <sea...@google.com>
---
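For context on the X2APIC_MSR() uses in the patch: architecturally, each
16-byte xAPIC MMIO register maps to one MSR in the x2APIC range starting
at 0x800, i.e. MSR index = 0x800 + (register offset >> 4), e.g. the TPR
at offset 0x80 becomes MSR 0x808.  A minimal standalone sketch of that
mapping (hypothetical helper name, not KVM's macro):

#include <stdint.h>

/* x2APIC MSRs start at 0x800 (e.g. TPR at xAPIC offset 0x80 -> MSR 0x808). */
#define X2APIC_MSR_BASE 0x800u

/* Hypothetical helper: map a 16-byte xAPIC register offset to its x2APIC MSR. */
static inline uint32_t x2apic_msr_for_reg(uint32_t apic_reg_offset)
{
	return X2APIC_MSR_BASE + (apic_reg_offset >> 4);
}
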
 arch/x86/kvm/vmx/nested.c | 53 +++++++++++----------------------------
 arch/x86/kvm/vmx/vmx.h    |  8 ++++++
 2 files changed, 22 insertions(+), 39 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index aff41a432a56..49eeffb79823 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -476,44 +476,19 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
 }
 
 /*
- * If a msr is allowed by L0, we should check whether it is allowed by L1.
- * The corresponding bit will be cleared unless both of L0 and L1 allow it.
+ * For x2APIC MSRs, ignore the vmcs01 bitmap.  L1 can enable x2APIC without
+ * itself utilizing x2APIC.  All MSRs were previously set to be intercepted;
+ * only the disable-intercept case needs to be handled.
  */
-static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
-                                              unsigned long *msr_bitmap_nested,
-                                              u32 msr, int type)
+static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
+                                                       unsigned long *msr_bitmap_l0,
+                                                       u32 msr, int type)
 {
-       int f = sizeof(unsigned long);
+       if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
+               vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
 
-       /*
-        * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
-        * have the write-low and read-high bitmap offsets the wrong way round.
-        * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
-        */
-       if (msr <= 0x1fff) {
-               if (type & MSR_TYPE_R &&
-                  !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
-                       /* read-low */
-                       __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
-
-               if (type & MSR_TYPE_W &&
-                  !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
-                       /* write-low */
-                       __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
-
-       } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
-               msr &= 0x1fff;
-               if (type & MSR_TYPE_R &&
-                  !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
-                       /* read-high */
-                       __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
-
-               if (type & MSR_TYPE_W &&
-                  !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
-                       /* write-high */
-                       __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
-
-       }
+       if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
+               vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
 }
 
 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
@@ -582,7 +557,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        /*
         * To keep the control flow simple, pay eight 8-byte writes (sixteen
         * 4-byte writes on 32-bit systems) up front to enable intercepts for
-        * the x2APIC MSR range and selectively disable them below.
+        * the x2APIC MSR range and selectively toggle those relevant to L2.
         */
        enable_x2apic_msr_intercepts(msr_bitmap_l0);
 
@@ -601,17 +576,17 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                        }
                }
 
-               nested_vmx_disable_intercept_for_msr(
+               nested_vmx_disable_intercept_for_x2apic_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
                        X2APIC_MSR(APIC_TASKPRI),
                        MSR_TYPE_R | MSR_TYPE_W);
 
                if (nested_cpu_has_vid(vmcs12)) {
-                       nested_vmx_disable_intercept_for_msr(
+                       nested_vmx_disable_intercept_for_x2apic_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_EOI),
                                MSR_TYPE_W);
-                       nested_vmx_disable_intercept_for_msr(
+                       nested_vmx_disable_intercept_for_x2apic_msr(
                                msr_bitmap_l1, msr_bitmap_l0,
                                X2APIC_MSR(APIC_SELF_IPI),
                                MSR_TYPE_W);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 42c25fc79427..03ab9ccd95d2 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -393,6 +393,14 @@ void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
        u32 msr, int type, bool value);
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 
+/*
+ * Note, early Intel manuals have the write-low and read-high bitmap offsets
+ * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
+ * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
+ * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
+ * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
+ * VM-Exit.
+ */
 #define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
 static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
                                                        u32 msr)                \
-- 
2.31.0.rc2.261.g7f71774620-goog
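
As a standalone illustration of the bitmap layout described in the vmx.h
comment (a sketch with made-up names, not the kernel's helpers): each 4-KiB
MSR bitmap packs four 1-KiB regions, and locating the intercept bit for an
MSR is a base+offset computation:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only: look up the intercept bit for an MSR in a 4-KiB VMX MSR
 * bitmap.  Low-range (0x0-0x1fff) reads live in bytes 0x000-0x3ff and
 * writes in 0x800-0xbff; high-range (0xc0000000-0xc0001fff) reads live
 * in 0x400-0x7ff and writes in 0xc00-0xfff.
 */
static bool msr_intercepted(const uint8_t bitmap[4096], uint32_t msr, bool write)
{
	uint32_t base;

	if (msr <= 0x1fff)
		base = write ? 0x800 : 0x000;
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		base = write ? 0xc00 : 0x400;
	else
		return true;	/* outside both ranges: always VM-Exit */

	msr &= 0x1fff;	/* 8192 MSRs per range, one bit each */
	return bitmap[base + msr / 8] & (1u << (msr % 8));
}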
