On 25/09/20 16:34, Alexander Graf wrote:
> We will introduce the concept of MSRs that may not be handled in kernel
> space soon. Some MSRs are directly passed through to the guest, effectively
> making them handled by KVM from user space's point of view.
> 
> This patch introduces all logic required to ensure that MSRs that
> user space wants trapped are not marked as direct access for guests.
> 
> Signed-off-by: Alexander Graf <g...@amazon.com>
> 
> ---
> 
> v7 -> v8:
> 
>   - s/KVM_MSR_ALLOW/KVM_MSR_FILTER/g
> ---

Ok, just some cosmetic fixes on top:

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bb9f438e9e62..692110f2ac6f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -553,7 +553,7 @@ static int svm_cpu_init(int cpu)
 
 }
 
-static int direct_access_msr_idx(u32 msr)
+static int direct_access_msr_slot(u32 msr)
 {
        u32 i;
 
@@ -561,33 +561,33 @@ static int direct_access_msr_idx(u32 msr)
                if (direct_access_msrs[i].index == msr)
                        return i;
 
-       return -EINVAL;
+       return -ENOENT;
 }
 
 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
                                     int write)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       int idx = direct_access_msr_idx(msr);
+       int slot = direct_access_msr_slot(msr);
 
-       if (idx == -EINVAL)
+       if (slot == -ENOENT)
                return;
 
        /* Set the shadow bitmaps to the desired intercept states */
        if (read)
-               set_bit(idx, svm->shadow_msr_intercept.read);
+               set_bit(slot, svm->shadow_msr_intercept.read);
        else
-               clear_bit(idx, svm->shadow_msr_intercept.read);
+               clear_bit(slot, svm->shadow_msr_intercept.read);
 
        if (write)
-               set_bit(idx, svm->shadow_msr_intercept.write);
+               set_bit(slot, svm->shadow_msr_intercept.write);
        else
-               clear_bit(idx, svm->shadow_msr_intercept.write);
+               clear_bit(slot, svm->shadow_msr_intercept.write);
 }
 
 static bool valid_msr_intercept(u32 index)
 {
-       return direct_access_msr_idx(index) != -EINVAL;
+       return direct_access_msr_slot(index) != -ENOENT;
 }
 
 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
@@ -609,7 +609,7 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
        return !!test_bit(bit_write,  &tmp);
 }
 
-static void set_msr_interception_nosync(struct kvm_vcpu *vcpu, u32 *msrpm,
+static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
                                        u32 msr, int read, int write)
 {
        u8 bit_read, bit_write;
@@ -646,7 +646,7 @@ static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                                 int read, int write)
 {
        set_shadow_msr_intercept(vcpu, msr, read, write);
-       set_msr_interception_nosync(vcpu, msrpm, msr, read, write);
+       set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
 }
 
 static u32 *svm_vcpu_alloc_msrpm(void)
@@ -694,7 +694,7 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
                u32 read = test_bit(i, svm->shadow_msr_intercept.read);
                u32 write = test_bit(i, svm->shadow_msr_intercept.write);
 
-               set_msr_interception_nosync(vcpu, svm->msrpm, msr, read, write);
+               set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
        }
 }
 

Reply via email to