Sort all valid variable MTRRs by their base address; this will help us
check whether a range is fully contained in the variable MTRRs.
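
For example, with the list kept sorted in ascending base order, a later
lookup can walk it once and stop as soon as an entry's base is past the
address that still needs to be covered.  A minimal sketch of such a
check (illustrative only, not part of this patch; the helper name
mtrr_range_fully_covered() is made up, and var_mtrr_range(), used by the
patch below, is assumed to fill in an exclusive end):

  static bool mtrr_range_fully_covered(struct kvm_mtrr *mtrr_state,
                                       u64 start, u64 end)
  {
          struct kvm_mtrr_range *tmp;
          u64 r_start, r_end;

          list_for_each_entry(tmp, &mtrr_state->head, node) {
                  /* [r_start, r_end) covered by this variable MTRR. */
                  var_mtrr_range(tmp, &r_start, &r_end);

                  /* sorted by base: no later entry can cover 'start'. */
                  if (r_start > start)
                          break;

                  /* this entry reaches the end of the checked range. */
                  if (r_end >= end)
                          return true;

                  /* [start, r_end) is covered; continue from r_end. */
                  if (r_end > start)
                          start = r_end;
          }

          return false;
  }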

Signed-off-by: Xiao Guangrong <guangrong.x...@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |  3 ++
 arch/x86/kvm/mtrr.c             | 63 ++++++++++++++++++++++++++++++++++-------
 arch/x86/kvm/x86.c              |  2 +-
 arch/x86/kvm/x86.h              |  1 +
 4 files changed, 58 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f735548..f2d60cc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -345,12 +345,15 @@ enum {
 struct kvm_mtrr_range {
        u64 base;
        u64 mask;
+       struct list_head node;
 };
 
 struct kvm_mtrr {
        struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
        mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
        u64 deftype;
+
+       struct list_head head;
 };
 
 struct kvm_vcpu_arch {
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index cb9702d..c06ec13 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -281,6 +281,52 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
        kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
 }
 
+static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
+{
+       u64 start, end;
+
+       if (!(range->mask & (1 << 11))) /* MTRRphysMask V (valid) bit */
+               return false;
+
+       var_mtrr_range(range, &start, &end);
+       return end > start;
+}
+
+static void set_var_mtrr_start(struct kvm_mtrr *mtrr_state, int index)
+{
+       /* remove the entry if it's in the list. */
+       if (var_mtrr_range_is_valid(&mtrr_state->var_ranges[index]))
+               list_del(&mtrr_state->var_ranges[index].node);
+}
+
+static void set_var_mtrr_end(struct kvm_mtrr *mtrr_state, int index)
+{
+       struct kvm_mtrr_range *tmp, *cur = &mtrr_state->var_ranges[index];
+
+       /* if valid, add it to the list, kept sorted by base address. */
+       if (var_mtrr_range_is_valid(cur)) {
+               list_for_each_entry(tmp, &mtrr_state->head, node)
+                       if (cur->base < tmp->base)
+                               break;
+
+               list_add_tail(&cur->node, &tmp->node);
+       }
+}
+
+static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       int index, is_mtrr_mask;
+
+       index = (msr - 0x200) / 2;              /* 0x200 == MTRRphysBase0 */
+       is_mtrr_mask = msr - 0x200 - 2 * index; /* odd MSRs are the masks */
+       set_var_mtrr_start(&vcpu->arch.mtrr_state, index);
+       if (!is_mtrr_mask)
+               vcpu->arch.mtrr_state.var_ranges[index].base = data;
+       else
+               vcpu->arch.mtrr_state.var_ranges[index].mask = data;
+       set_var_mtrr_end(&vcpu->arch.mtrr_state, index);
+}
+
 int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        int index;
@@ -295,16 +341,8 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                vcpu->arch.mtrr_state.deftype = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
-       else {  /* Variable MTRRs */
-               int is_mtrr_mask;
-
-               index = (msr - 0x200) / 2;
-               is_mtrr_mask = msr - 0x200 - 2 * index;
-               if (!is_mtrr_mask)
-                       vcpu->arch.mtrr_state.var_ranges[index].base = data;
-               else
-                       vcpu->arch.mtrr_state.var_ranges[index].mask = data;
-       }
+       else
+               set_var_mtrr_msr(vcpu, msr, data);
 
        update_mtrr(vcpu, msr);
        return 0;
@@ -350,6 +388,11 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        return 0;
 }
 
+void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
+{
+       INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
+}
+
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2ffad7f..6574fa3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7379,13 +7379,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
        int r;
 
+       kvm_vcpu_mtrr_init(vcpu);
        r = vcpu_load(vcpu);
        if (r)
                return r;
        kvm_vcpu_reset(vcpu, false);
        kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
-
        return r;
 }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index aeb0bb2..0e4727c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -162,6 +162,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);
 
+void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
-- 
2.1.0
