Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-06-08 Thread David Matlack
On Sat, May 30, 2015 at 3:59 AM, Xiao Guangrong
guangrong.x...@linux.intel.com wrote:
 It walks all MTRRs and gets all the memory cache type settings for the
 specified range; it also checks whether the range is fully covered by MTRRs

 Signed-off-by: Xiao Guangrong guangrong.x...@linux.intel.com
 ---
  arch/x86/kvm/mtrr.c | 183 
 
  1 file changed, 183 insertions(+)

 diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
 index e59d138..35f86303 100644
 --- a/arch/x86/kvm/mtrr.c
 +++ b/arch/x86/kvm/mtrr.c
 @@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
  }

 +struct mtrr_looker {
 +   /* input fields. */
 +   struct kvm_mtrr *mtrr_state;
 +   u64 start;
 +   u64 end;
 +
 +   /* output fields. */
 +   int mem_type;
 +   /* [start, end) is fully covered in MTRRs? */

s/fully/not fully/ ?

 +   bool partial_map;
 +
 +   /* private fields. */
 +   union {
 +   /* used for fixed MTRRs. */
 +   struct {
 +   int index;
 +   int seg;
 +   };
 +
 +   /* used for var MTRRs. */
 +   struct {
 +   struct kvm_mtrr_range *range;
 +   /* max address has been covered in var MTRRs. */
 +   u64 start_max;
 +   };
 +   };
 +
 +   bool fixed;
 +};
 +
 +static void mtrr_lookup_init(struct mtrr_looker *looker,
 +struct kvm_mtrr *mtrr_state, u64 start, u64 end)
 +{
 +   looker->mtrr_state = mtrr_state;
 +   looker->start = start;
 +   looker->end = end;
 +}
 +
 +static u64 fixed_mtrr_range_end_addr(int seg, int index)
 +{
 +   struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
 +
 +   return mtrr_seg->start + mtrr_seg->range_size * index;

Should be (index + 1)?

 +}
 +
 +static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
 +{
 +   int seg, index;
 +
 +   if (!looker->mtrr_state->fixed_mtrr_enabled)
 +   return false;
 +
 +   seg = fixed_mtrr_addr_to_seg(looker->start);
 +   if (seg < 0)
 +   return false;
 +
 +   looker->fixed = true;
 +   index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
 +   looker->index = index;
 +   looker->seg = seg;
 +   looker->mem_type = looker->mtrr_state->fixed_ranges[index];
 +   looker->start = fixed_mtrr_range_end_addr(seg, index);
 +   return true;
 +}
 +
 +static bool match_var_range(struct mtrr_looker *looker,
 +   struct kvm_mtrr_range *range)
 +{
 +   u64 start, end;
 +
 +   var_mtrr_range(range, &start, &end);
 +   if (!(start >= looker->end || end <= looker->start)) {
 +   looker->range = range;
 +   looker->mem_type = range->base & 0xff;
 +
 +   /*
 +* the function is called when we do kvm_mtrr.head walking
 +* that means range has the minimum base address interleaves
 +* with [looker->start_max, looker->end).
 +*/

I'm having trouble understanding this comment. I think this is what you
are trying to say:

  this function is called when we do kvm_mtrr.head walking. range has the
  minimum base address which interleaves [looker->start_max, looker->end).

Let me know if I parsed it wrong.

 +   looker->partial_map |= looker->start_max < start;
 +
 +   /* update the max address has been covered. */
 +   looker->start_max = max(looker->start_max, end);
 +   return true;
 +   }
 +
 +   return false;
 +}
 +
 +static void mtrr_lookup_var_start(struct mtrr_looker *looker)
 +{
 +   struct kvm_mtrr *mtrr_state = looker->mtrr_state;
 +   struct kvm_mtrr_range *range;
 +
 +   looker->fixed = false;
 +   looker->partial_map = false;
 +   looker->start_max = looker->start;
 +   looker->mem_type = -1;
 +
 +   list_for_each_entry(range, &mtrr_state->head, node)
 +   if (match_var_range(looker, range))
 +   return;
 +
 +   looker->partial_map = true;
 +}
 +
 +static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
 +{
 +   struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
 +   struct kvm_mtrr *mtrr_state = looker->mtrr_state;
 +   u64 end;
 +
 +   if (looker->start >= looker->end) {
 +   looker->mem_type = -1;
 +   looker->partial_map = false;
 +   return;
 +   }
 +
 +   WARN_ON(!looker->fixed);
 +
 +   looker->index++;
 +   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
 +
 +   /* switch to next segment. */
 +   if (end >= eseg->end) {
 +   looker->seg++;
 +   looker->index = 0;
 +
 +   /* have looked up for all fixed MTRRs. */
 +   if (looker->seg >= ARRAY_SIZE(fixed_seg_table))

Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-06-08 Thread Xiao Guangrong



On 06/09/2015 08:36 AM, David Matlack wrote:

On Sat, May 30, 2015 at 3:59 AM, Xiao Guangrong
guangrong.x...@linux.intel.com wrote:

It walks all MTRRs and gets all the memory cache type settings for the
specified range; it also checks whether the range is fully covered by MTRRs

Signed-off-by: Xiao Guangrong guangrong.x...@linux.intel.com
---
  arch/x86/kvm/mtrr.c | 183 
  1 file changed, 183 insertions(+)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
  }

+struct mtrr_looker {
+   /* input fields. */
+   struct kvm_mtrr *mtrr_state;
+   u64 start;
+   u64 end;
+
+   /* output fields. */
+   int mem_type;
+   /* [start, end) is fully covered in MTRRs? */


s/fully/not fully/ ?


Yup, thanks for pointing it out.
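So in v2 the field comment would presumably read along these lines (wording illustrative, not taken from the posted patch):

        /* whether [start, end) is NOT fully covered in MTRRs. */
        bool partial_map;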




+   bool partial_map;
+
+   /* private fields. */
+   union {
+   /* used for fixed MTRRs. */
+   struct {
+   int index;
+   int seg;
+   };
+
+   /* used for var MTRRs. */
+   struct {
+   struct kvm_mtrr_range *range;
+   /* max address has been covered in var MTRRs. */
+   u64 start_max;
+   };
+   };
+
+   bool fixed;
+};
+
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+   looker->mtrr_state = mtrr_state;
+   looker->start = start;
+   looker->end = end;
+}
+
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+   struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+   return mtrr_seg->start + mtrr_seg->range_size * index;


Should be (index + 1)?


Good eyes, will fix.
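For reference, a minimal sketch of the helper with the (index + 1) fix applied, assuming the segment table layout stays as in this posting:

        static u64 fixed_mtrr_range_end_addr(int seg, int index)
        {
                struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

                /* a fixed range ends one range_size beyond its start, hence index + 1. */
                return mtrr_seg->start + mtrr_seg->range_size * (index + 1);
        }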




+}
+
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+   int seg, index;
+
+   if (!looker->mtrr_state->fixed_mtrr_enabled)
+   return false;
+
+   seg = fixed_mtrr_addr_to_seg(looker->start);
+   if (seg < 0)
+   return false;
+
+   looker->fixed = true;
+   index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+   looker->index = index;
+   looker->seg = seg;
+   looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+   looker->start = fixed_mtrr_range_end_addr(seg, index);
+   return true;
+}
+
+static bool match_var_range(struct mtrr_looker *looker,
+   struct kvm_mtrr_range *range)
+{
+   u64 start, end;
+
+   var_mtrr_range(range, &start, &end);
+   if (!(start >= looker->end || end <= looker->start)) {
+   looker->range = range;
+   looker->mem_type = range->base & 0xff;
+
+   /*
+* the function is called when we do kvm_mtrr.head walking
+* that means range has the minimum base address interleaves
+* with [looker->start_max, looker->end).
+*/


I'm having trouble understanding this comment. I think this is what you
are trying to say:

   this function is called when we do kvm_mtrr.head walking. range has the
   minimum base address which interleaves [looker->start_max, looker->end).

Let me know if I parsed it wrong.


Yes, it is, will improve the comment.
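A possible rewording of that comment along the lines David parsed it (illustrative only):

        /*
         * This function is called while walking kvm_mtrr.head, so @range is
         * the range with the lowest base address that overlaps
         * [looker->start_max, looker->end).
         */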



Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-06-02 Thread Xiao Guangrong



On 06/01/2015 05:33 PM, Paolo Bonzini wrote:



On 30/05/2015 12:59, Xiao Guangrong wrote:

+struct mtrr_looker {
+   /* input fields. */
+   struct kvm_mtrr *mtrr_state;
+   u64 start;
+   u64 end;


s/looker/iter/ or s/looker/lookup/


Good to me.




+static void mtrr_lookup_start(struct mtrr_looker *looker)
+{
+   looker->mem_type = -1;
+
+   if (!looker->mtrr_state->mtrr_enabled) {
+   looker->partial_map = true;
+   return;
+   }
+
+   if (!mtrr_lookup_fixed_start(looker))
+   mtrr_lookup_var_start(looker);
+}
+


Separating mtrr_lookup_start and mtrr_lookup_init is weird.


Agreed, will fold mtrr_lookup_start() into mtrr_lookup_init().
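For instance, the folded initializer could look roughly like this (a sketch only, reusing the fields and helpers from this posting; the actual v2 shape may differ):

        static void mtrr_lookup_init(struct mtrr_looker *looker,
                                     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
        {
                looker->mtrr_state = mtrr_state;
                looker->start = start;
                looker->end = end;
                looker->mem_type = -1;
                looker->partial_map = false;

                /* nothing to walk if MTRRs are disabled. */
                if (!mtrr_state->mtrr_enabled) {
                        looker->partial_map = true;
                        return;
                }

                if (!mtrr_lookup_fixed_start(looker))
                        mtrr_lookup_var_start(looker);
        }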



There are common parts of mtrr_lookup_*_start and mtrr_lookup_*_next.
For example this:


+   looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+   looker->start = fixed_mtrr_range_end_addr(seg, index);
+   return true;


in mtrr_lookup_fixed_start is the same as this:



+   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+   /* switch to next segment. */
+   if (end >= eseg->end) {
+   looker->seg++;
+   looker->index = 0;
+
+   /* have looked up for all fixed MTRRs. */
+   if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+   return mtrr_lookup_var_start(looker);
+
+   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+   }
+
+   looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+   looker->start = end;


in mtrr_lookup_fixed_next.  Can you try to make them more common?


Yes, i will check them carefully and introduce common functions to do
it.



Basically you should have

+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+   for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
+!mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))


Good idea, will do it in v2.
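As an illustration, a caller could then be written like this (the helper name, the type-merging shortcut and the default-type fallback are made up for the example; the real series has its own merging rules):

        static u8 mtrr_range_mem_type(struct kvm_mtrr *mtrr_state, u64 start, u64 end)
        {
                struct mtrr_looker looker;
                int type = -1;

                mtrr_for_each_mem_type(&looker, mtrr_state, start, end)
                        if (type == -1)
                                type = looker.mem_type; /* real code must merge types */

                /* treat an uncovered or partially covered range as the default type. */
                if (type == -1 || looker.partial_map)
                        type = MTRR_TYPE_WRBACK;        /* placeholder default */

                return (u8)type;
        }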



Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-06-02 Thread Xiao Guangrong



On 06/01/2015 10:26 PM, Paolo Bonzini wrote:



On 01/06/2015 11:33, Paolo Bonzini wrote:

+   looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+   looker->start = fixed_mtrr_range_end_addr(seg, index);
+   return true;

in mtrr_lookup_fixed_start is the same as this:



+   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+   /* switch to next segment. */
+   if (end >= eseg->end) {
+   looker->seg++;
+   looker->index = 0;
+
+   /* have looked up for all fixed MTRRs. */
+   if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+   return mtrr_lookup_var_start(looker);
+
+   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+   }
+
+   looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+   looker->start = end;

in mtrr_lookup_fixed_next.  Can you try to make them more common?

Basically you should have

+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+   for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
+!mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))


... where the above code I quoted would be part of mtrr_lookup_end.


Okay, will do. :)
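Just as a sketch of how that end test might end up looking (assuming v2 keeps signalling completion through mem_type, as the posted patch already does when the cursor runs past the range or off the variable-range list):

        static bool mtrr_lookup_end(struct mtrr_looker *looker)
        {
                /* a negative mem_type means nothing further matched the range. */
                return looker->mem_type < 0;
        }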


Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-06-01 Thread Paolo Bonzini


On 30/05/2015 12:59, Xiao Guangrong wrote:
 +struct mtrr_looker {
 + /* input fields. */
 + struct kvm_mtrr *mtrr_state;
 + u64 start;
 + u64 end;

s/looker/iter/ or s/looker/lookup/

 +static void mtrr_lookup_start(struct mtrr_looker *looker)
 +{
 + looker->mem_type = -1;
 +
 + if (!looker->mtrr_state->mtrr_enabled) {
 + looker->partial_map = true;
 + return;
 + }
 +
 + if (!mtrr_lookup_fixed_start(looker))
 + mtrr_lookup_var_start(looker);
 +}
 +

Separating mtrr_lookup_start and mtrr_lookup_init is weird.

There are common parts of mtrr_lookup_*_start and mtrr_lookup_*_next.  
For example this:

 + looker->mem_type = looker->mtrr_state->fixed_ranges[index];
 + looker->start = fixed_mtrr_range_end_addr(seg, index);
 + return true;

in mtrr_lookup_fixed_start is the same as this:

 
 + end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
 +
 + /* switch to next segment. */
 + if (end >= eseg->end) {
 + looker->seg++;
 + looker->index = 0;
 +
 + /* have looked up for all fixed MTRRs. */
 + if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
 + return mtrr_lookup_var_start(looker);
 +
 + end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
 + }
 +
 + looker->mem_type = mtrr_state->fixed_ranges[looker->index];
 + looker->start = end;

in mtrr_lookup_fixed_next.  Can you try to make them more common?

Basically you should have

+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+   for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
+!mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))

Paolo


Re: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-06-01 Thread Paolo Bonzini


On 01/06/2015 11:33, Paolo Bonzini wrote:
  +  looker->mem_type = looker->mtrr_state->fixed_ranges[index];
  +  looker->start = fixed_mtrr_range_end_addr(seg, index);
  +  return true;
 in mtrr_lookup_fixed_start is the same as this:
 
  
  +  end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
  +
  +  /* switch to next segment. */
  +  if (end >= eseg->end) {
  +  looker->seg++;
  +  looker->index = 0;
  +
  +  /* have looked up for all fixed MTRRs. */
  +  if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
  +  return mtrr_lookup_var_start(looker);
  +
  +  end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
  +  }
  +
  +  looker->mem_type = mtrr_state->fixed_ranges[looker->index];
  +  looker->start = end;
 in mtrr_lookup_fixed_next.  Can you try to make them more common?
 
 Basically you should have
 
 +#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
 + for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_); \
 +  !mtrr_lookup_end(_looker_); mtrr_lookup_next(_looker_))

... where the above code I quoted would be part of mtrr_lookup_end.

Paolo


[PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type

2015-05-30 Thread Xiao Guangrong
It walks all MTRRs and gets all the memory cache type settings for the
specified range; it also checks whether the range is fully covered by MTRRs

Signed-off-by: Xiao Guangrong guangrong.x...@linux.intel.com
---
 arch/x86/kvm/mtrr.c | 183 
 1 file changed, 183 insertions(+)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
 }
 
+struct mtrr_looker {
+   /* input fields. */
+   struct kvm_mtrr *mtrr_state;
+   u64 start;
+   u64 end;
+
+   /* output fields. */
+   int mem_type;
+   /* [start, end) is fully covered in MTRRs? */
+   bool partial_map;
+
+   /* private fields. */
+   union {
+   /* used for fixed MTRRs. */
+   struct {
+   int index;
+   int seg;
+   };
+
+   /* used for var MTRRs. */
+   struct {
+   struct kvm_mtrr_range *range;
+   /* max address has been covered in var MTRRs. */
+   u64 start_max;
+   };
+   };
+
+   bool fixed;
+};
+
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+   looker->mtrr_state = mtrr_state;
+   looker->start = start;
+   looker->end = end;
+}
+
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+   struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+   return mtrr_seg->start + mtrr_seg->range_size * index;
+}
+
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+   int seg, index;
+
+   if (!looker->mtrr_state->fixed_mtrr_enabled)
+   return false;
+
+   seg = fixed_mtrr_addr_to_seg(looker->start);
+   if (seg < 0)
+   return false;
+
+   looker->fixed = true;
+   index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+   looker->index = index;
+   looker->seg = seg;
+   looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+   looker->start = fixed_mtrr_range_end_addr(seg, index);
+   return true;
+}
+
+static bool match_var_range(struct mtrr_looker *looker,
+   struct kvm_mtrr_range *range)
+{
+   u64 start, end;
+
+   var_mtrr_range(range, &start, &end);
+   if (!(start >= looker->end || end <= looker->start)) {
+   looker->range = range;
+   looker->mem_type = range->base & 0xff;
+
+   /*
+* the function is called when we do kvm_mtrr.head walking
+* that means range has the minimum base address interleaves
+* with [looker->start_max, looker->end).
+*/
+   looker->partial_map |= looker->start_max < start;
+
+   /* update the max address has been covered. */
+   looker->start_max = max(looker->start_max, end);
+   return true;
+   }
+
+   return false;
+}
+
+static void mtrr_lookup_var_start(struct mtrr_looker *looker)
+{
+   struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+   struct kvm_mtrr_range *range;
+
+   looker->fixed = false;
+   looker->partial_map = false;
+   looker->start_max = looker->start;
+   looker->mem_type = -1;
+
+   list_for_each_entry(range, &mtrr_state->head, node)
+   if (match_var_range(looker, range))
+   return;
+
+   looker->partial_map = true;
+}
+
+static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
+{
+   struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
+   struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+   u64 end;
+
+   if (looker->start >= looker->end) {
+   looker->mem_type = -1;
+   looker->partial_map = false;
+   return;
+   }
+
+   WARN_ON(!looker->fixed);
+
+   looker->index++;
+   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+   /* switch to next segment. */
+   if (end >= eseg->end) {
+   looker->seg++;
+   looker->index = 0;
+
+   /* have looked up for all fixed MTRRs. */
+   if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+   return mtrr_lookup_var_start(looker);
+
+   end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+   }
+
+   looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+   looker->start = end;
+}
+
+static void mtrr_lookup_var_next(struct mtrr_looker *looker)
+{
+   struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+
+   WARN_ON(looker->fixed);
+
+   looker->mem_type = -1;
+
+   list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
+   if