[PATCH 6/6] Enable MTRR for EPT

2008-10-09 Thread Sheng Yang
The effective memory type under EPT is a combination of MSR_IA32_CR_PAT and the
memory type field of the EPT entry.

Signed-off-by: Sheng Yang [EMAIL PROTECTED]
---
 arch/x86/kvm/mmu.c |   11 ++-
 arch/x86/kvm/svm.c |6 ++
 arch/x86/kvm/vmx.c |   12 +---
 arch/x86/kvm/x86.c |2 +-
 include/asm-x86/kvm_host.h |3 ++-
 5 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f590142..79cb4a9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mt_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-   u64 dirty_mask, u64 nx_mask, u64 x_mask)
+   u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
 {
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
shadow_dirty_mask = dirty_mask;
shadow_nx_mask = nx_mask;
shadow_x_mask = x_mask;
+   shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1546,6 +1548,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
u64 spte;
int ret = 0;
+   u64 mt_mask = shadow_mt_mask;
+
/*
 * We don't set the accessed bit, since we sometimes want to see
 * whether the guest actually used the pte (in order to detect
@@ -1564,6 +1568,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
spte |= shadow_user_mask;
if (largepage)
spte |= PT_PAGE_SIZE_MASK;
+   if (mt_mask) {
+		mt_mask = get_memory_type(vcpu, gfn) <<
+			  kvm_x86_ops->get_mt_mask_shift();
+   spte |= mt_mask;
+   }
 
	spte |= (u64)pfn << PAGE_SHIFT;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9c4ce65..05efc4e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1912,6 +1912,11 @@ static int get_npt_level(void)
 #endif
 }
 
+static int svm_get_mt_mask_shift(void)
+{
+   return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -1967,6 +1972,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
+   .get_mt_mask_shift = svm_get_mt_mask_shift,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 809427e..3d56554 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3572,6 +3572,11 @@ static int get_ept_level(void)
return VMX_EPT_DEFAULT_GAW + 1;
 }
 
+static int vmx_get_mt_mask_shift(void)
+{
+   return VMX_EPT_MT_EPTE_SHIFT;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -3627,6 +3632,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
+   .get_mt_mask_shift = vmx_get_mt_mask_shift,
 };
 
 static int __init vmx_init(void)
@@ -3682,10 +3688,10 @@ static int __init vmx_init(void)
if (vm_need_ept()) {
bypass_guest_pf = 0;
kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-   VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+   VMX_EPT_WRITABLE_MASK);
kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-   VMX_EPT_EXECUTABLE_MASK);
+   VMX_EPT_EXECUTABLE_MASK,
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
kvm_enable_tdp();
} else
kvm_disable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index df98a1f..dda478e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2614,7 +2614,7 @@ int kvm_arch_init(void *opaque)
kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-   PT_DIRTY_MASK, PT64_NX_MASK, 0);
+   PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
return 0;
 
 out:
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1c25cb7..4b06ca8 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -480,6 +480,7 @@ struct kvm_x86_ops {
 
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
+	int (*get_mt_mask_shift)(void);
 };

Re: [PATCH 6/6] Enable MTRR for EPT

2008-10-09 Thread Avi Kivity
Sheng Yang wrote:
 The effective memory type under EPT is a combination of MSR_IA32_CR_PAT and the
 memory type field of the EPT entry.
   


 @@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
  static u64 __read_mostly shadow_user_mask;
  static u64 __read_mostly shadow_accessed_mask;
  static u64 __read_mostly shadow_dirty_mask;
 +static u64 __read_mostly shadow_mt_mask;
  
   

For shadow, the mt mask is different based on the level of the page
table, so we need an array here.  This can of course be left until
shadow pat is implemented.

 + if (mt_mask) {
 +	mt_mask = get_memory_type(vcpu, gfn) <<
 +		  kvm_x86_ops->get_mt_mask_shift();
 + spte |= mt_mask;
 + }
   

For shadow, it's not a simple shift, since for large pages one of the
bits is at position 12.  So we would need the callback to calculate the
mask value.

Perhaps even simpler, have a 4x8 array, with the first index the page
table level and the second index the memory type.  The initialization
code can prepare the array like it prepares the other masks.

This can wait until we have a shadow pat implementation.
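
A minimal sketch of the table-based approach described above (the names, the
0-based level index, and the choice to key the second index by PAT index rather
than raw memory type are illustrative assumptions, not part of the patch); the
cache-control bit positions used are the architectural ones:

#include <linux/types.h>

/* Illustrative shadow PTE cache-control bits. */
#define SPTE_PWT_MASK		(1ULL << 3)	/* page write-through */
#define SPTE_PCD_MASK		(1ULL << 4)	/* page cache disable */
#define SPTE_PAT_4K_MASK	(1ULL << 7)	/* PAT bit in a 4K PTE */
#define SPTE_PAT_LARGE_MASK	(1ULL << 12)	/* PAT bit in a 2M/1G PTE */

/* [page table level][PAT index] -> bits to OR into the spte */
static u64 shadow_mt_masks[4][8];

static void init_shadow_mt_masks(void)
{
	int level, idx;

	for (level = 0; level < 4; level++) {
		for (idx = 0; idx < 8; idx++) {
			/* The PAT bit moves to bit 12 for large pages. */
			u64 pat_bit = level ? SPTE_PAT_LARGE_MASK :
					      SPTE_PAT_4K_MASK;
			u64 mask = 0;

			if (idx & 1)
				mask |= SPTE_PWT_MASK;
			if (idx & 2)
				mask |= SPTE_PCD_MASK;
			if (idx & 4)
				mask |= pat_bit;
			shadow_mt_masks[level][idx] = mask;
		}
	}
}

set_spte() would then pick up the mask with a lookup such as
"spte |= shadow_mt_masks[level][type]" instead of a plain shift (with "type"
either the raw memory type, as suggested above, or a PAT index derived from
it), and for EPT the initialization would simply store
"type << VMX_EPT_MT_EPTE_SHIFT" for every level.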

-- 
Do not meddle in the internals of kernels, for they are subtle and quick to 
panic.



Re: [PATCH 6/6] Enable MTRR for EPT

2008-10-09 Thread Sheng Yang
On Thursday 09 October 2008 16:44:19 Avi Kivity wrote:
 Sheng Yang wrote:
  The effective memory type under EPT is a combination of MSR_IA32_CR_PAT and
  the memory type field of the EPT entry.
 
 
 
  @@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
   static u64 __read_mostly shadow_user_mask;
   static u64 __read_mostly shadow_accessed_mask;
   static u64 __read_mostly shadow_dirty_mask;
  +static u64 __read_mostly shadow_mt_mask;

 For shadow, the mt mask is different based on the level of the page
 table, so we need an array here.  This can of course be left until
 shadow pat is implemented.

  +   if (mt_mask) {
  +	mt_mask = get_memory_type(vcpu, gfn) <<
  +		  kvm_x86_ops->get_mt_mask_shift();
  +   spte |= mt_mask;
  +   }

 For shadow, it's not a simple shift, since for large pages one of the
 bits is at position 12.  So we would need the callback to calculate the
 mask value.

 Perhaps even simpler, have a 4x8 array, with the first index the page
 table level and the second index the memory type.  The initialization
 code can prepare the array like it prepares the other masks.

 This can wait until we have a shadow pat implementation.

Yes, of course. For now this mask is only used by EPT, so I did it this way. A
later shadow MTRR/PAT implementation would address this as well. :)

-- 
regards
Yang, Sheng