Re: [PATCH 2/2] KVM: MMU: Use base_role.nxe for mmu.nx

2010-09-16 Thread Avi Kivity

 On 09/15/2010 10:48 AM, Roedel, Joerg wrote:

On Tue, Sep 14, 2010 at 06:08:37PM -0400, Marcelo Tosatti wrote:

  For tdp it is better to set base_role.nxe to zero; otherwise duplicate
  tdp page tables can be created if the guest switches between nx/non-nx.


This does not work because bit 63 is marked as reserved if base_role.nxe
is 0. If the walk_addr_generic function then runs with tdp enabled, it
would report a set nx bit as a reserved-bit fault.
We also can't use is_nx() in those paths because it does not distinguish
between l1 and l2 nx. Are there guests that switch the efer.nx bit often
enough for this to matter? If so, I would suggest dropping this patch and
keeping mmu.nx.




I'll do that then.  I'm still unhappy about the duplication.

--
error compiling committee.c: too many arguments to function



Re: [PATCH 2/2] KVM: MMU: Use base_role.nxe for mmu.nx

2010-09-15 Thread Roedel, Joerg
On Tue, Sep 14, 2010 at 06:08:37PM -0400, Marcelo Tosatti wrote:
 
 For tdp it is better to set base_role.nxe to zero; otherwise duplicate
 tdp page tables can be created if the guest switches between nx/non-nx.


This does not work because bit 63 is marked as reserved if base_role.nxe
is 0. If the walk_addr_generic function then runs with tdp enabled, it
would report a set nx bit as a reserved-bit fault.
We also can't use is_nx() in those paths because it does not distinguish
between l1 and l2 nx. Are there guests that switch the efer.nx bit often
enough for this to matter? If so, I would suggest dropping this patch and
keeping mmu.nx.
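
For readers following the argument, here is a minimal sketch of that
interaction, assuming the reset_rsvds_bits_mask()/walk_addr_generic()
behaviour this series touches; the helper names below are made up for
illustration only and are not kernel code:

/*
 * Illustrative sketch only: with nxe forced to 0, bit 63 becomes part
 * of the reserved-bit mask, so a guest PTE that legitimately sets NX
 * would be reported as a reserved-bit fault by the software walker.
 */
static u64 sketch_exb_rsvd_mask(int nxe)
{
	/* same idea as reset_rsvds_bits_mask(): bit 63 is reserved iff !nxe */
	return nxe ? 0 : rsvd_bits(63, 63);
}

static int sketch_gpte_hits_rsvd(u64 gpte, int nxe)
{
	/* under tdp with nxe forced to 0, a set NX bit hits the mask */
	return (gpte & sketch_exb_rsvd_mask(nxe)) != 0;
}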

Joerg

-- 
AMD Operating System Research Center

Advanced Micro Devices GmbH Einsteinring 24 85609 Dornach
General Managers: Alberto Bozzo, Andrew Bowd
Registration: Dornach, Landkr. Muenchen; Registerger. Muenchen, HRB Nr. 43632



Re: [PATCH 2/2] KVM: MMU: Use base_role.nxe for mmu.nx

2010-09-15 Thread Marcelo Tosatti
On Wed, Sep 15, 2010 at 10:48:33AM +0200, Roedel, Joerg wrote:
 On Tue, Sep 14, 2010 at 06:08:37PM -0400, Marcelo Tosatti wrote:
  
  For tdp it is better to set base_role.nxe to zero; otherwise duplicate
  tdp page tables can be created if the guest switches between nx/non-nx.
 
 
 This does not work because bit 63 is marked as reserved if base_role.nxe
 is 0. If the walk_addr_generic function then runs with tdp enabled, it
 would report a set nx bit as a reserved-bit fault.

Ah, OK.

 We also can't use is_nx() in those paths because it does not distinguish
 between l1 and l2 nx. Are there guests that switch the efer.nx bit often
 enough for this to matter? If so, I would suggest dropping this patch
 and keeping mmu.nx.

Well, I don't think it would be a common scenario. Ignore me.



[PATCH 2/2] KVM: MMU: Use base_role.nxe for mmu.nx

2010-09-14 Thread Joerg Roedel
This patch removes the mmu.nx field and uses the equivalent
field mmu.base_role.nxe instead.
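
For context, base_role already carries an nxe bit, which is what makes
this replacement possible. A simplified, abridged sketch of the union
(the real definition, with all role bits, lives in
arch/x86/include/asm/kvm_host.h):

/* simplified sketch only; most role bits omitted and layout abridged */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned nxe:1;	/* mirrors guest EFER.NX for this mmu context */
		/* ... remaining role bits omitted ... */
	};
};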

Signed-off-by: Joerg Roedel joerg.roe...@amd.com
---
 arch/x86/include/asm/kvm_host.h |2 --
 arch/x86/kvm/mmu.c  |   27 +--
 arch/x86/kvm/paging_tmpl.h  |4 ++--
 arch/x86/kvm/x86.c  |3 ---
 4 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8a83177..50506be 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -259,8 +259,6 @@ struct kvm_mmu {
u64 *lm_root;
u64 rsvd_bits_mask[2][4];
 
-   bool nx;
-
u64 pdptrs[4]; /* pae */
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3ce56bf..21d2983 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -238,7 +238,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.efer & EFER_NX;
+	return !!(vcpu->arch.efer & EFER_NX);
 }
 
 static int is_shadow_present_pte(u64 pte)
@@ -2634,7 +2634,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
-	context->nx = false;
+	context->base_role.nxe = 0;
return 0;
 }
 
@@ -2688,7 +2688,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
int maxphyaddr = cpuid_maxphyaddr(vcpu);
u64 exb_bit_rsvd = 0;
 
-	if (!context->nx)
+	if (!context->base_role.nxe)
exb_bit_rsvd = rsvd_bits(63, 63);
switch (level) {
case PT32_ROOT_LEVEL:
@@ -2747,7 +2747,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context,
 					int level)
 {
-	context->nx = is_nx(vcpu);
+	context->base_role.nxe = is_nx(vcpu);
 
reset_rsvds_bits_mask(vcpu, context, level);
 
@@ -2775,7 +2775,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu,
 static int paging32_init_context(struct kvm_vcpu *vcpu,
 struct kvm_mmu *context)
 {
-	context->nx = false;
+	context->base_role.nxe = 0;
 
reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 
@@ -2815,24 +2815,23 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 	context->get_cr3 = get_cr3;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->nx = is_nx(vcpu);
 
 	if (!is_paging(vcpu)) {
-		context->nx = false;
+		context->base_role.nxe = 0;
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
-		context->nx = is_nx(vcpu);
+		context->base_role.nxe = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT64_ROOT_LEVEL;
 	} else if (is_pae(vcpu)) {
-		context->nx = is_nx(vcpu);
+		context->base_role.nxe = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT32E_ROOT_LEVEL;
 	} else {
-		context->nx = false;
+		context->base_role.nxe = 0;
 		reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 		context->gva_to_gpa = paging32_gva_to_gpa;
 		context->root_level = PT32_ROOT_LEVEL;
@@ -2888,21 +2887,21 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 * functions between mmu and nested_mmu are swapped.
 */
 	if (!is_paging(vcpu)) {
-		g_context->nx = false;
+		g_context->base_role.nxe = 0;
 		g_context->root_level = 0;
 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
 	} else if (is_long_mode(vcpu)) {
-		g_context->nx = is_nx(vcpu);
+		g_context->base_role.nxe = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
 		g_context->root_level = PT64_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else if (is_pae(vcpu)) {
-		g_context->nx = is_nx(vcpu);
+		g_context->base_role.nxe = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
 		g_context->root_level = PT32E_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else {
-		g_context->nx = false;
+		g_context->base_role.nxe = false;
 		reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
  

Re: [PATCH 2/2] KVM: MMU: Use base_role.nxe for mmu.nx

2010-09-14 Thread Marcelo Tosatti
On Tue, Sep 14, 2010 at 05:46:13PM +0200, Joerg Roedel wrote:
 This patch removes the mmu.nx field and uses the equivalent
 field mmu.base_role.nxe instead.
 
 Signed-off-by: Joerg Roedel joerg.roe...@amd.com
 ---
  arch/x86/include/asm/kvm_host.h |2 --
  arch/x86/kvm/mmu.c  |   27 +--
  arch/x86/kvm/paging_tmpl.h  |4 ++--
  arch/x86/kvm/x86.c  |3 ---
  4 files changed, 15 insertions(+), 21 deletions(-)
 
 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
 index 8a83177..50506be 100644
 --- a/arch/x86/include/asm/kvm_host.h
 +++ b/arch/x86/include/asm/kvm_host.h
 @@ -259,8 +259,6 @@ struct kvm_mmu {
   u64 *lm_root;
   u64 rsvd_bits_mask[2][4];
  
 - bool nx;
 -
   u64 pdptrs[4]; /* pae */
  };
  
 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
 index 3ce56bf..21d2983 100644
 --- a/arch/x86/kvm/mmu.c
 +++ b/arch/x86/kvm/mmu.c
 @@ -238,7 +238,7 @@ static int is_cpuid_PSE36(void)
  
  static int is_nx(struct kvm_vcpu *vcpu)
  {
 - return vcpu->arch.efer & EFER_NX;
 + return !!(vcpu->arch.efer & EFER_NX);
  }
  
  static int is_shadow_present_pte(u64 pte)
 @@ -2634,7 +2634,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
   context->shadow_root_level = PT32E_ROOT_LEVEL;
   context->root_hpa = INVALID_PAGE;
   context->direct_map = true;
 - context->nx = false;
 + context->base_role.nxe = 0;
   return 0;
  }
  
 @@ -2688,7 +2688,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
   int maxphyaddr = cpuid_maxphyaddr(vcpu);
   u64 exb_bit_rsvd = 0;
  
 - if (!context->nx)
 + if (!context->base_role.nxe)
   exb_bit_rsvd = rsvd_bits(63, 63);
   switch (level) {
   case PT32_ROOT_LEVEL:
 @@ -2747,7 +2747,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
   struct kvm_mmu *context,
   int level)
  {
 - context->nx = is_nx(vcpu);
 + context->base_role.nxe = is_nx(vcpu);
  
   reset_rsvds_bits_mask(vcpu, context, level);
  
 @@ -2775,7 +2775,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu,
  static int paging32_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
  {
 - context->nx = false;
 + context->base_role.nxe = 0;
  
   reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
  
 @@ -2815,24 +2815,23 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
   context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
   context->get_cr3 = get_cr3;
   context->inject_page_fault = kvm_inject_page_fault;
 - context->nx = is_nx(vcpu);
  
   if (!is_paging(vcpu)) {
 - context->nx = false;
 + context->base_role.nxe = 0;
   context->gva_to_gpa = nonpaging_gva_to_gpa;
   context->root_level = 0;
   } else if (is_long_mode(vcpu)) {
 - context->nx = is_nx(vcpu);
 + context->base_role.nxe = is_nx(vcpu);
   reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
   context->gva_to_gpa = paging64_gva_to_gpa;
   context->root_level = PT64_ROOT_LEVEL;
   } else if (is_pae(vcpu)) {
 - context->nx = is_nx(vcpu);
 + context->base_role.nxe = is_nx(vcpu);
   reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
   context->gva_to_gpa = paging64_gva_to_gpa;
   context->root_level = PT32E_ROOT_LEVEL;
   } else {
 - context->nx = false;
 + context->base_role.nxe = 0;
   reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
   context->gva_to_gpa = paging32_gva_to_gpa;
   context->root_level = PT32_ROOT_LEVEL;

For tdp it is better to set base_role.nxe to zero; otherwise duplicate
tdp page tables can be created if the guest switches between nx/non-nx.
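
A rough illustration of the concern, assuming the usual role-keyed
lookup of shadow/tdp pages (as in kvm_mmu_get_page()); the struct and
helper below are made up for illustration, not kernel code, though
gfn_t and kvm_mmu_page_role are the real types from kvm_host.h:

/*
 * Illustrative sketch only: shadow (and tdp) pages are found by
 * (gfn, role).  If base_role.nxe follows guest EFER.NX, toggling NX
 * changes role.word, the lookup misses, and a second set of tdp page
 * tables is instantiated for the same guest memory.
 */
struct sketch_sp_key {
	gfn_t gfn;
	union kvm_mmu_page_role role;	/* nxe is one of the role bits */
};

static int sketch_sp_reusable(const struct sketch_sp_key *a,
			      const struct sketch_sp_key *b)
{
	/* a flipped nxe bit makes role.word differ, so no reuse */
	return a->gfn == b->gfn && a->role.word == b->role.word;
}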
