[PATCH 1/2] KVM VMX: Add all-context INVVPID type support

2010-06-06 Thread Gui Jianfeng
Add all-context INVVPID type support. When the single-context INVVPID type
is not available, fall back to an all-context flush.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/include/asm/vmx.h |1 +
 arch/x86/kvm/vmx.c |   23 +--
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index b4e2840..96a5886 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -377,6 +377,7 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_GLOBAL_BIT  (1ull << 26)

#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT  (1ull << 9) /* (41 - 32) */
+#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT  (1ull << 10) /* (42 - 32) */
 
 #define VMX_EPT_DEFAULT_GAW3
 #define VMX_EPT_MAX_GAW0x4
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 240a407..aaf8a28 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -360,6 +360,11 @@ static inline bool cpu_has_vmx_invvpid_single(void)
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid_global(void)
+{
+	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -508,6 +513,20 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
 }
 
+static inline void vpid_sync_vcpu_global(void)
+{
+   if (cpu_has_vmx_invvpid_global())
+   __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
+}
+
+static inline void vpid_sync_context(struct vcpu_vmx *vmx)
+{
+   if (cpu_has_vmx_invvpid_single())
+   vpid_sync_vcpu_all(vmx);
+   else
+   vpid_sync_vcpu_global();
+}
+
 static inline void ept_sync_global(void)
 {
if (cpu_has_vmx_invept_global())
@@ -1797,7 +1816,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
-   vpid_sync_vcpu_all(to_vmx(vcpu));
+   vpid_sync_context(to_vmx(vcpu));
if (enable_ept)
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
 }
@@ -2753,7 +2772,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
	vmx_fpu_activate(&vmx->vcpu);
	update_exception_bitmap(&vmx->vcpu);
 
-   vpid_sync_vcpu_all(vmx);
+   vpid_sync_context(vmx);
 
ret = 0;
 
-- 
1.6.5.2




[PATCH 2/2] KVM VMX: rename vpid_sync_vcpu_all() to vpid_sync_vcpu_single()

2010-06-06 Thread Gui Jianfeng
The name vpid_sync_vcpu_all isn't appropriate since it only affects
a single VPID, so rename it to vpid_sync_vcpu_single().

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/vmx.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aaf8a28..4d893ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -504,7 +504,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
 }
 
-static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
+static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
 {
	if (vmx->vpid == 0)
		return;
@@ -522,7 +522,7 @@ static inline void vpid_sync_vcpu_global(void)
 static inline void vpid_sync_context(struct vcpu_vmx *vmx)
 {
if (cpu_has_vmx_invvpid_single())
-   vpid_sync_vcpu_all(vmx);
+   vpid_sync_vcpu_single(vmx);
else
vpid_sync_vcpu_global();
 }
-- 
1.6.5.2



[PATCH] KVM VMX: Make sure single type invvpid is supported before issuing this instruction

2010-06-03 Thread Gui Jianfeng
According to the SDM, we need to check whether the single-context INVVPID type
is supported before issuing the INVVPID instruction.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/include/asm/vmx.h |2 ++
 arch/x86/kvm/vmx.c |8 +++-
 2 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 104cf86..6fa40d0 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -376,6 +376,8 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT  (1ull << 26)

+#define VMX_EPT_INVVPID_SINGLE_CONTEXT_BIT  (1ull << (41 - 32))
+
 #define VMX_EPT_DEFAULT_GAW3
 #define VMX_EPT_MAX_GAW0x4
 #define VMX_EPT_MT_EPTE_SHIFT  3
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0e561a5..e2d12bf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -355,6 +355,11 @@ static inline bool cpu_has_vmx_invept_global(void)
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid_single(void)
+{
+	return vmx_capability.vpid & VMX_EPT_INVVPID_SINGLE_CONTEXT_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -499,7 +504,8 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
	if (vmx->vpid == 0)
		return;
 
-	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+	if (cpu_has_vmx_invvpid_single())
+		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
 }
 
 static inline void ept_sync_global(void)
-- 
1.6.5.2



Re: [PATCH] KVM VMX: Make sure single type invvpid is supported before issuing this instruction

2010-06-03 Thread Gui Jianfeng
Sheng Yang wrote:
 On Thursday 03 June 2010 16:44:34 Gui Jianfeng wrote:
 According to the SDM, we need to check whether the single-context INVVPID
 type is supported before issuing the INVVPID instruction.

 Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
 ---
  arch/x86/include/asm/vmx.h |2 ++
  arch/x86/kvm/vmx.c |8 +++-
  2 files changed, 9 insertions(+), 1 deletions(-)

 diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
 index 104cf86..6fa40d0 100644
 --- a/arch/x86/include/asm/vmx.h
 +++ b/arch/x86/include/asm/vmx.h
 @@ -376,6 +376,8 @@ enum vmcs_field {
 #define VMX_EPT_EXTENT_CONTEXT_BIT  (1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT   (1ull << 26)

Hi Jianfeng
 +#define VMX_EPT_INVVPID_SINGLE_CONTEXT_BIT  (1ull << (41 - 32))
 +
 
 VPID is not a part of EPT. And you can keep the name consistent with the
 ones above, like:
 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT
 
 And a comment afterwards may be better than the indirect (41 - 32)?
 

Ok, will post an updated version.

Thanks,
Gui

 Others looks fine to me.
 
 --
 regards
 Yang, Sheng
 
  #define VMX_EPT_DEFAULT_GAW 3
  #define VMX_EPT_MAX_GAW 0x4
  #define VMX_EPT_MT_EPTE_SHIFT   3
 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
 index 0e561a5..e2d12bf 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -355,6 +355,11 @@ static inline bool cpu_has_vmx_invept_global(void)
  return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
  }
 
 +static inline bool cpu_has_vmx_invvpid_single(void)
 +{
 +	return vmx_capability.vpid & VMX_EPT_INVVPID_SINGLE_CONTEXT_BIT;
 +}
 +
  static inline bool cpu_has_vmx_ept(void)
  {
  return vmcs_config.cpu_based_2nd_exec_ctrl &
 @@ -499,7 +504,8 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
 	if (vmx->vpid == 0)
  return;
 
 -	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
 +	if (cpu_has_vmx_invvpid_single())
 +		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
  }

  static inline void ept_sync_global(void)
 
 


[PATCH v2] KVM VMX: Make sure single type invvpid is supported before issuing invvpid instruction

2010-06-03 Thread Gui Jianfeng
According to the SDM, we need to check whether the single-context INVVPID type
is supported before issuing the INVVPID instruction.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/include/asm/vmx.h |2 ++
 arch/x86/kvm/vmx.c |   14 +-
 2 files changed, 15 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 104cf86..4e78b25 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -376,6 +376,8 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT  (1ull << 26)

+#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT  (1ull << 9)
+
 #define VMX_EPT_DEFAULT_GAW3
 #define VMX_EPT_MAX_GAW0x4
 #define VMX_EPT_MT_EPTE_SHIFT  3
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0e561a5..f363fe3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -355,6 +355,17 @@ static inline bool cpu_has_vmx_invept_global(void)
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid_single(void)
+{
+	/*
+	 * Bit 41 of the IA32_VMX_EPT_VPID_CAP MSR indicates whether the
+	 * single-context INVVPID type is supported. vmx_capability.vpid
+	 * holds the upper 32 bits of IA32_VMX_EPT_VPID_CAP, which is why
+	 * VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (bit 9) is used here.
+	 */
+	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -499,7 +510,8 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
	if (vmx->vpid == 0)
		return;
 
-	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+	if (cpu_has_vmx_invvpid_single())
+		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
 }
 
 static inline void ept_sync_global(void)
-- 
1.6.5.2
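
For context on the comment in this patch: vmx_capability is filled in
setup_vmcs_config() by splitting the 64-bit IA32_VMX_EPT_VPID_CAP MSR into
two 32-bit halves, which is why MSR bit 41 shows up as bit 9 of the vpid
half. A minimal sketch of that line (paraphrased from vmx.c):

	/* low 32 bits -> vmx_capability.ept, high 32 bits -> vmx_capability.vpid */
	rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
	      vmx_capability.ept, vmx_capability.vpid);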




[PATCH v3] KVM VMX: Make sure single type invvpid is supported before issuing invvpid instruction

2010-06-03 Thread Gui Jianfeng
According to the SDM, we need to check whether the single-context INVVPID type
is supported before issuing the INVVPID instruction.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/include/asm/vmx.h |2 ++
 arch/x86/kvm/vmx.c |8 +++-
 2 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 104cf86..b4e2840 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -376,6 +376,8 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT  (1ull << 26)

+#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT  (1ull << 9) /* (41 - 32) */
+
 #define VMX_EPT_DEFAULT_GAW3
 #define VMX_EPT_MAX_GAW0x4
 #define VMX_EPT_MT_EPTE_SHIFT  3
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0e561a5..240a407 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -355,6 +355,11 @@ static inline bool cpu_has_vmx_invept_global(void)
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid_single(void)
+{
+	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -499,7 +504,8 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
	if (vmx->vpid == 0)
		return;
 
-	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+	if (cpu_has_vmx_invvpid_single())
+		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
 }
 
 static inline void ept_sync_global(void)
-- 
1.6.5.2




[PATCH] kvm mmu: Don't calculate quadrant if tdp_enabled.

2010-05-31 Thread Gui Jianfeng
There's no need to calculate quadrant if tdp is enabled.
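
For reference, the quadrant computed in the hunk below selects which 512-entry
shadow page a guest address falls into when 1024-entry 32-bit guest page
tables are shadowed by 64-bit shadow pages. A worked sketch of the arithmetic
(assuming PAGE_SHIFT = 12, PT32_PT_BITS = 10, PT64_PT_BITS = 9):

	/*
	 * level 1: a 32-bit guest page table maps 4MB, but one shadow page
	 * holds only 512 entries (2MB), so bit 21 picks the half:
	 *   quadrant = (gaddr >> (12 + 9*1)) & ((1 << (10 - 9)*1) - 1)
	 *            = (gaddr >> 21) & 1
	 * level 2: the single guest root is shadowed by four shadow roots:
	 *   quadrant = (gaddr >> (12 + 9*2)) & ((1 << (10 - 9)*2) - 1)
	 *            = (gaddr >> 30) & 3
	 * With tdp the shadow pages map guest physical addresses directly,
	 * so no quadrant is needed.
	 */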

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0bb9f17..431863b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1346,7 +1346,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
-	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
-- 
1.6.5.2


[PATCH] kvm mmu: optimizations when tdp is in use

2010-05-27 Thread Gui Jianfeng
When tdp is in use, there is no need to check for write-protected pages, and
the quadrant doesn't need to be calculated either.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |7 +--
 1 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0bb9f17..ce4bbd3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -495,10 +495,13 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
		kvm_x86_ops->get_lpage_level() : host_level;
 
+	if (tdp_enabled)
+		goto done;
+
	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
			break;
-
+done:
	return level - 1;
 }
 
@@ -1346,7 +1349,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
-	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
-- 
1.6.5.2


[PATCH] kvm mmu: don't check PT_WRITABLE_MASK directly

2010-05-27 Thread Gui Jianfeng
Since we have is_writable_pte(), make use of it.
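
For reference, the helper is just a readability wrapper around the same mask
test; roughly (a sketch of its definition in mmu.c, the exact return type may
differ):

	static int is_writable_pte(unsigned long pte)
	{
		return pte & PT_WRITABLE_MASK;
	}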

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |8 
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ce4bbd3..441a5d8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2923,7 +2923,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
-			if (pt[i] & PT_WRITABLE_MASK)
+			if (is_writable_pte(pt[i]))
				pt[i] &= ~PT_WRITABLE_MASK;
	}
kvm_flush_remote_tlbs(kvm);
@@ -3358,7 +3358,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
struct kvm_mmu_page *rev_sp;
gfn_t gfn;
 
-	if (*sptep & PT_WRITABLE_MASK) {
+	if (is_writable_pte(*sptep)) {
		rev_sp = page_header(__pa(sptep));
		gfn = rev_sp->gfns[sptep - rev_sp->spt];
 
@@ -3408,7 +3408,7 @@ static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
 
			if (!(ent & PT_PRESENT_MASK))
				continue;
-			if (!(ent & PT_WRITABLE_MASK))
+			if (!is_writable_pte(ent))
				continue;
			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
		}
@@ -3442,7 +3442,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 
		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
-			if (*spte & PT_WRITABLE_MASK)
+			if (is_writable_pte(*spte))
				printk(KERN_ERR "%s: (%s) shadow page has "
				       "writable mappings: gfn %lx role %x\n",
				       __func__, audit_msg, sp->gfn,
-- 
1.6.5.2



Re: [PATCH] kvm mmu: optimizations when tdp is in use

2010-05-27 Thread Gui Jianfeng
Marcelo Tosatti wrote:
 On Thu, May 27, 2010 at 04:06:34PM +0800, Gui Jianfeng wrote:
 When tdp is in use, there is no need to check for write-protected pages, and
 the quadrant doesn't need to be calculated either.

 Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
 ---
  arch/x86/kvm/mmu.c |7 +--
  1 files changed, 5 insertions(+), 2 deletions(-)

 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
 index 0bb9f17..ce4bbd3 100644
 --- a/arch/x86/kvm/mmu.c
 +++ b/arch/x86/kvm/mmu.c
 @@ -495,10 +495,13 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
 		kvm_x86_ops->get_lpage_level() : host_level;
  
 +	if (tdp_enabled)
 +		goto done;
 +
 
 This is wrong. write_count is initialized for alignment purposes, not
 only for write-protected pages. See __kvm_set_memory_region in
 virt/kvm/kvm_main.c.

Thanks, Avi also pointed this out.
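
For the record, the alignment logic Marcelo refers to looks roughly like this
(a paraphrased sketch of __kvm_set_memory_region(), not the literal code):

	/*
	 * Slots whose start or end gfn is not large-page aligned get
	 * write_count forced to 1 on the boundary large pages, so
	 * has_wrprotected_page() can return nonzero even with tdp:
	 *
	 *   if (slot base gfn is not large-page aligned)
	 *           lpage_info[first].write_count = 1;
	 *   if (slot end gfn is not large-page aligned)
	 *           lpage_info[last].write_count = 1;
	 */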

Gui

 
 	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
 		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
 			break;
 -
 +done:
  return level - 1;
  }
  
 @@ -1346,7 +1349,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (role.direct)
 		role.cr4_pae = 0;
 	role.access = access;
 -	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 +	if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 -- 
 1.6.5.2
 
 

-- 
Regards
Gui Jianfeng


[PATCH] KVM: update mmu documentation for role.nxe.

2010-05-11 Thread Gui Jianfeng
There's no member cr4_nxe in struct kvm_mmu_page_role; it is named nxe now.
Update the mmu documentation.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 Documentation/kvm/mmu.txt |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/Documentation/kvm/mmu.txt b/Documentation/kvm/mmu.txt
index 0cc28fb..fde7989 100644
--- a/Documentation/kvm/mmu.txt
+++ b/Documentation/kvm/mmu.txt
@@ -161,7 +161,7 @@ Shadow pages contain the following information:
   role.cr4_pae:
 Contains the value of cr4.pae for which the page is valid (e.g. whether
 32-bit or 64-bit gptes are in use).
-  role.cr4_nxe:
+  role.nxe:
 Contains the value of efer.nxe for which the page is valid.
   gfn:
 Either the guest page table containing the translations shadowed by this
-- 
1.6.5.2




Re: [PATCH] KVM: make kvm_mmu_zap_page() return the number of pages it actually freed.

2010-05-06 Thread Gui Jianfeng
Avi Kivity wrote:
 On 05/05/2010 04:03 AM, Gui Jianfeng wrote:
 Currently, kvm_mmu_zap_page() returns the number of freed child sps.
 This might confuse the caller, because the caller doesn't know the actual
 number freed. Let's make kvm_mmu_zap_page() return the number of pages it
 actually freed.


 
 if (kvm_mmu_zap_page(kvm, sp))
 goto restart;
 
 Needs to be updated.

Hi Avi,

If kvm_mmu_zap_page() returns 1, we don't know whether the freed sp is the one
we passed in or a child. So here we just restart the hash walk as long as
kvm_mmu_zap_page() returns a positive number; although an unneeded hash walk
will sometimes happen, this fix keeps the code simpler. The idea comes from
Marcelo.
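
With that convention the caller pattern looks roughly like this (a sketch of
kvm_mmu_unprotect_page(); any positive return value restarts the walk):

	restart:
		hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
			if (sp->gfn == gfn && !sp->role.direct) {
				r = 1;
				if (kvm_mmu_zap_page(kvm, sp))
					goto restart;
			}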

Thanks,
Gui

 



Re: [PATCH 1/3 v2] KVM MMU: make kvm_mmu_zap_page() return the number of zapped sp in total.

2010-05-04 Thread Gui Jianfeng
Marcelo Tosatti wrote:
 On Mon, May 03, 2010 at 09:38:54PM +0800, Gui Jianfeng wrote:
 Hi Marcelo

Actually, it doesn't only affect kvm_mmu_change_mmu_pages(); it also affects
kvm_mmu_remove_some_alloc_mmu_pages(), which is called by the mmu shrink
routine. This makes the upper layer get a wrong number, so I think this
should be fixed. Here is an updated version.

 ---
 From: Gui Jianfeng guijianf...@cn.fujitsu.com

 Currently, in kvm_mmu_change_mmu_pages(kvm, page), used_pages-- is
 performed after calling kvm_mmu_zap_page() regardless of whether the page
 is actually reclaimed, but a root sp won't be reclaimed by
 kvm_mmu_zap_page(). So making kvm_mmu_zap_page() return the total number
 of reclaimed sps makes more sense. A new flag is passed to
 kvm_mmu_zap_page() to indicate whether the top page was reclaimed.
 kvm_mmu_remove_some_alloc_mmu_pages() also relies on kvm_mmu_zap_page()
 returning the total reclaimed number.
 
 Isn't it simpler to have kvm_mmu_zap_page return the number of pages it
 actually freed? Then always restart the hash walk if the return is positive.
 

OK. Although in some cases we might encounter an unneeded hash walk restart,
it's not a big problem. I don't object to this solution and will post a new
patch.

Thanks,
Gui

 
 
 





[PATCH] KVM: make kvm_mmu_zap_page() return the number of pages it actually freed.

2010-05-04 Thread Gui Jianfeng
Currently, kvm_mmu_zap_page() returns the number of freed child sps. This
might confuse the caller, because the caller doesn't know the actual number
freed. Let's make kvm_mmu_zap_page() return the number of pages it actually
freed.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |5 +++--
 1 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51eb6d6..8ab6820 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1503,6 +1503,8 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
+		/* Count self */
+		ret++;
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
@@ -1539,7 +1541,6 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
		page = container_of(kvm->arch.active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		used_pages -= kvm_mmu_zap_page(kvm, page);
-		used_pages--;
	}
	kvm_nr_mmu_pages = used_pages;
	kvm->arch.n_free_mmu_pages = 0;
@@ -2908,7 +2909,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 
	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
-	return kvm_mmu_zap_page(kvm, page) + 1;
+	return kvm_mmu_zap_page(kvm, page);
 }
 
 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
-- 
1.6.5.2




[PATCH] KVM: mark page dirty when page is actually modified.

2010-05-04 Thread Gui Jianfeng
Sometimes cmpxchg_gpte doesn't modify the gpte; in that case, don't mark the
page table page as dirty.
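
The point of the reordering, sketched below: FNAME(cmpxchg_gpte)() returning
nonzero means another vcpu changed the gpte under us and nothing was written,
so the walk is retried and the page must not be marked dirty yet:

	/* sketch of the fixed ordering in the walker */
	if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index,
				pte, pte | PT_ACCESSED_MASK))
		goto walk;	/* lost the race: nothing was written, retry */
	mark_page_dirty(vcpu->kvm, table_gfn);	/* the gpte really changed */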

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/paging_tmpl.h |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 89d66ca..1ad9843 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -177,10 +177,10 @@ walk:
		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
-			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
+			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}
 
@@ -217,11 +217,11 @@ walk:
		bool ret;
 
		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
					  pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
+		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}
-- 
1.6.5.2





[PATCH] KVM: Fix debug output error

2010-05-04 Thread Gui Jianfeng
Fix a debug output error in walk_addr

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/paging_tmpl.h |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 89d66ca..d2c5164 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -229,7 +229,7 @@ walk:
	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-		 __func__, (u64)pte, pt_access, pte_access);
+		 __func__, (u64)pte, pte_access, pt_access);
	return 1;
 
 not_present:
-- 
1.6.5.2



Re: [PATCH 1/3 v2] KVM MMU: make kvm_mmu_zap_page() return the number of zapped sp in total.

2010-05-03 Thread Gui Jianfeng
Marcelo Tosatti wrote:
 On Fri, Apr 23, 2010 at 01:58:22PM +0800, Gui Jianfeng wrote:
 Currently, in kvm_mmu_change_mmu_pages(kvm, page), used_pages-- is
 performed after calling kvm_mmu_zap_page() regardless of whether the page
 is actually reclaimed, but a root sp won't be reclaimed by
 kvm_mmu_zap_page(). So making kvm_mmu_zap_page() return the total number
 of reclaimed sps makes more sense. A new flag is passed to
 kvm_mmu_zap_page() to indicate whether the top page was reclaimed.

 Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
 ---
  arch/x86/kvm/mmu.c |   53 
 +++
  1 files changed, 36 insertions(+), 17 deletions(-)
 
 Gui, 
 
 There will be only a few pinned roots, and there is no need for
 kvm_mmu_change_mmu_pages to be precise at that level (pages will be
 reclaimed through kvm_unmap_hva eventually).

Hi Marcelo

Actually, it doesn't only affect kvm_mmu_change_mmu_pages(); it also affects
kvm_mmu_remove_some_alloc_mmu_pages(), which is called by the mmu shrink
routine. This makes the upper layer get a wrong number, so I think this
should be fixed. Here is an updated version.

---
From: Gui Jianfeng guijianf...@cn.fujitsu.com

Currently, in kvm_mmu_change_mmu_pages(kvm, page), used_pages-- is performed
after calling kvm_mmu_zap_page() regardless of whether the page is actually
reclaimed, but a root sp won't be reclaimed by kvm_mmu_zap_page(). So making
kvm_mmu_zap_page() return the total number of reclaimed sps makes more sense.
A new flag is passed to kvm_mmu_zap_page() to indicate whether the top page
was reclaimed. kvm_mmu_remove_some_alloc_mmu_pages() also relies on
kvm_mmu_zap_page() returning the total reclaimed number.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |   53 +++
 1 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51eb6d6..e545da8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1194,12 +1194,13 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
	--kvm->stat.mmu_unsync;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+   int *self_deleted);
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
		return 1;
	}
 
@@ -1207,7 +1208,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
	kvm_flush_remote_tlbs(vcpu->kvm);
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
return 1;
}
 
@@ -1478,7 +1479,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
struct kvm_mmu_page *sp;
 
for_each_sp(pages, sp, parents, i) {
-   kvm_mmu_zap_page(kvm, sp);
+   kvm_mmu_zap_page(kvm, sp, NULL);
mmu_pages_clear_parents(parents);
zapped++;
}
@@ -1488,7 +1489,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
return zapped;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+   int *self_deleted)
 {
int ret;
 
@@ -1505,11 +1507,16 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
+		/* Count self */
+		ret++;
+		if (self_deleted)
+			*self_deleted = 1;
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
+
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
 }
@@ -1538,8 +1545,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
		page = container_of(kvm->arch.active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
-		used_pages -= kvm_mmu_zap_page(kvm, page);
-		used_pages--;
+		used_pages -= kvm_mmu_zap_page(kvm, page, NULL);
	}
	kvm_nr_mmu_pages = used_pages;
	kvm->arch.n_free_mmu_pages = 0;
@@ -1558,6 +1564,8 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
struct kvm_mmu_page *sp;
struct hlist_node *node

[PATCH] KVM: Fix mmu shrinker error

2010-04-26 Thread Gui Jianfeng
kvm_mmu_remove_one_alloc_mmu_page() assumes kvm_mmu_zap_page() reclaims
only one sp, but that's not the case. This causes the mmu shrinker to return
a wrong number. This patch fixes the counting error.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |   10 +-
 1 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a17db1..c97368e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2914,13 +2914,13 @@ restart:
kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 {
struct kvm_mmu_page *page;
 
	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
-	kvm_mmu_zap_page(kvm, page);
+	return kvm_mmu_zap_page(kvm, page) + 1;
 }
 
 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
@@ -2932,7 +2932,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
	spin_lock(&kvm_lock);
 
	list_for_each_entry(kvm, &vm_list, vm_list) {
-		int npages, idx;
+		int npages, idx, freed_pages;
 
		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
		npages = kvm->arch.n_alloc_mmu_pages -
			 kvm->arch.n_free_mmu_pages;
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
-			kvm_mmu_remove_one_alloc_mmu_page(kvm);
-			cache_count--;
+			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+			cache_count -= freed_pages;
			kvm_freed = kvm;
		}
		nr_to_scan--;
-- 
1.6.5.2




Re: [PATCH 3/3] KVM MMU: Take sp level into account when calculating quadran

2010-04-23 Thread Gui Jianfeng
Gui Jianfeng wrote:
 Take the sp level into account when calculating the quadrant, because the
 quadrant is only needed when level == PT_PAGE_TABLE_LEVEL.

Please ignore this patch, sorry for the noise.

Gui

 
 Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
 ---
  arch/x86/kvm/mmu.c |3 ++-
  1 files changed, 2 insertions(+), 1 deletions(-)
 
 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
 index 640b82d..2a35a65 100644
 --- a/arch/x86/kvm/mmu.c
 +++ b/arch/x86/kvm/mmu.c
  @@ -1324,7 +1324,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
  	if (role.direct)
  		role.cr4_pae = 0;
  	role.access = access;
  -	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
  +	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL &&
  +	    level == PT_PAGE_TABLE_LEVEL) {
  		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
  		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
  		role.quadrant = quadrant;



[PATCH 2/3] KVM MMU: fix sp-unsync type error in trace event definition.

2010-04-22 Thread Gui Jianfeng
sp->unsync is bool now, so update the trace event declaration.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmutrace.h |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 3851f1f..9966e80 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -11,7 +11,7 @@
__field(__u64, gfn) \
__field(__u32, role) \
__field(__u32, root_count) \
-   __field(__u32, unsync)
+   __field(bool, unsync)
 
 #define KVM_MMU_PAGE_ASSIGN(sp) \
	__entry->gfn = sp->gfn;  \
-- 
1.6.5.2




[PATCH 3/3] KVM MMU: Take sp level into account when calculating quadran

2010-04-22 Thread Gui Jianfeng
Take the sp level into account when calculating the quadrant, because the
quadrant is only needed when level == PT_PAGE_TABLE_LEVEL.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 640b82d..2a35a65 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1324,7 +1324,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
-	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL &&
+	    level == PT_PAGE_TABLE_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
-- 
1.6.5.2




Re: [PATCH 1/3] KVM MMU: make kvm_mmu_zap_page() return the number of zapped sp in total.

2010-04-22 Thread Gui Jianfeng
Xiao Guangrong wrote:
 
 Gui Jianfeng wrote:
 Currently, in kvm_mmu_change_mmu_pages(kvm, page), used_pages-- is
 performed after calling kvm_mmu_zap_page() regardless of whether the page
 is actually reclaimed, but a root sp won't be reclaimed by
 kvm_mmu_zap_page(). So making kvm_mmu_zap_page() return the total number
 of reclaimed sps makes more sense. A new flag is passed to
 kvm_mmu_zap_page() to indicate whether the top page was reclaimed.

 
 This bug only hurts the kvm_mmu_change_mmu_pages() function. We'd better
 allow 'self_deleted' to be NULL, so that we can pass NULL in the other
 places.

Ok, will change. Will send a updated version.

 
 @@ -1571,7 +1584,8 @@ restart:
  pgprintk(%s: gfn %lx role %x\n, __func__, gfn,
   sp-role.word);
  r = 1;
  -	if (kvm_mmu_zap_page(kvm, sp))
  +	ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
  +	if (ret > 1 || (ret == 1 && self_deleted == 0))
  goto restart;
 
 Maybe we can keep kvm_mmu_zap_page() returning the number of zapped children,
 with 'self_deleted' indicating whether self was zapped; then we don't need to
 modify those functions, and can just fix kvm_mmu_change_mmu_pages() so that
 if 'self_deleted == 1' it adjusts 'used_pages' accordingly.

I think having kvm_mmu_zap_page() return the total zapped number is more
intuitive, so I'd prefer to retain the original code. Thanks.

Gui,
Thanks

 
 Xiao
 
 
 



[PATCH 1/3 v2] KVM MMU: make kvm_mmu_zap_page() return the number of zapped sp in total.

2010-04-22 Thread Gui Jianfeng
Currently, in kvm_mmu_change_mmu_pages(kvm, page), used_pages-- is performed
after calling kvm_mmu_zap_page() regardless of whether the page is actually
reclaimed, but a root sp won't be reclaimed by kvm_mmu_zap_page(). So making
kvm_mmu_zap_page() return the total number of reclaimed sps makes more sense.
A new flag is passed to kvm_mmu_zap_page() to indicate whether the top page
was reclaimed.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |   53 +++
 1 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a17db1..d0960f1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1195,12 +1195,13 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
	--kvm->stat.mmu_unsync;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+   int *self_deleted);
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
		return 1;
	}
 
@@ -1209,7 +1210,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
	kvm_flush_remote_tlbs(vcpu->kvm);
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		kvm_mmu_zap_page(vcpu->kvm, sp, NULL);
return 1;
}
 
@@ -1480,7 +1481,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
struct kvm_mmu_page *sp;
 
for_each_sp(pages, sp, parents, i) {
-   kvm_mmu_zap_page(kvm, sp);
+   kvm_mmu_zap_page(kvm, sp, NULL);
mmu_pages_clear_parents(parents);
zapped++;
}
@@ -1490,7 +1491,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
return zapped;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+   int *self_deleted)
 {
int ret;
 
@@ -1507,11 +1509,16 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
+		/* Count self */
+		ret++;
+		if (self_deleted)
+			*self_deleted = 1;
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
+
kvm_mmu_reset_last_pte_updated(kvm);
return ret;
 }
@@ -1540,8 +1547,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
		page = container_of(kvm->arch.active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
-		used_pages -= kvm_mmu_zap_page(kvm, page);
-		used_pages--;
+		used_pages -= kvm_mmu_zap_page(kvm, page, NULL);
	}
	kvm_nr_mmu_pages = used_pages;
	kvm->arch.n_free_mmu_pages = 0;
@@ -1560,6 +1566,8 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;
+	int self_deleted = 0;
+	int ret;
 
	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
r = 0;
@@ -1571,7 +1579,8 @@ restart:
		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
-		if (kvm_mmu_zap_page(kvm, sp))
+		ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+		if (ret > 1 || (ret == 1 && self_deleted == 0))
			goto restart;
}
return r;
@@ -1583,6 +1592,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *nn;
+	int ret;
+	int self_deleted = 0;
 
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
@@ -1592,7 +1603,8 @@ restart:
		if (sp->gfn == gfn && !sp->role.direct &&
		    !sp->role.invalid) {
			pgprintk("%s: zap %lx %x\n",
				 __func__, gfn, sp->role.word);
-			if (kvm_mmu_zap_page(kvm, sp))
+			ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+			if (ret > 1 || (ret == 1 && self_deleted == 0

[PATCH 1/4] KVM: Move first pte address calculation out of loop to save some cycles

2010-04-16 Thread Gui Jianfeng
Move first pte address calculation out of loop to save some cycles

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/paging_tmpl.h |6 --
 1 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d9dea28..5910557 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -572,12 +572,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
	int i, offset, nr_present;
	bool reset_host_protection;
+	gpa_t first_pte_gpa;
 
	offset = nr_present = 0;
 
	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
+	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+
	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
@@ -587,8 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
		if (!is_shadow_present_pte(sp->spt[i]))
			continue;
 
-		pte_gpa = gfn_to_gpa(sp->gfn);
-		pte_gpa += (i+offset) * sizeof(pt_element_t);
+		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
-- 
1.6.5.2






[PATCH 2/4] KVM: Make use of is_large_pte() instead

2010-04-16 Thread Gui Jianfeng
Make use of is_large_pte() instead of checking PT_PAGE_SIZE_MASK 
bit directly.
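
is_large_pte() wraps exactly that mask test; roughly (a sketch of its
definition in mmu.c):

	static int is_large_pte(u64 pte)
	{
		return pte & PT_PAGE_SIZE_MASK;
	}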

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/paging_tmpl.h |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 5910557..d0cc07e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -190,10 +190,10 @@ walk:
 
		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
-		    (pte & PT_PAGE_SIZE_MASK) &&
+		    is_large_pte(pte) &&
		    (PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
-		    (pte & PT_PAGE_SIZE_MASK) &&
+		    is_large_pte(pte) &&
		    is_long_mode(vcpu))) {
			int lvl = walker->level;
 
-- 1.6.5.2 



[PATCH 3/4] KVM: Remove unused variable in rmap_next()

2010-04-16 Thread Gui Jianfeng
Remove an unused variable in rmap_next().

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c |2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b44380b..dec8a6d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -651,7 +651,6 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 {
struct kvm_rmap_desc *desc;
-   struct kvm_rmap_desc *prev_desc;
u64 *prev_spte;
int i;
 
@@ -663,7 +662,6 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
-- 1.6.5.2 



[PATCH 4/4] KVM: Get rid of dead function

2010-04-16 Thread Gui Jianfeng
Nobody uses gva_to_page() anymore; get rid of it.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/mmu.c   |   14 --
 include/linux/kvm_host.h |1 -
 2 files changed, 0 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dec8a6d..bb18917 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1619,20 +1619,6 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
}
 }
 
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
-{
-   struct page *page;
-
-   gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
-	if (gpa == UNMAPPED_GVA)
-		return NULL;
-
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
-   return page;
-}
-
 /*
  * The function is based on mtrr_type_lookup() in
  * arch/x86/kernel/cpu/mtrr/generic.c
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 169d077..f1aabe2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -247,7 +247,6 @@ void kvm_put_kvm(struct kvm *kvm);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
 extern struct page *bad_page;
 extern pfn_t bad_pfn;
-- 1.6.5.2 



virsh dump blocking problem

2010-04-05 Thread Gui Jianfeng
Hi all,

I'm not sure whether it's appropriate to post this problem here.
I was playing with virsh under Fedora 12 and started a KVM Fedora 12 guest
with the virsh start command. The guest started successfully.
Then I ran the following command to dump the guest core:
#virsh dump 1 mycoredump (domain id is 1)

This command seems to block and never returns. According to the strace
output, virsh dump is blocking in a poll() call. I think the following
should be the call trace of virsh.

cmdDump()
  - virDomainCoreDump()
- remoteDomainCoreDump()
 - call()
 - remoteIO()
 - remoteIOEventLoop()
  - poll(fds, ARRAY_CARDINALITY(fds), -1)


Has anyone else encountered this problem? Any thoughts?

Thanks
Gui



Re: [PATCH] KVM: VMX: consult IA32_VMX_EPT_VPID_CAP to determine EPT paging-structure memory type

2010-03-25 Thread Gui Jianfeng
Avi Kivity wrote:
 On 03/22/2010 11:13 AM, Sheng Yang wrote:

 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
 index 06108f3..f971b9b 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -1804,9 +1804,15 @@ static u64 construct_eptp(unsigned long root_hpa)
   {
   u64 eptp;

  -	/* TODO write the value reading from MSR */
  -	eptp = VMX_EPT_DEFAULT_MT |
  -		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
  +	if (cpu_has_vmx_eptp_writeback())
  +		eptp = VMX_EPT_MT_WRBACK |
  +			VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
  
 I prefer to ensure WB is supported and used as the default. Otherwise it
 would be big trouble for the memory subsystem (using UC for all memory).
 Both the WB and UC EPT memory types are guaranteed to be supported by the
 hardware.

 And you can remove VMX_EPT_DEFAULT_MT as well.

 
 I agree, hopefully we never ever see a cpu that doesn't support EPT WB.

OK, seems we don't need to have this concern. :)

 



[PATCH] KVM: VMX: consult IA32_VMX_EPT_VPID_CAP to determine EPT paging-structure memory type

2010-03-22 Thread Gui Jianfeng
According to the SDM, we need to configure the EPT paging-structure memory
type by consulting IA32_VMX_EPT_VPID_CAP.
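
The capability helpers used below read the EPT half of that MSR; roughly (as
defined elsewhere in vmx.c, see also the bool cleanup patch later in this
digest):

	static inline bool cpu_has_vmx_eptp_writeback(void)
	{
		return vmx_capability.ept & VMX_EPTP_WB_BIT;
	}

	static inline bool cpu_has_vmx_eptp_uncacheable(void)
	{
		return vmx_capability.ept & VMX_EPTP_UC_BIT;
	}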

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/include/asm/vmx.h |2 ++
 arch/x86/kvm/vmx.c |   12 +---
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index fb9a080..1b33a60 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -374,6 +374,8 @@ enum vmcs_field {
 #define VMX_EPT_MT_EPTE_SHIFT  3
 #define VMX_EPT_GAW_EPTP_SHIFT 3
 #define VMX_EPT_DEFAULT_MT 0x6ull
+#define VMX_EPT_MT_WRBACK  0x6ull
+#define VMX_EPT_MT_UNCACHABLE  0x0ull
 #define VMX_EPT_READABLE_MASK  0x1ull
 #define VMX_EPT_WRITABLE_MASK  0x2ull
 #define VMX_EPT_EXECUTABLE_MASK0x4ull
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 06108f3..f971b9b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1804,9 +1804,15 @@ static u64 construct_eptp(unsigned long root_hpa)
 {
	u64 eptp;
 
-	/* TODO write the value reading from MSR */
-	eptp = VMX_EPT_DEFAULT_MT |
-		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
+	if (cpu_has_vmx_eptp_writeback())
+		eptp = VMX_EPT_MT_WRBACK |
+			VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
+	else if (cpu_has_vmx_eptp_uncacheable())
+		eptp = VMX_EPT_MT_UNCACHABLE |
+			VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
+	else
+		BUG();
+
	eptp |= (root_hpa & PAGE_MASK);
 
return eptp;
-- 
1.6.5.2






[PATCH] KVM: cleanup: change to use bool return values

2010-03-15 Thread Gui Jianfeng
Make use of bool as return values.

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/vmx.c |   72 ++--
 1 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 06108f3..cc0628e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -231,65 +231,65 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
 {
-	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
+	return !!(vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
 }
 
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
 {
-	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
+	return !!(vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
 }
 
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
 {
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
 }
 
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
 {
-	return vmcs_config.cpu_based_exec_ctrl &
-		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+	return !!(vmcs_config.cpu_based_exec_ctrl &
+		  CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
 }
 
 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
 {
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+	return !!(vmcs_config.cpu_based_2nd_exec_ctrl &
+		  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 }
 
 static inline bool cpu_has_vmx_flexpriority(void)
@@ -323,59 +323,59 @@ static inline bool cpu_has_vmx_ept_1g_page(void)
	return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
 }
 
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
 {
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
 }
 
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
 {
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
 }
 
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
 {
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
 }
 
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
 {
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_ENABLE_EPT;
+	return !!(vmcs_config.cpu_based_2nd_exec_ctrl &
+		  SECONDARY_EXEC_ENABLE_EPT);
 }
 
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
 {
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_UNRESTRICTED_GUEST;
+	return !!(vmcs_config.cpu_based_2nd_exec_ctrl &
+		  SECONDARY_EXEC_UNRESTRICTED_GUEST);
 }
 
-static inline int cpu_has_vmx_ple(void)
+static inline bool cpu_has_vmx_ple(void)
 {
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+	return !!(vmcs_config.cpu_based_2nd_exec_ctrl &

[PATCH] KVM: Cleanup: change to use bool return values

2010-03-15 Thread Gui Jianfeng
Make use of bool as return values, and remove some useless bool value
conversions. Thanks to Avi for pointing this out.
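
The reason the explicit !! can go away, sketched: C99 conversion to _Bool
already normalizes any nonzero value to 1, so with a bool return type the
masked expression alone is enough:

	static inline bool cpu_has_vmx_ept_2m_page(void)
	{
		/* conversion to bool yields true for any nonzero result,
		   so no explicit !! is needed */
		return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
	}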

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/vmx.c |   54 ++--
 1 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 06108f3..3ddcfc5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -231,56 +231,56 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
 {
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
 {
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
 }
 
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
 {
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }
 
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
 {
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
 }
 
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
 {
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -300,80 +300,80 @@ static inline bool cpu_has_vmx_flexpriority(void)
 
 static inline bool cpu_has_vmx_ept_execute_only(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_uncacheable(void)
 {
-	return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+	return vmx_capability.ept & VMX_EPTP_UC_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_writeback(void)
 {
-	return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+	return vmx_capability.ept & VMX_EPTP_WB_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_2m_page(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_1g_page(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
 }
 
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+	return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
 }
 
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
 }
 
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
 {
-	return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
 {
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
 }
 
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
 {
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
 }
 
-static inline int

[PATCH] KVM: Remove redundant check in vm_need_virtualize_apic_accesses()

2010-01-28 Thread Gui Jianfeng
flexpriority_enabled implies that cpu_has_vmx_virtualize_apic_accesses()
returns true, so we don't need this check here.
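
A sketch of why the check is redundant: flexpriority_enabled is only set when
cpu_has_vmx_flexpriority() holds, and that already requires APIC access
virtualization (definition paraphrased from vmx.c):

	static inline bool cpu_has_vmx_flexpriority(void)
	{
		return cpu_has_vmx_tpr_shadow() &&
			cpu_has_vmx_virtualize_apic_accesses();
	}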

Signed-off-by: Gui Jianfeng guijianf...@cn.fujitsu.com
---
 arch/x86/kvm/vmx.c |4 +---
 1 files changed, 1 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9f56110..0e6af4a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -358,9 +358,7 @@ static inline int cpu_has_vmx_ple(void)
 
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
-	return flexpriority_enabled &&
-		(cpu_has_vmx_virtualize_apic_accesses()) &&
-		(irqchip_in_kernel(kvm));
+	return flexpriority_enabled && irqchip_in_kernel(kvm);
 }
 
 static inline int cpu_has_vmx_vpid(void)
-- 
1.5.4.rc3

