Re: [PATCH v3 7/10] KVM MMU: allow more page become unsync at gfn mapping time

2010-05-05 Thread Xiao Guangrong


Marcelo Tosatti wrote:
> On Wed, Apr 28, 2010 at 11:55:49AM +0800, Xiao Guangrong wrote:
>> In current code, shadow page can become asynchronous only if one
>> shadow page for a gfn, this rule is too strict, in fact, we can
>> let all last mapping page(i.e, it's the pte page) become unsync,
>> and sync them at invlpg or flush tlb time.
>>
>> This patch allow more page become asynchronous at gfn mapping time 
>>
>> Signed-off-by: Xiao Guangrong 
> 
> Xiao,
> 
> This patch breaks Fedora 8 32 install. Reverted patches 5-10.

Hi Marcelo,

Sorry for the delayed reply, since I'm on holiday.

I have found the cause of this issue; two fix patches will be sent soon.
Could you please try them?

Thanks,
Xiao
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 7/10] KVM MMU: allow more page become unsync at gfn mapping time

2010-04-30 Thread Marcelo Tosatti
On Wed, Apr 28, 2010 at 11:55:49AM +0800, Xiao Guangrong wrote:
> In current code, shadow page can become asynchronous only if one
> shadow page for a gfn, this rule is too strict, in fact, we can
> let all last mapping page(i.e, it's the pte page) become unsync,
> and sync them at invlpg or flush tlb time.
> 
> This patch allow more page become asynchronous at gfn mapping time 
> 
> Signed-off-by: Xiao Guangrong 

Xiao,

This patch breaks Fedora 8 32 install. Reverted patches 5-10.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 7/10] KVM MMU: allow more page become unsync at gfn mapping time

2010-04-27 Thread Xiao Guangrong
In the current code, a shadow page can become unsync only if it is the
sole shadow page for a gfn. This rule is too strict; in fact, we can
let every last-level mapping page (i.e., a pte page) become unsync,
and sync them at invlpg or TLB-flush time.

This patch allows more pages to become unsync at gfn mapping time.

Signed-off-by: Xiao Guangrong 
---
 arch/x86/kvm/mmu.c |   81 +++
 1 files changed, 37 insertions(+), 44 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fb0c33c..a60cd51 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1166,26 +1166,6 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
return __mmu_unsync_walk(sp, pvec);
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
-{
-   unsigned index;
-   struct hlist_head *bucket;
-   struct kvm_mmu_page *sp;
-   struct hlist_node *node;
-
-   pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
-   index = kvm_page_table_hashfn(gfn);
-   bucket = &kvm->arch.mmu_page_hash[index];
-   hlist_for_each_entry(sp, node, bucket, hash_link)
-   if (sp->gfn == gfn && !sp->role.direct
-   && !sp->role.invalid) {
-   pgprintk("%s: found role %x\n",
-__func__, sp->role.word);
-   return sp;
-   }
-   return NULL;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
WARN_ON(!sp->unsync);
@@ -1751,47 +1731,60 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, 
gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
-static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+   trace_kvm_mmu_unsync_page(sp);
+   ++vcpu->kvm->stat.mmu_unsync;
+   sp->unsync = 1;
+
+   kvm_mmu_mark_parents_unsync(sp);
+   mmu_convert_notrap(sp);
+}
+
+static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 {
-   unsigned index;
struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
+   unsigned index;
 
-   index = kvm_page_table_hashfn(sp->gfn);
+   index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
-   /* don't unsync if pagetable is shadowed with multiple roles */
+
hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-   if (s->gfn != sp->gfn || s->role.direct)
+   if (s->gfn != gfn || s->role.direct || s->unsync)
continue;
-   if (s->role.word != sp->role.word)
-   return 1;
+   WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+   __kvm_unsync_page(vcpu, s);
}
-   trace_kvm_mmu_unsync_page(sp);
-   ++vcpu->kvm->stat.mmu_unsync;
-   sp->unsync = 1;
-
-   kvm_mmu_mark_parents_unsync(sp);
-
-   mmu_convert_notrap(sp);
-   return 0;
 }
 
 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
  bool can_unsync)
 {
-   struct kvm_mmu_page *shadow;
+   unsigned index;
+   struct hlist_head *bucket;
+   struct kvm_mmu_page *s;
+   struct hlist_node *node, *n;
+   bool need_unsync = false;
+
+   index = kvm_page_table_hashfn(gfn);
+   bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+   hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+   if (s->gfn != gfn || s->role.direct)
+   continue;
 
-   shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-   if (shadow) {
-   if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
+   if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
-   if (shadow->unsync)
-   return 0;
-   if (can_unsync && oos_shadow)
-   return kvm_unsync_page(vcpu, shadow);
-   return 1;
+
+   if (!need_unsync && !s->unsync) {
+   if (!can_unsync || !oos_shadow)
+   return 1;
+   need_unsync = true;
+   }
}
+   if (need_unsync)
+   kvm_unsync_pages(vcpu, gfn);
return 0;
 }
 
-- 
1.6.1.2



--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html