Re: [RFC][PATCH RFC 10/12] KVM: move dirty bitmaps to user space

2010-05-11 Thread Takuya Yoshikawa



r = 0;
@@ -1195,11 +1232,16 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
gfn = unalias_gfn(kvm, gfn);
memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
-   unsigned long rel_gfn = gfn - memslot->base_gfn;
+   int nr = generic_le_bit_offset(gfn - memslot->base_gfn);

-   generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+   if (kvm_set_bit_user(nr, memslot->dirty_bitmap))
+   goto out_fault;


mark_page_dirty is called with the mmu_lock spinlock held in set_spte.
Must find a way to move it outside of the spinlock section.



Oh, it's a serious problem. I have to consider it.


Thanks,
  Takuya
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


Re: [RFC][PATCH RFC 10/12] KVM: move dirty bitmaps to user space

2010-05-10 Thread Marcelo Tosatti
On Tue, May 04, 2010 at 10:07:02PM +0900, Takuya Yoshikawa wrote:
> We move dirty bitmaps to user space.
> 
>  - Allocation and destruction: we use do_mmap() and do_munmap().
>The new bitmap space is twice as long as the original one and we
>use the additional space for double buffering: this makes it
>possible to update the active bitmap while letting the user space
>read the other one safely. For x86, we can also remove the vmalloc()
>in kvm_vm_ioctl_get_dirty_log().
> 
>  - Bitmap manipulations: we replace all functions which access dirty
>bitmaps with *_user() functions.
> 
>  - For ia64: moving the dirty bitmaps of memory slots does not affect
>ia64 much because ia64 stores dirty logs in a different place
>rather than the dirty bitmaps of memory slots: all we have to change
>are sync and get of dirty log, so we don't need set_bit_user like
>functions for ia64.
> 
> Signed-off-by: Takuya Yoshikawa 
> Signed-off-by: Fernando Luis Vazquez Cao 
> CC: Avi Kivity 
> CC: Alexander Graf 
> ---
>  arch/ia64/kvm/kvm-ia64.c  |   15 +-
>  arch/powerpc/kvm/book3s.c |5 +++-
>  arch/x86/kvm/x86.c|   25 --
>  include/linux/kvm_host.h  |3 +-
>  virt/kvm/kvm_main.c   |   62 +---
>  5 files changed, 82 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index 17fd65c..03503e6 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -1823,11 +1823,19 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
>   n = kvm_dirty_bitmap_bytes(memslot);
>   base = memslot->base_gfn / BITS_PER_LONG;
>  
> + r = -EFAULT;
> + if (!access_ok(VERIFY_WRITE, memslot->dirty_bitmap, n))
> + goto out;
> +
>   for (i = 0; i < n/sizeof(long); ++i) {
>   if (dirty_bitmap[base + i])
>   memslot->is_dirty = true;
>  
> - memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
> + if (__put_user(dirty_bitmap[base + i],
> +&memslot->dirty_bitmap[i])) {
> + r = -EFAULT;
> + goto out;
> + }
>   dirty_bitmap[base + i] = 0;
>   }
>   r = 0;
> @@ -1858,7 +1866,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
>   if (memslot->is_dirty) {
>   kvm_flush_remote_tlbs(kvm);
>   n = kvm_dirty_bitmap_bytes(memslot);
> - memset(memslot->dirty_bitmap, 0, n);
> + if (clear_user(memslot->dirty_bitmap, n)) {
> + r = -EFAULT;
> + goto out;
> + }
>   memslot->is_dirty = false;
>   }
>   r = 0;
> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
> index 4b074f1..2a31d2f 100644
> --- a/arch/powerpc/kvm/book3s.c
> +++ b/arch/powerpc/kvm/book3s.c
> @@ -1210,7 +1210,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
>   kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
>  
>   n = kvm_dirty_bitmap_bytes(memslot);
> - memset(memslot->dirty_bitmap, 0, n);
> + if (clear_user(memslot->dirty_bitmap, n)) {
> + r = -EFAULT;
> + goto out;
> + }
>   memslot->is_dirty = false;
>   }
>  
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 023c7f8..32a3d94 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2760,40 +2760,37 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
>   /* If nothing is dirty, don't bother messing with page tables. */
>   if (memslot->is_dirty) {
>   struct kvm_memslots *slots, *old_slots;
> - unsigned long *dirty_bitmap;
> + unsigned long __user *dirty_bitmap;
> + unsigned long __user *dirty_bitmap_old;
>  
>   spin_lock(&kvm->mmu_lock);
>   kvm_mmu_slot_remove_write_access(kvm, log->slot);
>   spin_unlock(&kvm->mmu_lock);
>  
> - r = -ENOMEM;
> - dirty_bitmap = vmalloc(n);
> - if (!dirty_bitmap)
> + dirty_bitmap = memslot->dirty_bitmap;
> + dirty_bitmap_old = memslot->dirty_bitmap_old;
> + r = -EFAULT;
> + if (clear_user(dirty_bitmap_old, n))
>   goto out;
> - memset(dirty_bitmap, 0, n);
>  
>   r = -ENOMEM;
>   slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
> - if (!slots) {
> - vfree(dirty_bitmap);
> + if (!slots)
>   goto out;
> - }
> +
>   memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
> - slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
> + slots->memslots[log->slot].dirty_bitmap = dirty_bitmap_old;
> + slots->

[RFC][PATCH RFC 10/12] KVM: move dirty bitmaps to user space

2010-05-04 Thread Takuya Yoshikawa
We move dirty bitmaps to user space.

 - Allocation and destruction: we use do_mmap() and do_munmap().
   The new bitmap space is twice as long as the original one and we
   use the additional space for double buffering: this makes it
   possible to update the active bitmap while letting the user space
   read the other one safely. For x86, we can also remove the vmalloc()
   in kvm_vm_ioctl_get_dirty_log().

 - Bitmap manipulations: we replace all functions which access dirty
   bitmaps with *_user() functions.

 - For ia64: moving the dirty bitmaps of memory slots does not affect
   ia64 much because ia64 stores dirty logs in a different place
   rather than the dirty bitmaps of memory slots: all we have to change
   are sync and get of dirty log, so we don't need set_bit_user like
   functions for ia64.

Signed-off-by: Takuya Yoshikawa 
Signed-off-by: Fernando Luis Vazquez Cao 
CC: Avi Kivity 
CC: Alexander Graf 
---
 arch/ia64/kvm/kvm-ia64.c  |   15 +-
 arch/powerpc/kvm/book3s.c |5 +++-
 arch/x86/kvm/x86.c|   25 --
 include/linux/kvm_host.h  |3 +-
 virt/kvm/kvm_main.c   |   62 +---
 5 files changed, 82 insertions(+), 28 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 17fd65c..03503e6 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1823,11 +1823,19 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
n = kvm_dirty_bitmap_bytes(memslot);
base = memslot->base_gfn / BITS_PER_LONG;
 
+   r = -EFAULT;
+   if (!access_ok(VERIFY_WRITE, memslot->dirty_bitmap, n))
+   goto out;
+
for (i = 0; i < n/sizeof(long); ++i) {
if (dirty_bitmap[base + i])
memslot->is_dirty = true;
 
-   memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
+   if (__put_user(dirty_bitmap[base + i],
+  &memslot->dirty_bitmap[i])) {
+   r = -EFAULT;
+   goto out;
+   }
dirty_bitmap[base + i] = 0;
}
r = 0;
@@ -1858,7 +1866,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (memslot->is_dirty) {
kvm_flush_remote_tlbs(kvm);
n = kvm_dirty_bitmap_bytes(memslot);
-   memset(memslot->dirty_bitmap, 0, n);
+   if (clear_user(memslot->dirty_bitmap, n)) {
+   r = -EFAULT;
+   goto out;
+   }
memslot->is_dirty = false;
}
r = 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 4b074f1..2a31d2f 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1210,7 +1210,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
n = kvm_dirty_bitmap_bytes(memslot);
-   memset(memslot->dirty_bitmap, 0, n);
+   if (clear_user(memslot->dirty_bitmap, n)) {
+   r = -EFAULT;
+   goto out;
+   }
memslot->is_dirty = false;
}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 023c7f8..32a3d94 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2760,40 +2760,37 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (memslot->is_dirty) {
struct kvm_memslots *slots, *old_slots;
-   unsigned long *dirty_bitmap;
+   unsigned long __user *dirty_bitmap;
+   unsigned long __user *dirty_bitmap_old;
 
spin_lock(&kvm->mmu_lock);
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
 
-   r = -ENOMEM;
-   dirty_bitmap = vmalloc(n);
-   if (!dirty_bitmap)
+   dirty_bitmap = memslot->dirty_bitmap;
+   dirty_bitmap_old = memslot->dirty_bitmap_old;
+   r = -EFAULT;
+   if (clear_user(dirty_bitmap_old, n))
goto out;
-   memset(dirty_bitmap, 0, n);
 
r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-   if (!slots) {
-   vfree(dirty_bitmap);
+   if (!slots)
goto out;
-   }
+
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-   slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+   slots->memslots[log->slot].dirty_bitmap = dirty_bitmap_old;
+   slots->memslots[log->slot].dirty_bitmap_old = dirty_bitmap;
slots->memslots[log->slot].is_dirty = false;
 
old_slots = kvm->memslots;
r