On 06.06.2011, at 15:56, Pekka Enberg wrote:

> This patch converts hw/vesa.c to use guest-mapped memory for framebuffer and
> drops the slow MMIO emulation. This speeds up framebuffer accesses
> considerably. Please note that this can be optimized even more with the
> KVM_GET_DIRTY_LOG ioctl() as explained by Alexander Graf.
> 
> Cc: Alexander Graf <ag...@suse.de>
> Cc: Cyrill Gorcunov <gorcu...@gmail.com>
> Cc: Ingo Molnar <mi...@elte.hu>
> Cc: John Floren <j...@jfloren.net>
> Cc: Sasha Levin <levinsasha...@gmail.com>
> Signed-off-by: Pekka Enberg <penb...@kernel.org>
> ---
> v1 -> v2: Fix mem slot index passed to KVM_SET_USER_MEMORY_REGION
> 
> tools/kvm/hw/vesa.c         |   17 +++++------------
> tools/kvm/include/kvm/kvm.h |    3 +++
> tools/kvm/kvm.c             |   10 +++++-----
> 3 files changed, 13 insertions(+), 17 deletions(-)
> 
> diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
> index 48d31ce..71322fc 100644
> --- a/tools/kvm/hw/vesa.c
> +++ b/tools/kvm/hw/vesa.c
> @@ -8,6 +8,7 @@
> #include "kvm/irq.h"
> #include "kvm/kvm.h"
> #include "kvm/pci.h"
> +#include <sys/mman.h>
> 
> #include <sys/types.h>
> #include <sys/ioctl.h>
> @@ -40,14 +41,6 @@ static struct pci_device_header vesa_pci_device = {
>       .bar[1]                 = VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
> };
> 
> -static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
> -{
> -     if (!is_write)
> -             return;
> -
> -     fb__write(addr, data, len);
> -}
> -
> static struct framebuffer vesafb;
> 
> struct framebuffer *vesa__init(struct kvm *kvm)
> @@ -65,12 +58,12 @@ struct framebuffer *vesa__init(struct kvm *kvm)
>       vesa_pci_device.bar[0]          = vesa_base_addr | 
> PCI_BASE_ADDRESS_SPACE_IO;
>       pci__register(&vesa_pci_device, dev);
> 
> -     kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, 
> &vesa_mmio_callback);
> -
> -     mem = calloc(1, VESA_MEM_SIZE);
> -     if (!mem)
> +     mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
> +     if (mem == MAP_FAILED)
>               return NULL;
> 
> +     kvm__register_mem(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);
> +
>       vesafb = (struct framebuffer) {
>               .width                  = VESA_WIDTH,
>               .height                 = VESA_HEIGHT,
> diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
> index 55551de..17b7557 100644
> --- a/tools/kvm/include/kvm/kvm.h
> +++ b/tools/kvm/include/kvm/kvm.h
> @@ -21,6 +21,8 @@ struct kvm {
> 
>       int                     nrcpus;         /* Number of cpus to run */
> 
> +     u32                     mem_slots;      /* for 
> KVM_SET_USER_MEMORY_REGION */
> +
>       u64                     ram_size;
>       void                    *ram_start;
> 
> @@ -49,6 +51,7 @@ void kvm__stop_timer(struct kvm *kvm);
> void kvm__irq_line(struct kvm *kvm, int irq, int level);
> bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, 
> int size, u32 count);
> bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 
> is_write);
> +void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void 
> *userspace_addr);
> bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, 
> void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
> bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
> void kvm__pause(void);
> diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
> index 54e3203..65e94a1 100644
> --- a/tools/kvm/kvm.c
> +++ b/tools/kvm/kvm.c
> @@ -162,13 +162,13 @@ static bool kvm__cpu_supports_vm(void)
>       return regs.ecx & (1 << feature);
> }
> 
> -static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, 
> u64 size, void *userspace_addr)
> +void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void 
> *userspace_addr)
> {
>       struct kvm_userspace_memory_region mem;
>       int ret;
> 
>       mem = (struct kvm_userspace_memory_region) {
> -             .slot                   = slot,
> +             .slot                   = kvm->mem_slots++,

Please keep in mind that this is pretty fragile. It will probably work out for 
you now, but memslots are

  1) limited
  2) don't deal with overlap

So please add at least a comment here, warning people that this is a very 
simple implementation that could break in subtle ways when implementing other 
hardware that maps its own memory regions somewhere else (PCI BARs) but 
wants them backed by RAM.


Alex

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to