From: Zhang Xiantao <[EMAIL PROTECTED]>
Date: Fri, 14 Dec 2007 01:21:39 +0800
Subject: [PATCH] Moving pio_data, pio, mmio_fault_cr2 to arch.

This patch moves the following three fields from struct kvm_vcpu into the x86 struct kvm_vcpu_arch:
1. pio_data
2. mmio_fault_cr2
3. pio
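
No functional change is intended; every user is converted from the old
kvm_vcpu fields to the kvm_vcpu_arch equivalents, i.e.:

    vcpu->pio_data        becomes  vcpu->arch.pio_data
    vcpu->mmio_fault_cr2  becomes  vcpu->arch.mmio_fault_cr2
    vcpu->pio             becomes  vcpu->arch.pio
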
Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
 drivers/kvm/kvm_main.c |    2 +-
 drivers/kvm/x86.c      |  102 ++++++++++++++++++++++++------------------------
 drivers/kvm/x86.h      |    9 ++--
 3 files changed, 57 insertions(+), 56 deletions(-)

diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 530c391..2d2ff55 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -670,7 +670,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
-               page = virt_to_page(vcpu->pio_data);
+               page = virt_to_page(vcpu->arch.pio_data);
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 92b40cf..475918e 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1781,11 +1781,11 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 {
        int r;
 
-       vcpu->mmio_fault_cr2 = cr2;
+       vcpu->arch.mmio_fault_cr2 = cr2;
        kvm_x86_ops->cache_regs(vcpu);
 
        vcpu->mmio_is_write = 0;
-       vcpu->pio.string = 0;
+       vcpu->arch.pio.string = 0;
 
        if (!no_decode) {
                int cs_db, cs_l;
@@ -1832,7 +1832,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 
        r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
 
-       if (vcpu->pio.string)
+       if (vcpu->arch.pio.string)
                return EMULATE_DO_MMIO;
 
        if ((r || vcpu->mmio_is_write) && run) {
@@ -1869,33 +1869,33 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
-               if (vcpu->pio.guest_pages[i]) {
-                       kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
-                       vcpu->pio.guest_pages[i] = NULL;
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
+               if (vcpu->arch.pio.guest_pages[i]) {
+                       kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
+                       vcpu->arch.pio.guest_pages[i] = NULL;
                }
 }
 
 static int pio_copy_data(struct kvm_vcpu *vcpu)
 {
-       void *p = vcpu->pio_data;
+       void *p = vcpu->arch.pio_data;
        void *q;
        unsigned bytes;
-       int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
+       int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
 
-       q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
+       q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
                 PAGE_KERNEL);
        if (!q) {
                free_pio_guest_pages(vcpu);
                return -ENOMEM;
        }
-       q += vcpu->pio.guest_page_offset;
-       bytes = vcpu->pio.size * vcpu->pio.cur_count;
-       if (vcpu->pio.in)
+       q += vcpu->arch.pio.guest_page_offset;
+       bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
+       if (vcpu->arch.pio.in)
                memcpy(q, p, bytes);
        else
                memcpy(p, q, bytes);
-       q -= vcpu->pio.guest_page_offset;
+       q -= vcpu->arch.pio.guest_page_offset;
        vunmap(q);
        free_pio_guest_pages(vcpu);
        return 0;
@@ -1903,7 +1903,7 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
 
 int complete_pio(struct kvm_vcpu *vcpu)
 {
-       struct kvm_pio_request *io = &vcpu->pio;
+       struct kvm_pio_request *io = &vcpu->arch.pio;
        long delta;
        int r;
 
@@ -1911,7 +1911,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
 
        if (!io->string) {
                if (io->in)
-                       memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->pio_data,
+                       memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
                               io->size);
        } else {
                if (io->in) {
@@ -1955,13 +1955,13 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
        /* TODO: String I/O for in kernel device */
 
        mutex_lock(&vcpu->kvm->lock);
-       if (vcpu->pio.in)
-               kvm_iodevice_read(pio_dev, vcpu->pio.port,
-                                 vcpu->pio.size,
+       if (vcpu->arch.pio.in)
+               kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
+                                 vcpu->arch.pio.size,
                                  pd);
        else
-               kvm_iodevice_write(pio_dev, vcpu->pio.port,
-                                  vcpu->pio.size,
+               kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
+                                  vcpu->arch.pio.size,
                                   pd);
        mutex_unlock(&vcpu->kvm->lock);
 }
@@ -1969,8 +1969,8 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
 static void pio_string_write(struct kvm_io_device *pio_dev,
                             struct kvm_vcpu *vcpu)
 {
-       struct kvm_pio_request *io = &vcpu->pio;
-       void *pd = vcpu->pio_data;
+       struct kvm_pio_request *io = &vcpu->arch.pio;
+       void *pd = vcpu->arch.pio_data;
        int i;
 
        mutex_lock(&vcpu->kvm->lock);
@@ -1996,25 +1996,25 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
-       vcpu->run->io.size = vcpu->pio.size = size;
+       vcpu->run->io.size = vcpu->arch.pio.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-       vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
-       vcpu->run->io.port = vcpu->pio.port = port;
-       vcpu->pio.in = in;
-       vcpu->pio.string = 0;
-       vcpu->pio.down = 0;
-       vcpu->pio.guest_page_offset = 0;
-       vcpu->pio.rep = 0;
+       vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
+       vcpu->run->io.port = vcpu->arch.pio.port = port;
+       vcpu->arch.pio.in = in;
+       vcpu->arch.pio.string = 0;
+       vcpu->arch.pio.down = 0;
+       vcpu->arch.pio.guest_page_offset = 0;
+       vcpu->arch.pio.rep = 0;
 
        kvm_x86_ops->cache_regs(vcpu);
-       memcpy(vcpu->pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+       memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
        kvm_x86_ops->decache_regs(vcpu);
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (pio_dev) {
-               kernel_pio(pio_dev, vcpu, vcpu->pio_data);
+               kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
                complete_pio(vcpu);
                return 1;
        }
@@ -2034,15 +2034,15 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
-       vcpu->run->io.size = vcpu->pio.size = size;
+       vcpu->run->io.size = vcpu->arch.pio.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-       vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
-       vcpu->run->io.port = vcpu->pio.port = port;
-       vcpu->pio.in = in;
-       vcpu->pio.string = 1;
-       vcpu->pio.down = down;
-       vcpu->pio.guest_page_offset = offset_in_page(address);
-       vcpu->pio.rep = rep;
+       vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
+       vcpu->run->io.port = vcpu->arch.pio.port = port;
+       vcpu->arch.pio.in = in;
+       vcpu->arch.pio.string = 1;
+       vcpu->arch.pio.down = down;
+       vcpu->arch.pio.guest_page_offset = offset_in_page(address);
+       vcpu->arch.pio.rep = rep;
 
        if (!count) {
                kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2072,15 +2072,15 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                return 1;
        }
        vcpu->run->io.count = now;
-       vcpu->pio.cur_count = now;
+       vcpu->arch.pio.cur_count = now;
 
-       if (vcpu->pio.cur_count == vcpu->pio.count)
+       if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
                kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        for (i = 0; i < nr_pages; ++i) {
                mutex_lock(&vcpu->kvm->lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
-               vcpu->pio.guest_pages[i] = page;
+               vcpu->arch.pio.guest_pages[i] = page;
                mutex_unlock(&vcpu->kvm->lock);
                if (!page) {
                        kvm_inject_gp(vcpu, 0);
@@ -2090,13 +2090,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        }
 
        pio_dev = vcpu_find_pio_dev(vcpu, port);
-       if (!vcpu->pio.in) {
+       if (!vcpu->arch.pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
                if (ret >= 0 && pio_dev) {
                        pio_string_write(pio_dev, vcpu);
                        complete_pio(vcpu);
-                       if (vcpu->pio.count == 0)
+                       if (vcpu->arch.pio.count == 0)
                                ret = 1;
                }
        } else if (pio_dev)
@@ -2525,7 +2525,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (!irqchip_in_kernel(vcpu->kvm))
                set_cr8(vcpu, kvm_run->cr8);
 
-       if (vcpu->pio.cur_count) {
+       if (vcpu->arch.pio.cur_count) {
                r = complete_pio(vcpu);
                if (r)
                        goto out;
@@ -2536,7 +2536,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
                r = emulate_instruction(vcpu, kvm_run,
-                                       vcpu->mmio_fault_cr2, 0, 1);
+                                       vcpu->arch.mmio_fault_cr2, 0, 1);
                if (r == EMULATE_DO_MMIO) {
                        /*
                         * Read-modify-write.  Back to userspace.
@@ -3014,7 +3014,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                r = -ENOMEM;
                goto fail;
        }
-       vcpu->pio_data = page_address(page);
+       vcpu->arch.pio_data = page_address(page);
 
        r = kvm_mmu_create(vcpu);
        if (r < 0)
@@ -3031,7 +3031,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
 fail_free_pio_data:
-       free_page((unsigned long)vcpu->pio_data);
+       free_page((unsigned long)vcpu->arch.pio_data);
 fail:
        return r;
 }
@@ -3040,7 +3040,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        kvm_free_lapic(vcpu);
        kvm_mmu_destroy(vcpu);
-       free_page((unsigned long)vcpu->pio_data);
+       free_page((unsigned long)vcpu->arch.pio_data);
 }
 
 struct  kvm *kvm_arch_create_vm(void)
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index c698ffc..81ac7b0 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -132,15 +132,16 @@ struct kvm_vcpu_arch {
        struct i387_fxsave_struct host_fx_image;
        struct i387_fxsave_struct guest_fx_image;
 
+       gva_t mmio_fault_cr2;
+       struct kvm_pio_request pio;
+       void *pio_data;
+
+
 };
 
 struct kvm_vcpu {
        KVM_VCPU_COMM;
 
-       gva_t mmio_fault_cr2;
-       struct kvm_pio_request pio;
-       void *pio_data;
-
        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
-- 
1.5.1.2
