Zhang, Xiantao wrote:
> From 12457e0fb85ef32f1a1f808be294bebe8d22667c Mon Sep 17 00:00:00 2001
> From: Zhang xiantao <[EMAIL PROTECTED]>
> Date: Fri, 12 Oct 2007 13:29:30 +0800
> Subject: [PATCH] Split kvm_vcpu to support new archs. Define a new sub field kvm_arch_vcpu to hold arch-specific sections.
> 
> I am not sure whether the data fields related to the MMU should go under
> kvm_arch_vcpu or not, because the IA64 side doesn't need them and only
> needs the kvm module to allocate memory for guests.
We don't need them either on 390, and neither does ppc. I think we should 
consider Avi's ingenious softmmu to be x86 specific. Therefore, those 
fields should go to the x86 part afaics.
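
For illustration, a minimal sketch of that suggestion, assuming the softmmu 
context simply moves into the x86-only structure (the surrounding layout here 
is an assumption, not part of the posted patch):

	struct kvm_arch_vcpu {
		struct kvm_mmu mmu;	/* Avi's shadow-MMU state, x86-only (sketch) */
		/* ... plus the other x86-specific fields from the quoted patch ... */
	};

s390, ppc and ia64 would then carry no MMU state in the generic struct 
kvm_vcpu at all.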

> diff --git a/drivers/kvm/ioapic.c b/drivers/kvm/ioapic.c
> index 3b69541..b149c07 100644
> --- a/drivers/kvm/ioapic.c
> +++ b/drivers/kvm/ioapic.c
> @@ -156,7 +156,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
>       if (dest_mode == 0) {   /* Physical mode. */
>               if (dest == 0xFF) {     /* Broadcast. */
>                       for (i = 0; i < KVM_MAX_VCPUS; ++i)
> -				if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
> +				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
>                                       mask |= 1 << i;
>                       return mask;
>               }
Your mail client wraps lines, so the patch cannot be applied when taken 
from the email. Try using mutt or evolution for sending patches. 
In evolution, select "preformat" mode and paste the patch into it.

> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> index 4a52d6e..eaa28c8 100644
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -307,31 +307,21 @@ struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
>  void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
>                            struct kvm_io_device *dev);
> 
> +
> +#include "kvm_arch.h"
> +
This should be x86.h for now, and later on be moved to 
include/asm-x86/to-be-named.h
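
As a sketch of that naming (the guard name and the final asm-x86 location are 
assumptions, nothing is decided yet):

	/* drivers/kvm/x86.h -- same content as the proposed kvm_arch.h, just renamed */
	#ifndef __KVM_X86_H
	#define __KVM_X86_H

	struct kvm_arch_vcpu {
		/* x86-specific vcpu fields, as in the quoted patch below */
	};

	#endif

and kvm.h would then do #include "x86.h" instead of #include "kvm_arch.h".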

>  struct kvm_vcpu {
>       struct kvm *kvm;
>       struct preempt_notifier preempt_notifier;
>       int vcpu_id;
>       struct mutex mutex;
>       int   cpu;
> -     u64 host_tsc;
>       struct kvm_run *run;
>       int interrupt_window_open;
I am not sure if this is the right thing for all archs. We have 
various forms of interrupts (I/O, external, etc.) which can all be 
masked separately. I think interrupt_window_open should go to arch.

>       int guest_mode;
>       unsigned long requests;
>       unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
We don't have irq. This works completely differently for us, so this 
needs to go to arch.

>       DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
Same here.
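
A sketch of how those three fields could look on the x86 side (the layout is 
an assumption; the field definitions themselves are the ones quoted above):

	struct kvm_arch_vcpu {
		/* x86 interrupt model: per-vector pending bitmap plus window state.
		 * s390 would instead keep its own representation of separately
		 * maskable I/O, external, etc. interrupts. */
		int interrupt_window_open;
		unsigned long irq_summary;	/* bit vector: 1 per word in irq_pending */
		DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
		/* ... other x86-only fields ... */
	};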

>  #define VCPU_MP_STATE_RUNNABLE          0
>  #define VCPU_MP_STATE_UNINITIALIZED     1
>  #define VCPU_MP_STATE_INIT_RECEIVED     2
> @@ -339,7 +329,6 @@ struct kvm_vcpu {
>  #define VCPU_MP_STATE_HALTED            4
>       int mp_state;
>       int sipi_vector;
This one is arch dependent and should go to arch.

> -     u64 ia32_misc_enable_msr;
> 
>       struct kvm_mmu mmu;
> 
> @@ -354,10 +343,6 @@ struct kvm_vcpu {
> 
>       struct kvm_guest_debug guest_debug;
> 
> -     struct i387_fxsave_struct host_fx_image;
> -     struct i387_fxsave_struct guest_fx_image;
> -     int fpu_active;
> -     int guest_fpu_loaded;
I think guest_fpu_loaded should be generic. Don't you want to use the 
lazy fpu restore with preempt notification too?
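
For reference, a hedged sketch of that lazy-restore idea: the generic 
preempt-notifier sched-out path would only touch the FPU if guest state is 
actually loaded. kvm_arch_save_guest_fpu() is a made-up per-arch hook used 
purely for illustration, not an existing function:

	/* sketch only -- not a real patch */
	static void kvm_sched_out(struct preempt_notifier *pn,
				  struct task_struct *next)
	{
		struct kvm_vcpu *vcpu = container_of(pn, struct kvm_vcpu,
						     preempt_notifier);

		if (vcpu->guest_fpu_loaded)		/* generic flag */
			kvm_arch_save_guest_fpu(vcpu);	/* hypothetical arch hook:
							   save guest FPU, reload host FPU */
		/* ... existing arch vcpu_put path ... */
	}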

> 
>       int mmio_needed;
>       int mmio_read_completed;
This is arch dependent; we don't have CONFIG_MMIO.

> @@ -365,7 +350,6 @@ struct kvm_vcpu {
>       int mmio_size;
>       unsigned char mmio_data[8];
>       gpa_t mmio_phys_addr;
> -     gva_t mmio_fault_cr2;
>       struct kvm_pio_request pio;
>       void *pio_data;
All of the above are arch dependent.

> diff --git a/drivers/kvm/kvm_arch.h b/drivers/kvm/kvm_arch.h
> new file mode 100644
> index 0000000..fe73d3d
> --- /dev/null
> +++ b/drivers/kvm/kvm_arch.h
> @@ -0,0 +1,65 @@
> +#ifndef __KVM_ARCH_H
> +#define __KVM_ARCH_H
This should go to x86.h, no new header please.

> +struct kvm_arch_vcpu{
> +     
> +     u64 host_tsc;
> +     
> +     unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
> +     unsigned long rip;      /* needs vcpu_load_rsp_rip() */
> +
> +     unsigned long cr0;
> +     unsigned long cr2;
> +     unsigned long cr3;
> +     unsigned long cr4;
> +     unsigned long cr8;
> +     u64 pdptrs[4]; /* pae */
> +     u64 shadow_efer;
> +     u64 apic_base;
> +     struct kvm_lapic *apic;    /* kernel irqchip context */
> +
> +     u64 ia32_misc_enable_msr;
> +
> +     
> +     struct i387_fxsave_struct host_fx_image;
> +     struct i387_fxsave_struct guest_fx_image;
> +     int fpu_active;
> +     int guest_fpu_loaded;
> +
> +     gva_t mmio_fault_cr2;
> +     
> +     struct {
> +             int active;
> +             u8 save_iopl;
> +             struct kvm_save_segment {
> +                     u16 selector;
> +                     unsigned long base;
> +                     u32 limit;
> +                     u32 ar;
> +             } tr, es, ds, fs, gs;
> +     } rmode;
> +     
> +     int cpuid_nent;
> +     struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
> +
> +     /* emulate context */
> +
> +     struct x86_emulate_ctxt emulate_ctxt;
> +};
> +
> +#endif
Very nice. The only thing that shouldn't be here is fpu_active as far 
as I can tell.

I like this split overall; per-architecture vcpu data structures are 
an important step and clearly the right way to go.
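
The call-site change in the quoted ioapic hunk already shows the resulting 
access pattern; a minimal sketch, assuming the new structure is simply 
embedded as a field named arch:

	struct kvm_vcpu {
		/* ... generic fields ... */
		struct kvm_arch_vcpu arch;	/* embedded per-architecture state */
	};

	/* x86 code then reaches its private state through vcpu->arch, e.g.: */
	if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
		mask |= 1 << i;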

with kind regards,
Carsten
