Changed to inline style for easy review.

From cdbc4c54cf65e46e5545fe44c9a31ae457385ee0 Mon Sep 17 00:00:00 2001
From: Zhang xiantao <[EMAIL PROTECTED]>
Date: Thu, 11 Oct 2007 16:42:46 +0800
Subject: [PATCH] Change kvm_x86_ops to kvm_ops, and make room for
structure kvm_arch_ops.
---
 drivers/kvm/Kconfig           |   19 +++--
 drivers/kvm/Makefile          |   14 ++--
 drivers/kvm/kvm_main.c        |  176 ++++++++++++++++++++--------------------
 drivers/kvm/x86/Makefile      |    2 +-
 drivers/kvm/x86/kvm.h         |    6 +-
 drivers/kvm/x86/mmu.c         |    6 +-
 drivers/kvm/x86/paging_tmpl.h |    2 +-
 drivers/kvm/x86/svm.c         |    2 +-
 drivers/kvm/x86/vmx.c         |    2 +-
 drivers/kvm/x86/x86_emulate.c |    4 +-
 10 files changed, 119 insertions(+), 114 deletions(-)

diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 8749fa4..fc3a4e4 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -3,7 +3,7 @@
 #
 menuconfig VIRTUALIZATION
        bool "Virtualization"
-       depends on X86
+       depends on X86 || IA64
        default y
        ---help---
          Say Y here to get to see options for using your Linux host to run other
@@ -16,7 +16,7 @@ if VIRTUALIZATION
 
 config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
-       depends on X86 && EXPERIMENTAL
+       depends on (X86 || IA64) && EXPERIMENTAL
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        ---help---
@@ -33,18 +33,25 @@ config KVM
 
          If unsure, say N.
 
-config KVM_INTEL
+config KVM_VMX
        tristate "KVM for Intel processors support"
-       depends on KVM
+       depends on KVM && X86
        ---help---
          Provides support for KVM on Intel processors equipped with the VT
          extensions.
 
-config KVM_AMD
+config KVM_SVM
        tristate "KVM for AMD processors support"
-       depends on KVM
+       depends on KVM && X86
        ---help---
          Provides support for KVM on AMD processors equipped with the AMD-V
          (SVM) extensions.
+config KVM_VTI
+       tristate "KVM for Intel Itanium processors support"
+       depends on KVM && IA64
+       ---help---
+         Provides support for KVM on Intel Itanium processors equipped with the VT
+         extensions.
+
 
 endif # VIRTUALIZATION
diff --git a/drivers/kvm/Makefile b/drivers/kvm/Makefile
index e5a8f4d..8051cf7 100644
--- a/drivers/kvm/Makefile
+++ b/drivers/kvm/Makefile
@@ -1,10 +1,8 @@
 #
-# Makefile for Kernel-based Virtual Machine module
+# Makefile to build KVM modules
 #
-
-kvm-objs := kvm_main.o mmu.o x86_emulate.o i8259.o irq.o lapic.o ioapic.o
-obj-$(CONFIG_KVM) += kvm.o
-kvm-intel-objs = vmx.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
-kvm-amd-objs = svm.o
-obj-$(CONFIG_KVM_AMD) += kvm-amd.o
+ifneq ($(CONFIG_KVM),)
+KVM_ARCH =$(shell uname -i | sed -e 's/.*86.*/x86/g')
+EXTRA_CFLAGS += -Idrivers/kvm/$(KVM_ARCH)
+obj-$(CONFIG_KVM) = $(KVM_ARCH)/
+endif
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index f39ac58..bb9f460 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -55,7 +55,7 @@ static LIST_HEAD(vm_list);
 
 static cpumask_t cpus_hardware_enabled;
 
-struct kvm_x86_ops *kvm_x86_ops;
+struct kvm_ops *kvm_ops;
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
@@ -184,14 +184,14 @@ static void vcpu_load(struct kvm_vcpu *vcpu)
        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
-       kvm_x86_ops->vcpu_load(vcpu, cpu);
+       kvm_ops->vcpu_load(vcpu, cpu);
        put_cpu();
 }
 
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
        preempt_disable();
-       kvm_x86_ops->vcpu_put(vcpu);
+       kvm_ops->vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
@@ -384,7 +384,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
-                       kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+                       kvm_ops->vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
@@ -415,7 +415,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 
 static void inject_gp(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->inject_gp(vcpu, 0);
+       kvm_ops->inject_gp(vcpu, 0);
 }
 
 /*
@@ -484,7 +484,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                                inject_gp(vcpu);
                                return;
                        }
-                       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+                       kvm_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
@@ -503,7 +503,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        }
 
-       kvm_x86_ops->set_cr0(vcpu, cr0);
+       kvm_ops->set_cr0(vcpu, cr0);
        vcpu->cr0 = cr0;
 
        mutex_lock(&vcpu->kvm->lock);
@@ -546,7 +546,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                inject_gp(vcpu);
                return;
        }
-       kvm_x86_ops->set_cr4(vcpu, cr4);
+       kvm_ops->set_cr4(vcpu, cr4);
        vcpu->cr4 = cr4;
        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
@@ -1284,7 +1284,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
        gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
        if (gpa == UNMAPPED_GVA) {
-               kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
+               kvm_ops->inject_page_fault(vcpu, addr, 2);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -1347,7 +1347,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
-       return kvm_x86_ops->get_segment_base(vcpu, seg);
+       return kvm_ops->get_segment_base(vcpu, seg);
 }
 
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
@@ -1358,7 +1358,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
        vcpu->cr0 &= ~X86_CR0_TS;
-       kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
+       kvm_ops->set_cr0(vcpu, vcpu->cr0);
        return X86EMUL_CONTINUE;
 }
 
@@ -1368,7 +1368,7 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 
        switch (dr) {
        case 0 ... 3:
-               *dest = kvm_x86_ops->get_dr(vcpu, dr);
+               *dest = kvm_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
                pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
@@ -1381,7 +1381,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;
 
-       kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
+       kvm_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
                return X86EMUL_UNHANDLEABLE;
@@ -1426,17 +1426,17 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        int r;
 
        vcpu->mmio_fault_cr2 = cr2;
-       kvm_x86_ops->cache_regs(vcpu);
+       kvm_ops->cache_regs(vcpu);
 
        vcpu->mmio_is_write = 0;
        vcpu->pio.string = 0;
 
        if (!no_decode) {
                int cs_db, cs_l;
-               kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+               kvm_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
                vcpu->emulate_ctxt.vcpu = vcpu;
-               vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+               vcpu->emulate_ctxt.eflags = kvm_ops->get_rflags(vcpu);
                vcpu->emulate_ctxt.cr2 = cr2;
                vcpu->emulate_ctxt.mode =
                        (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
@@ -1496,8 +1496,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                return EMULATE_DO_MMIO;
        }
 
-       kvm_x86_ops->decache_regs(vcpu);
-       kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
+       kvm_ops->decache_regs(vcpu);
+       kvm_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
 
        if (vcpu->mmio_is_write) {
                vcpu->mmio_needed = 0;
@@ -1554,7 +1554,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
 
-       kvm_x86_ops->cache_regs(vcpu);
+       kvm_ops->cache_regs(vcpu);
 
        nr = vcpu->regs[VCPU_REGS_RAX];
        a0 = vcpu->regs[VCPU_REGS_RBX];
@@ -1576,7 +1576,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                break;
        }
        vcpu->regs[VCPU_REGS_RAX] = ret;
-       kvm_x86_ops->decache_regs(vcpu);
+       kvm_ops->decache_regs(vcpu);
        return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
@@ -1595,8 +1595,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
         */
        kvm_mmu_zap_all(vcpu->kvm);
 
-       kvm_x86_ops->cache_regs(vcpu);
-       kvm_x86_ops->patch_hypercall(vcpu, instruction);
+       kvm_ops->cache_regs(vcpu);
+       kvm_ops->patch_hypercall(vcpu, instruction);
        if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
            != X86EMUL_CONTINUE)
                ret = -EFAULT;
@@ -1615,26 +1615,26 @@ void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
        struct descriptor_table dt = { limit, base };
 
-       kvm_x86_ops->set_gdt(vcpu, &dt);
+       kvm_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
        struct descriptor_table dt = { limit, base };
 
-       kvm_x86_ops->set_idt(vcpu, &dt);
+       kvm_ops->set_idt(vcpu, &dt);
 }
 
 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags)
 {
        lmsw(vcpu, msw);
-       *rflags = kvm_x86_ops->get_rflags(vcpu);
+       *rflags = kvm_ops->get_rflags(vcpu);
 }
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
-       kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+       kvm_ops->decache_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
                return vcpu->cr0;
@@ -1656,7 +1656,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
        switch (cr) {
        case 0:
                set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
-               *rflags = kvm_x86_ops->get_rflags(vcpu);
+               *rflags = kvm_ops->get_rflags(vcpu);
                break;
        case 2:
                vcpu->cr2 = val;
@@ -1728,7 +1728,7 @@ EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  */
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
-       return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
+       return kvm_ops->get_msr(vcpu, msr_index, pdata);
 }
 
 #ifdef CONFIG_X86_64
@@ -1749,7 +1749,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
                return;
        }
 
-       kvm_x86_ops->set_efer(vcpu, efer);
+       kvm_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
        efer |= vcpu->shadow_efer & EFER_LMA;
@@ -1800,7 +1800,7 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
-       return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+       return kvm_ops->set_msr(vcpu, msr_index, data);
 }
 
 void kvm_resched(struct kvm_vcpu *vcpu)
@@ -1817,7 +1817,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        u32 function;
        struct kvm_cpuid_entry *e, *best;
 
-       kvm_x86_ops->cache_regs(vcpu);
+       kvm_ops->cache_regs(vcpu);
        function = vcpu->regs[VCPU_REGS_RAX];
        vcpu->regs[VCPU_REGS_RAX] = 0;
        vcpu->regs[VCPU_REGS_RBX] = 0;
@@ -1843,8 +1843,8 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
                vcpu->regs[VCPU_REGS_RCX] = best->ecx;
                vcpu->regs[VCPU_REGS_RDX] = best->edx;
        }
-       kvm_x86_ops->decache_regs(vcpu);
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       kvm_ops->decache_regs(vcpu);
+       kvm_ops->skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -1879,7 +1879,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
        long delta;
        int r;
 
-       kvm_x86_ops->cache_regs(vcpu);
+       kvm_ops->cache_regs(vcpu);
 
        if (!io->string) {
                if (io->in)
@@ -1889,7 +1889,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
                if (io->in) {
                        r = pio_copy_data(vcpu);
                        if (r) {
-                               kvm_x86_ops->cache_regs(vcpu);
+                               kvm_ops->cache_regs(vcpu);
                                return r;
                        }
                }
@@ -1912,7 +1912,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
                        vcpu->regs[VCPU_REGS_RSI] += delta;
        }
 
-       kvm_x86_ops->decache_regs(vcpu);
+       kvm_ops->decache_regs(vcpu);
 
        io->count -= io->cur_count;
        io->cur_count = 0;
@@ -1972,11 +1972,11 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->pio.guest_page_offset = 0;
        vcpu->pio.rep = 0;
 
-       kvm_x86_ops->cache_regs(vcpu);
+       kvm_ops->cache_regs(vcpu);
        memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
-       kvm_x86_ops->decache_regs(vcpu);
+       kvm_ops->decache_regs(vcpu);
 
-       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       kvm_ops->skip_emulated_instruction(vcpu);
 
        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (pio_dev) {
@@ -2011,7 +2011,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->pio.rep = rep;
 
        if (!count) {
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
+               kvm_ops->skip_emulated_instruction(vcpu);
                return 1;
        }
 
@@ -2041,7 +2041,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->pio.cur_count = now;
 
        if (vcpu->pio.cur_count == vcpu->pio.count)
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
+               kvm_ops->skip_emulated_instruction(vcpu);
 
        for (i = 0; i < nr_pages; ++i) {
                mutex_lock(&vcpu->kvm->lock);
@@ -2088,13 +2088,13 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
        return (!vcpu->irq_summary &&
                kvm_run->request_interrupt_window &&
                vcpu->interrupt_window_open &&
-               (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
+               (kvm_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
                              struct kvm_run *kvm_run)
 {
-       kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+       kvm_run->if_flag = (kvm_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
        kvm_run->cr8 = get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
        if (irqchip_in_kernel(vcpu->kvm))
@@ -2113,13 +2113,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                pr_debug("vcpu %d received sipi with vector # %x\n",
                       vcpu->vcpu_id, vcpu->sipi_vector);
                kvm_lapic_reset(vcpu);
-               kvm_x86_ops->vcpu_reset(vcpu);
+               kvm_ops->vcpu_reset(vcpu);
                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
        }
 
 preempted:
        if (vcpu->guest_debug.enabled)
-               kvm_x86_ops->guest_debug_pre(vcpu);
+               kvm_ops->guest_debug_pre(vcpu);
 
 again:
        r = kvm_mmu_reload(vcpu);
@@ -2128,7 +2128,7 @@ again:
 
        preempt_disable();
 
-       kvm_x86_ops->prepare_guest_switch(vcpu);
+       kvm_ops->prepare_guest_switch(vcpu);
        kvm_load_guest_fpu(vcpu);
 
        local_irq_disable();
@@ -2143,17 +2143,17 @@ again:
        }
 
        if (irqchip_in_kernel(vcpu->kvm))
-               kvm_x86_ops->inject_pending_irq(vcpu);
+               kvm_ops->inject_pending_irq(vcpu);
        else if (!vcpu->mmio_read_completed)
-               kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+               kvm_ops->inject_pending_vectors(vcpu, kvm_run);
 
        vcpu->guest_mode = 1;
 
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
-                       kvm_x86_ops->tlb_flush(vcpu);
+                       kvm_ops->tlb_flush(vcpu);
 
-       kvm_x86_ops->run(vcpu, kvm_run);
+       kvm_ops->run(vcpu, kvm_run);
 
        vcpu->guest_mode = 0;
        local_irq_enable();
@@ -2166,11 +2166,11 @@ again:
         * Profile KVM exit RIPs:
         */
        if (unlikely(prof_on == KVM_PROFILING)) {
-               kvm_x86_ops->cache_regs(vcpu);
+               kvm_ops->cache_regs(vcpu);
                profile_hit(KVM_PROFILING, (void *)vcpu->rip);
        }
 
-       r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+       r = kvm_ops->handle_exit(kvm_run, vcpu);
 
        if (r > 0) {
                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
@@ -2239,9 +2239,9 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-               kvm_x86_ops->cache_regs(vcpu);
+               kvm_ops->cache_regs(vcpu);
                vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-               kvm_x86_ops->decache_regs(vcpu);
+               kvm_ops->decache_regs(vcpu);
        }
 
        r = __vcpu_run(vcpu, kvm_run);
@@ -2259,7 +2259,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 {
        vcpu_load(vcpu);
 
-       kvm_x86_ops->cache_regs(vcpu);
+       kvm_ops->cache_regs(vcpu);
 
        regs->rax = vcpu->regs[VCPU_REGS_RAX];
        regs->rbx = vcpu->regs[VCPU_REGS_RBX];
@@ -2281,7 +2281,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 #endif
 
        regs->rip = vcpu->rip;
-       regs->rflags = kvm_x86_ops->get_rflags(vcpu);
+       regs->rflags = kvm_ops->get_rflags(vcpu);
 
        /*
         * Don't leak debug flags in case they were set for guest debugging
@@ -2319,9 +2319,9 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 #endif
 
        vcpu->rip = regs->rip;
-       kvm_x86_ops->set_rflags(vcpu, regs->rflags);
+       kvm_ops->set_rflags(vcpu, regs->rflags);
 
-       kvm_x86_ops->decache_regs(vcpu);
+       kvm_ops->decache_regs(vcpu);
 
        vcpu_put(vcpu);
 
@@ -2331,7 +2331,7 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
-       return kvm_x86_ops->get_segment(vcpu, var, seg);
+       return kvm_ops->get_segment(vcpu, var, seg);
 }
 
 static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
@@ -2352,14 +2352,14 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
-       kvm_x86_ops->get_idt(vcpu, &dt);
+       kvm_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
-       kvm_x86_ops->get_gdt(vcpu, &dt);
+       kvm_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;
 
-       kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+       kvm_ops->decache_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
@@ -2371,7 +2371,7 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        if (irqchip_in_kernel(vcpu->kvm)) {
                memset(sregs->interrupt_bitmap, 0,
                       sizeof sregs->interrupt_bitmap);
-               pending_vec = kvm_x86_ops->get_irq(vcpu);
+               pending_vec = kvm_ops->get_irq(vcpu);
                if (pending_vec >= 0)
                        set_bit(pending_vec,
                                (unsigned long *)sregs->interrupt_bitmap);
@@ -2387,7 +2387,7 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
-       return kvm_x86_ops->set_segment(vcpu, var, seg);
+       return kvm_ops->set_segment(vcpu, var, seg);
 }
 
 static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -2401,10 +2401,10 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
-       kvm_x86_ops->set_idt(vcpu, &dt);
+       kvm_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
-       kvm_x86_ops->set_gdt(vcpu, &dt);
+       kvm_ops->set_gdt(vcpu, &dt);
 
        vcpu->cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
@@ -2414,18 +2414,18 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
 #ifdef CONFIG_X86_64
-       kvm_x86_ops->set_efer(vcpu, sregs->efer);
+       kvm_ops->set_efer(vcpu, sregs->efer);
 #endif
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
-       kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+       kvm_ops->decache_cr4_guest_bits(vcpu);
 
        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        vcpu->cr0 = sregs->cr0;
-       kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+       kvm_ops->set_cr0(vcpu, sregs->cr0);
 
        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
-       kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+       kvm_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->cr3);
 
@@ -2446,7 +2446,7 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        max_bits);
                /* Only pending external irq is handled here */
                if (pending_vec < max_bits) {
-                       kvm_x86_ops->set_irq(vcpu, pending_vec);
+                       kvm_ops->set_irq(vcpu, pending_vec);
                        pr_debug("Set back pending irq %d\n",
                                 pending_vec);
                }
@@ -2640,7 +2640,7 @@ static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 
        vcpu_load(vcpu);
 
-       r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
+       r = kvm_ops->set_guest_debug(vcpu, dbg);
 
        vcpu_put(vcpu);
 
@@ -2722,7 +2722,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
        if (!valid_vcpu(n))
                return -EINVAL;
 
-       vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+       vcpu = kvm_ops->vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);
 
@@ -2763,7 +2763,7 @@ mmu_unload:
        vcpu_put(vcpu);
 
 free_vcpu:
-       kvm_x86_ops->vcpu_free(vcpu);
+       kvm_ops->vcpu_free(vcpu);
        return r;
 }
 
@@ -3400,7 +3400,7 @@ static void decache_vcpus_on_cpu(int cpu)
                         */
                        if (mutex_trylock(&vcpu->mutex)) {
                                if (vcpu->cpu == cpu) {
-                                       kvm_x86_ops->vcpu_decache(vcpu);
+                                       kvm_ops->vcpu_decache(vcpu);
                                        vcpu->cpu = -1;
                                }
                                mutex_unlock(&vcpu->mutex);
@@ -3416,7 +3416,7 @@ static void hardware_enable(void *junk)
        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
-       kvm_x86_ops->hardware_enable(NULL);
+       kvm_ops->hardware_enable(NULL);
 }
 
 static void hardware_disable(void *junk)
@@ -3427,7 +3427,7 @@ static void hardware_disable(void *junk)
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
-       kvm_x86_ops->hardware_disable(NULL);
+       kvm_ops->hardware_disable(NULL);
 }
 
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -3595,7 +3595,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_x86_ops->vcpu_load(vcpu, cpu);
+       kvm_ops->vcpu_load(vcpu, cpu);
 }
 
 static void kvm_sched_out(struct preempt_notifier *pn,
@@ -3603,16 +3603,16 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_x86_ops->vcpu_put(vcpu);
+       kvm_ops->vcpu_put(vcpu);
 }
 
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init_x86(struct kvm_ops *ops, unsigned int vcpu_size,
                  struct module *module)
 {
        int r;
        int cpu;
 
-       if (kvm_x86_ops) {
+       if (kvm_ops) {
                printk(KERN_ERR "kvm: already loaded the other module\n");
                return -EEXIST;
        }
@@ -3626,15 +3626,15 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
                return -EOPNOTSUPP;
        }
 
-       kvm_x86_ops = ops;
+       kvm_ops = ops;
 
-       r = kvm_x86_ops->hardware_setup();
+       r = kvm_ops->hardware_setup();
        if (r < 0)
                goto out;
 
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
-                               kvm_x86_ops->check_processor_compatibility,
+                               kvm_ops->check_processor_compatibility,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
@@ -3689,9 +3689,9 @@ out_free_2:
 out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
 out_free_0:
-       kvm_x86_ops->hardware_unsetup();
+       kvm_ops->hardware_unsetup();
 out:
-       kvm_x86_ops = NULL;
+       kvm_ops = NULL;
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init_x86);
@@ -3705,8 +3705,8 @@ void kvm_exit_x86(void)
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
-       kvm_x86_ops->hardware_unsetup();
-       kvm_x86_ops = NULL;
+       kvm_ops->hardware_unsetup();
+       kvm_ops = NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_exit_x86);
 
diff --git a/drivers/kvm/x86/Makefile b/drivers/kvm/x86/Makefile
index 8d7a466..a67c5d6 100644
--- a/drivers/kvm/x86/Makefile
+++ b/drivers/kvm/x86/Makefile
@@ -2,7 +2,7 @@
 # Makefile for Kernel-based Virtual Machine module
 #
 
-EXTRA_CFLAGS += -Idrivers/kvm/include/
+EXTRA_CFLAGS += -Idrivers/kvm/x86
 
 kvm-objs := ../kvm_main.o kvm_arch.o 
 kvm-objs += mmu.o x86_emulate.o i8259.o irq.o lapic.o ioapic.o
diff --git a/drivers/kvm/x86/kvm.h b/drivers/kvm/x86/kvm.h
index 34276ab..e3c4948 100644
--- a/drivers/kvm/x86/kvm.h
+++ b/drivers/kvm/x86/kvm.h
@@ -455,7 +455,7 @@ struct descriptor_table {
        unsigned long base;
 } __attribute__((packed));
 
-struct kvm_x86_ops {
+struct kvm_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        void (*hardware_enable)(void *dummy);      /* __init */
@@ -520,7 +520,7 @@ struct kvm_x86_ops {
                                       struct kvm_run *run);
 };
 
-extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_ops *kvm_ops;
 
 /* The guest did something we don't support. */
 #define pr_unimpl(vcpu, fmt, ...)                                      \
@@ -536,7 +536,7 @@ extern struct kvm_x86_ops *kvm_x86_ops;
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init_x86(struct kvm_ops *ops, unsigned int vcpu_size,
                  struct module *module);
 void kvm_exit_x86(void);
 
diff --git a/drivers/kvm/x86/mmu.c b/drivers/kvm/x86/mmu.c
index 382bd6a..9193443 100644
--- a/drivers/kvm/x86/mmu.c
+++ b/drivers/kvm/x86/mmu.c
@@ -1043,7 +1043,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.tlb_flush;
-       kvm_x86_ops->tlb_flush(vcpu);
+       kvm_ops->tlb_flush(vcpu);
 }
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1056,7 +1056,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
 {
-       kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
+       kvm_ops->inject_page_fault(vcpu, addr, err_code);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -1152,7 +1152,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        mmu_alloc_roots(vcpu);
-       kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+       kvm_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
        mutex_unlock(&vcpu->kvm->lock);
diff --git a/drivers/kvm/x86/paging_tmpl.h b/drivers/kvm/x86/paging_tmpl.h
index 447d2c3..e3c20a5 100644
--- a/drivers/kvm/x86/paging_tmpl.h
+++ b/drivers/kvm/x86/paging_tmpl.h
@@ -279,7 +279,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                        access_bits &= ~PT_WRITABLE_MASK;
                        if (is_writeble_pte(spte)) {
                                spte &= ~PT_WRITABLE_MASK;
-                               kvm_x86_ops->tlb_flush(vcpu);
+                               kvm_ops->tlb_flush(vcpu);
                        }
                        if (write_fault)
                                *ptwrite = 1;
diff --git a/drivers/kvm/x86/svm.c b/drivers/kvm/x86/svm.c
index f643379..1cf0f8f 100644
--- a/drivers/kvm/x86/svm.c
+++ b/drivers/kvm/x86/svm.c
@@ -1678,7 +1678,7 @@ static void svm_check_processor_compat(void *rtn)
        *(int *)rtn = 0;
 }
 
-static struct kvm_x86_ops svm_x86_ops = {
+static struct kvm_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
diff --git a/drivers/kvm/x86/vmx.c b/drivers/kvm/x86/vmx.c
index f0597f6..0d2c41c 100644
--- a/drivers/kvm/x86/vmx.c
+++ b/drivers/kvm/x86/vmx.c
@@ -2478,7 +2478,7 @@ static void __init vmx_check_processor_compat(void *rtn)
        }
 }
 
-static struct kvm_x86_ops vmx_x86_ops = {
+static struct kvm_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
        .hardware_setup = hardware_setup,
diff --git a/drivers/kvm/x86/x86_emulate.c b/drivers/kvm/x86/x86_emulate.c
index fa33fcd..6a6e89c 100644
--- a/drivers/kvm/x86/x86_emulate.c
+++ b/drivers/kvm/x86/x86_emulate.c
@@ -1717,7 +1717,7 @@ twobyte_special_insn:
                        | ((u64)c->regs[VCPU_REGS_RDX] << 32);
                rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
                if (rc) {
-                       kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+                       kvm_ops->inject_gp(ctxt->vcpu, 0);
                        c->eip = ctxt->vcpu->rip;
                }
                rc = X86EMUL_CONTINUE;
@@ -1726,7 +1726,7 @@ twobyte_special_insn:
                /* rdmsr */
                rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
                if (rc) {
-                       kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+                       kvm_ops->inject_gp(ctxt->vcpu, 0);
                        c->eip = ctxt->vcpu->rip;
                } else {
                        c->regs[VCPU_REGS_RAX] = (u32)msr_data;
-- 
1.5.0.5
 

-----Original Message-----
From: [EMAIL PROTECTED] [mailto:[EMAIL PROTECTED]] On Behalf Of Zhang, Xiantao
Sent: October 11, 2007 17:08
To: kvm-devel@lists.sourceforge.net; Avi Kivity
Subject: [kvm-devel] [Patch][2/3]

From cdbc4c54cf65e46e5545fe44c9a31ae457385ee0 Mon Sep 17 00:00:00 2001
From: Zhang xiantao <[EMAIL PROTECTED]>
Date: Thu, 11 Oct 2007 16:42:46 +0800

In order to accommodate different architectures, we have to change
kvm_x86_ops to an architecture-neutral name. kvm_ops may not be the
best name, but it shouldn't introduce any ambiguity. In the third
patch, we add a sub-field, struct kvm_arch_ops, for arch-specific ops;
that is, each CPU architecture can define its own arch-specific ops
for its special needs. IMO, we should treat x86, IA64, ppc, etc. as
different architectures, rather than treating vmx and svm as different
archs: svm and vmx are two different virtualization approaches within
the x86 arch from a platform point of view.
