From: Zhang Xiantao <[EMAIL PROTECTED]>
Date: Fri, 14 Dec 2007 01:29:58 +0800
Subject: [PATCH] kvm: portability: Moving rmode, exceptions,
halt_request to arch

This patch moves rmode, exceptions, and halt_request to arch.
Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
 drivers/kvm/vmx.c         |   72
++++++++++++++++++++++----------------------
 drivers/kvm/x86.c         |   36 +++++++++++-----------
 drivers/kvm/x86.h         |   11 +++----
 drivers/kvm/x86_emulate.c |    2 +-
 4 files changed, 60 insertions(+), 61 deletions(-)

diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 72cc376..e26e299 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -343,7 +343,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
                eb |= 1u << NM_VECTOR;
        if (vcpu->guest_debug.enabled)
                eb |= 1u << 1;
-       if (vcpu->rmode.active)
+       if (vcpu->arch.rmode.active)
                eb = ~0;
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -570,7 +570,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       if (vcpu->rmode.active)
+       if (vcpu->arch.rmode.active)
                rflags |= IOPL_MASK | X86_EFLAGS_VM;
        vmcs_writel(GUEST_RFLAGS, rflags);
 }
@@ -1111,15 +1111,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 {
        unsigned long flags;
 
-       vcpu->rmode.active = 0;
+       vcpu->arch.rmode.active = 0;
 
-       vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
-       vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
-       vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
+       vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
+       vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
+       vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
 
        flags = vmcs_readl(GUEST_RFLAGS);
        flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
-       flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
+       flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1127,10 +1127,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
        update_exception_bitmap(vcpu);
 
-       fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
-       fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
-       fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
-       fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
+       fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
+       fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
+       fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
+       fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
        vmcs_write16(GUEST_SS_SELECTOR, 0);
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1168,19 +1168,19 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 {
        unsigned long flags;
 
-       vcpu->rmode.active = 1;
+       vcpu->arch.rmode.active = 1;
 
-       vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+       vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
 
-       vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+       vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
 
-       vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+       vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
+       vcpu->arch.rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
 
        flags |= IOPL_MASK | X86_EFLAGS_VM;
 
@@ -1198,10 +1198,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_CS_BASE, 0xf0000);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
 
-       fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
-       fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
-       fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
-       fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+       fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
+       fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
+       fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
+       fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
        kvm_mmu_reset_context(vcpu);
        init_rmode_tss(vcpu->kvm);
@@ -1251,10 +1251,10 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        vmx_fpu_deactivate(vcpu);
 
-       if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
+       if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
                enter_pmode(vcpu);
 
-       if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
+       if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
                enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
@@ -1285,7 +1285,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        vmcs_writel(CR4_READ_SHADOW, cr4);
-       vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
+       vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->arch.cr4 = cr4;
 }
@@ -1374,17 +1374,17 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;
 
-       if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
-               vcpu->rmode.tr.selector = var->selector;
-               vcpu->rmode.tr.base = var->base;
-               vcpu->rmode.tr.limit = var->limit;
-               vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+       if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
+               vcpu->arch.rmode.tr.selector = var->selector;
+               vcpu->arch.rmode.tr.base = var->base;
+               vcpu->arch.rmode.tr.limit = var->limit;
+               vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
                return;
        }
        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
-       if (vcpu->rmode.active && var->s) {
+       if (vcpu->arch.rmode.active && var->s) {
                /*
                 * Hack real-mode segments into vm86 compatibility.
                 */
@@ -1613,7 +1613,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                goto out;
        }
 
-       vmx->vcpu.rmode.active = 0;
+       vmx->vcpu.arch.rmode.active = 0;
 
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        set_cr8(&vmx->vcpu, 0);
@@ -1718,7 +1718,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (vcpu->rmode.active) {
+       if (vcpu->arch.rmode.active) {
                vmx->rmode.irq.pending = true;
                vmx->rmode.irq.vector = irq;
                vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP);
@@ -1812,7 +1812,7 @@ static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                                  int vec, u32 err_code)
 {
-       if (!vcpu->rmode.active)
+       if (!vcpu->arch.rmode.active)
                return 0;
 
        /*
@@ -1871,11 +1871,11 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return kvm_mmu_page_fault(vcpu, cr2, error_code);
        }
 
-       if (vcpu->rmode.active &&
+       if (vcpu->arch.rmode.active &&
            handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
                                                        error_code)) {
-               if (vcpu->halt_request) {
-                       vcpu->halt_request = 0;
+               if (vcpu->arch.halt_request) {
+                       vcpu->arch.halt_request = 0;
                        return kvm_emulate_halt(vcpu);
                }
                return 1;
@@ -2270,7 +2270,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
        if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
                if ((idtv_info_field & VECTORING_INFO_TYPE_MASK)
                    == INTR_TYPE_EXT_INTR
-                   && vcpu->rmode.active) {
+                   && vcpu->arch.rmode.active) {
                        u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK;
 
                        vmx_inject_irq(vcpu, vect);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 475918e..41d8e37 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -131,10 +131,10 @@ EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
-       WARN_ON(vcpu->exception.pending);
-       vcpu->exception.pending = true;
-       vcpu->exception.has_error_code = false;
-       vcpu->exception.nr = nr;
+       WARN_ON(vcpu->arch.exception.pending);
+       vcpu->arch.exception.pending = true;
+       vcpu->arch.exception.has_error_code = false;
+       vcpu->arch.exception.nr = nr;
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
@@ -142,11 +142,11 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
 {
        ++vcpu->stat.pf_guest;
-       if (vcpu->exception.pending && vcpu->exception.nr == PF_VECTOR) {
+       if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
                printk(KERN_DEBUG "kvm: inject_page_fault:"
                       " double fault 0x%lx\n", addr);
-               vcpu->exception.nr = DF_VECTOR;
-               vcpu->exception.error_code = 0;
+               vcpu->arch.exception.nr = DF_VECTOR;
+               vcpu->arch.exception.error_code = 0;
                return;
        }
        vcpu->arch.cr2 = addr;
@@ -155,19 +155,19 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
-       WARN_ON(vcpu->exception.pending);
-       vcpu->exception.pending = true;
-       vcpu->exception.has_error_code = true;
-       vcpu->exception.nr = nr;
-       vcpu->exception.error_code = error_code;
+       WARN_ON(vcpu->arch.exception.pending);
+       vcpu->arch.exception.pending = true;
+       vcpu->arch.exception.has_error_code = true;
+       vcpu->arch.exception.nr = nr;
+       vcpu->arch.exception.error_code = error_code;
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
 
 static void __queue_exception(struct kvm_vcpu *vcpu)
 {
-       kvm_x86_ops->queue_exception(vcpu, vcpu->exception.nr,
-                                    vcpu->exception.has_error_code,
-                                    vcpu->exception.error_code);
+       kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
+                                    vcpu->arch.exception.has_error_code,
+                                    vcpu->arch.exception.error_code);
 }
 
 /*
@@ -2437,7 +2437,7 @@ again:
                goto out;
        }
 
-       if (vcpu->exception.pending)
+       if (vcpu->arch.exception.pending)
                __queue_exception(vcpu);
        else if (irqchip_in_kernel(vcpu->kvm))
                kvm_x86_ops->inject_pending_irq(vcpu);
@@ -2478,8 +2478,8 @@ again:
                profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
        }
 
-       if (vcpu->exception.pending && kvm_x86_ops->exception_injected(vcpu))
-               vcpu->exception.pending = false;
+       if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
+               vcpu->arch.exception.pending = false;
 
        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
 
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 81ac7b0..c4bb5fc 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -136,12 +136,6 @@ struct kvm_vcpu_arch {
        struct kvm_pio_request pio;
        void *pio_data;
 
-
-};
-
-struct kvm_vcpu {
-       KVM_VCPU_COMM;
-
        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
@@ -161,6 +155,11 @@ struct kvm_vcpu {
        } rmode;
        int halt_request; /* real mode on Intel only */
 
+};
+
+struct kvm_vcpu {
+       KVM_VCPU_COMM;
+
        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
 
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index b023f19..1292485 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1640,7 +1640,7 @@ special_insn:
                c->dst.type = OP_NONE; /* Disable writeback. */
                break;
        case 0xf4:              /* hlt */
-               ctxt->vcpu->halt_request = 1;
+               ctxt->vcpu->arch.halt_request = 1;
                goto done;
        case 0xf5:      /* cmc */
                /* complement carry flag from eflags reg */
-- 
1.5.1.2

Attachment: 0011-kvm-portability-Moving-rmode-exceptions-halt_req.patch
Description: 0011-kvm-portability-Moving-rmode-exceptions-halt_req.patch

-------------------------------------------------------------------------
SF.Net email is sponsored by:
Check out the new SourceForge.net Marketplace.
It's the best place to buy or sell services
for just about anything Open Source.
http://ad.doubleclick.net/clk;164216239;13503038;w?http://sf.net/marketplace
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to