From: Paul Turner <[EMAIL PROTECTED]>

Update vmx.c and x86_emulate.c to access per-vcpu VMX state through the new vmx sub-structure as appropriate.

- Paul

diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index b909b54..7af8f34 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -31,7 +31,7 @@ #include "segment_descriptor.h"
  MODULE_AUTHOR("Qumranet");
  MODULE_LICENSE("GPL");

-static int init_rmode_tss(struct kvm *kvm);
+static int init_vmx_rmode_tss(struct kvm *kvm);

  static DEFINE_PER_CPU(struct vmcs *, vmxarea);
  static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -95,9 +95,9 @@ static inline u64 msr_efer_save_restore_

  static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
  {
-       int efer_offset = vcpu->msr_offset_efer;
-       return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
-               msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+       int efer_offset = vcpu->vmx.msr_offset_efer;
+       return msr_efer_save_restore_bits(vcpu->vmx.host_msrs[efer_offset]) !=
+               msr_efer_save_restore_bits(vcpu->vmx.guest_msrs[efer_offset]);
  }

  static inline int is_page_fault(u32 intr_info)
@@ -125,7 +125,7 @@ static int __find_msr_index(struct kvm_v
        int i;

        for (i = 0; i < vcpu->nmsrs; ++i)
-               if (vcpu->guest_msrs[i].index == msr)
+               if (vcpu->vmx.guest_msrs[i].index == msr)
                        return i;
        return -1;
  }
@@ -136,7 +136,7 @@ static struct vmx_msr_entry *find_msr_en

        i = __find_msr_index(vcpu, msr);
        if (i >= 0)
-               return &vcpu->guest_msrs[i];
+               return &vcpu->vmx.guest_msrs[i];
        return NULL;
  }

@@ -259,7 +259,7 @@ static void update_exception_bitmap(stru
                eb |= 1u << NM_VECTOR;
        if (vcpu->guest_debug.enabled)
                eb |= 1u << 1;
-       if (vcpu->rmode.active)
+       if (vcpu->vmx.rmode.active)
                eb = ~0;
        vmcs_write32(EXCEPTION_BITMAP, eb);
  }
@@ -284,19 +284,19 @@ #endif
  static void load_transition_efer(struct kvm_vcpu *vcpu)
  {
        u64 trans_efer;
-       int efer_offset = vcpu->msr_offset_efer;
+       int efer_offset = vcpu->vmx.msr_offset_efer;

-       trans_efer = vcpu->host_msrs[efer_offset].data;
+       trans_efer = vcpu->vmx.host_msrs[efer_offset].data;
        trans_efer &= ~EFER_SAVE_RESTORE_BITS;
        trans_efer |= msr_efer_save_restore_bits(
-                               vcpu->guest_msrs[efer_offset]);
+                               vcpu->vmx.guest_msrs[efer_offset]);
        wrmsrl(MSR_EFER, trans_efer);
        vcpu->stat.efer_reload++;
  }

  static void vmx_save_host_state(struct kvm_vcpu *vcpu)
  {
-       struct vmx_host_state *hs = &vcpu->vmx_host_state;
+       struct vmx_host_state *hs = &vcpu->vmx.host_state;

        if (hs->loaded)
                return;
@@ -333,17 +333,17 @@ #endif

  #ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
-               save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+               save_msrs(vcpu->vmx.host_msrs + vcpu->vmx.msr_offset_kernel_gs_base, 1);
        }
  #endif
-       load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+       load_msrs(vcpu->vmx.guest_msrs, vcpu->save_nmsrs);
        if (msr_efer_need_save_restore(vcpu))
                load_transition_efer(vcpu);
  }

  static void vmx_load_host_state(struct kvm_vcpu *vcpu)
  {
-       struct vmx_host_state *hs = &vcpu->vmx_host_state;
+       struct vmx_host_state *hs = &vcpu->vmx.host_state;

        if (!hs->loaded)
                return;
@@ -365,10 +365,10 @@ #endif

                reload_tss();
        }
-       save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
-       load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+       save_msrs(vcpu->vmx.guest_msrs, vcpu->save_nmsrs);
+       load_msrs(vcpu->vmx.host_msrs, vcpu->save_nmsrs);
        if (msr_efer_need_save_restore(vcpu))
-               load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
+               load_msrs(vcpu->vmx.host_msrs + vcpu->vmx.msr_offset_efer, 1);
  }

  /*
@@ -503,12 +503,12 @@ static void vmx_inject_gp(struct kvm_vcp
  void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
  {
        struct vmx_msr_entry tmp;
-       tmp = vcpu->guest_msrs[to];
-       vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
-       vcpu->guest_msrs[from] = tmp;
-       tmp = vcpu->host_msrs[to];
-       vcpu->host_msrs[to] = vcpu->host_msrs[from];
-       vcpu->host_msrs[from] = tmp;
+       tmp = vcpu->vmx.guest_msrs[to];
+       vcpu->vmx.guest_msrs[to] = vcpu->vmx.guest_msrs[from];
+       vcpu->vmx.guest_msrs[from] = tmp;
+       tmp = vcpu->vmx.host_msrs[to];
+       vcpu->vmx.host_msrs[to] = vcpu->vmx.host_msrs[from];
+       vcpu->vmx.host_msrs[from] = tmp;
  }

  /*
@@ -549,10 +549,10 @@ #endif
        vcpu->save_nmsrs = save_nmsrs;

  #ifdef CONFIG_X86_64
-       vcpu->msr_offset_kernel_gs_base =
+       vcpu->vmx.msr_offset_kernel_gs_base =
                __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
  #endif
-       vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+       vcpu->vmx.msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
  }

  /*
@@ -645,7 +645,7 @@ static int vmx_set_msr(struct kvm_vcpu *
  #ifdef CONFIG_X86_64
        case MSR_EFER:
                ret = kvm_set_msr_common(vcpu, msr_index, data);
-               if (vcpu->vmx_host_state.loaded)
+               if (vcpu->vmx.host_state.loaded)
                        load_transition_efer(vcpu);
                break;
        case MSR_FS_BASE:
@@ -671,8 +671,8 @@ #endif
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        msr->data = data;
-                       if (vcpu->vmx_host_state.loaded)
-                               load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+                       if (vcpu->vmx.host_state.loaded)
+                               load_msrs(vcpu->vmx.guest_msrs, vcpu->save_nmsrs);
                        break;
                }
                ret = kvm_set_msr_common(vcpu, msr_index, data);
@@ -867,15 +867,15 @@ static void enter_pmode(struct kvm_vcpu
  {
        unsigned long flags;

-       vcpu->rmode.active = 0;
+       vcpu->vmx.rmode.active = 0;

-       vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
-       vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
-       vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
+       vmcs_writel(GUEST_TR_BASE, vcpu->vmx.rmode.tr.base);
+       vmcs_write32(GUEST_TR_LIMIT, vcpu->vmx.rmode.tr.limit);
+       vmcs_write32(GUEST_TR_AR_BYTES, vcpu->vmx.rmode.tr.ar);

        flags = vmcs_readl(GUEST_RFLAGS);
        flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
-       flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
+       flags |= (vcpu->vmx.rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);

        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
@@ -883,10 +883,10 @@ static void enter_pmode(struct kvm_vcpu

        update_exception_bitmap(vcpu);

-       fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
-       fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
-       fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
-       fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
+       fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->vmx.rmode.es);
+       fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->vmx.rmode.ds);
+       fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->vmx.rmode.gs);
+       fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->vmx.rmode.fs);

        vmcs_write16(GUEST_SS_SELECTOR, 0);
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -896,13 +896,13 @@ static void enter_pmode(struct kvm_vcpu
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
  }

-static int rmode_tss_base(struct kvm* kvm)
+static int vmx_rmode_tss_base(struct kvm* kvm)
  {
        gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
        return base_gfn << PAGE_SHIFT;
  }

-static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
+static void fix_vmx_rmode_seg(int seg, struct kvm_save_segment *save)
  {
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

@@ -915,23 +915,23 @@ static void fix_rmode_seg(int seg, struc
        vmcs_write32(sf->ar_bytes, 0xf3);
  }

-static void enter_rmode(struct kvm_vcpu *vcpu)
+static void enter_vmx_rmode(struct kvm_vcpu *vcpu)
  {
        unsigned long flags;

-       vcpu->rmode.active = 1;
+       vcpu->vmx.rmode.active = 1;

-       vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
-       vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
+       vcpu->vmx.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
+       vmcs_writel(GUEST_TR_BASE, vmx_rmode_tss_base(vcpu->kvm));

-       vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
+       vcpu->vmx.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

-       vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
+       vcpu->vmx.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        flags = vmcs_readl(GUEST_RFLAGS);
-       vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
+       vcpu->vmx.rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

        flags |= IOPL_MASK | X86_EFLAGS_VM;

@@ -949,12 +949,11 @@ static void enter_rmode(struct kvm_vcpu
                vmcs_writel(GUEST_CS_BASE, 0xf0000);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

-       fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
-       fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
-       fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
-       fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
-
-       init_rmode_tss(vcpu->kvm);
+       fix_vmx_rmode_seg(VCPU_SREG_ES, &vcpu->vmx.rmode.es);
+       fix_vmx_rmode_seg(VCPU_SREG_DS, &vcpu->vmx.rmode.ds);
+       fix_vmx_rmode_seg(VCPU_SREG_GS, &vcpu->vmx.rmode.gs);
+       fix_vmx_rmode_seg(VCPU_SREG_FS, &vcpu->vmx.rmode.fs);
+       init_vmx_rmode_tss(vcpu->kvm);
  }

  #ifdef CONFIG_X86_64
@@ -1001,11 +1000,11 @@ static void vmx_set_cr0(struct kvm_vcpu
  {
        vmx_fpu_deactivate(vcpu);

-       if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
+       if (vcpu->vmx.rmode.active && (cr0 & CR0_PE_MASK))
                enter_pmode(vcpu);

-       if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
-               enter_rmode(vcpu);
+       if (!vcpu->vmx.rmode.active && !(cr0 & CR0_PE_MASK))
+               enter_vmx_rmode(vcpu);

  #ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
@@ -1035,7 +1034,7 @@ static void vmx_set_cr3(struct kvm_vcpu
  static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  {
        vmcs_writel(CR4_READ_SHADOW, cr4);
-       vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
+       vmcs_writel(GUEST_CR4, cr4 | (vcpu->vmx.rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->cr4 = cr4;
  }
@@ -1123,17 +1122,17 @@ static void vmx_set_segment(struct kvm_v
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

-       if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
-               vcpu->rmode.tr.selector = var->selector;
-               vcpu->rmode.tr.base = var->base;
-               vcpu->rmode.tr.limit = var->limit;
-               vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+       if (vcpu->vmx.rmode.active && seg == VCPU_SREG_TR) {
+               vcpu->vmx.rmode.tr.selector = var->selector;
+               vcpu->vmx.rmode.tr.base = var->base;
+               vcpu->vmx.rmode.tr.limit = var->limit;
+               vcpu->vmx.rmode.tr.ar = vmx_segment_access_rights(var);
                return;
        }
        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
-       if (vcpu->rmode.active && var->s) {
+       if (vcpu->vmx.rmode.active && var->s) {
                /*
                 * Hack real-mode segments into vm86 compatibility.
                 */
@@ -1177,10 +1176,10 @@ static void vmx_set_gdt(struct kvm_vcpu
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
  }

-static int init_rmode_tss(struct kvm* kvm)
+static int init_vmx_rmode_tss(struct kvm* kvm)
  {
        struct page *p1, *p2, *p3;
-       gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
+       gfn_t fn = vmx_rmode_tss_base(kvm) >> PAGE_SHIFT;
        char *page;

        p1 = gfn_to_page(kvm, fn++);
@@ -1243,7 +1242,7 @@ static int vmx_vcpu_setup(struct kvm_vcp
        int ret = 0;
        unsigned long kvm_vmx_return;

-       if (!init_rmode_tss(vcpu->kvm)) {
+       if (!init_vmx_rmode_tss(vcpu->kvm)) {
                ret = -ENOMEM;
                goto out;
        }
@@ -1383,10 +1382,10 @@ #endif
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
                data = data_low | ((u64)data_high << 32);
-               vcpu->host_msrs[j].index = index;
-               vcpu->host_msrs[j].reserved = 0;
-               vcpu->host_msrs[j].data = data;
-               vcpu->guest_msrs[j] = vcpu->host_msrs[j];
+               vcpu->vmx.host_msrs[j].index = index;
+               vcpu->vmx.host_msrs[j].reserved = 0;
+               vcpu->vmx.host_msrs[j].data = data;
+               vcpu->vmx.guest_msrs[j] = vcpu->vmx.host_msrs[j];
                ++vcpu->nmsrs;
        }

@@ -1409,7 +1408,7 @@ #endif
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

        vcpu->cr0 = 0x60000010;
-       vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
+       vmx_set_cr0(vcpu, vcpu->cr0); // enter vmx.rmode
        vmx_set_cr4(vcpu, 0);
  #ifdef CONFIG_X86_64
        vmx_set_efer(vcpu, 0);
@@ -1423,7 +1422,7 @@ out:
        return ret;
  }

-static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
+static void inject_vmx_rmode_irq(struct kvm_vcpu *vcpu, int irq)
  {
        u16 ent[2];
        u16 cs;
@@ -1478,8 +1477,8 @@ static void kvm_do_inject_irq(struct kvm
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);

-       if (vcpu->rmode.active) {
-               inject_rmode_irq(vcpu, irq);
+       if (vcpu->vmx.rmode.active) {
+               inject_vmx_rmode_irq(vcpu, irq);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -1534,10 +1533,10 @@ static void kvm_guest_debug_pre(struct k
        }
  }

-static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+static int handle_vmx_rmode_exception(struct kvm_vcpu *vcpu,
                                  int vec, u32 err_code)
  {
-       if (!vcpu->rmode.active)
+       if (!vcpu->vmx.rmode.active)
                return 0;

        /*
@@ -1619,11 +1618,11 @@ static int handle_exception(struct kvm_v
                }
        }

-       if (vcpu->rmode.active &&
-           handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
+       if (vcpu->vmx.rmode.active &&
+           handle_vmx_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
                                                                error_code)) {
-               if (vcpu->halt_request) {
-                       vcpu->halt_request = 0;
+               if (vcpu->vmx.halt_request) {
+                       vcpu->vmx.halt_request = 0;
                        return kvm_emulate_halt(vcpu);
                }
                return 1;
@@ -2240,12 +2239,12 @@ static int vmx_create_vcpu(struct kvm_vc
  {
        struct vmcs *vmcs;

-       vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vcpu->guest_msrs)
+       vcpu->vmx.guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!vcpu->vmx.guest_msrs)
                return -ENOMEM;

-       vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vcpu->host_msrs)
+       vcpu->vmx.host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!vcpu->vmx.host_msrs)
                goto out_free_guest_msrs;

        vmcs = alloc_vmcs();
@@ -2259,12 +2258,12 @@ static int vmx_create_vcpu(struct kvm_vc
        return 0;

  out_free_msrs:
-       kfree(vcpu->host_msrs);
-       vcpu->host_msrs = NULL;
+       kfree(vcpu->vmx.host_msrs);
+       vcpu->vmx.host_msrs = NULL;

  out_free_guest_msrs:
-       kfree(vcpu->guest_msrs);
-       vcpu->guest_msrs = NULL;
+       kfree(vcpu->vmx.guest_msrs);
+       vcpu->vmx.guest_msrs = NULL;

        return -ENOMEM;
  }

diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index f60012d..048f901 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1155,7 +1155,7 @@ special_insn:
                DPRINTF("Urk! I don't handle SCAS.\n");
                goto cannot_emulate;
        case 0xf4:              /* hlt */
-               ctxt->vcpu->halt_request = 1;
+               ctxt->vcpu->vmx.halt_request = 1;
                goto done;
        case 0xc3: /* ret */
                dst.ptr = &_eip;

-------------------------------------------------------------------------
This SF.net email is sponsored by DB2 Express
Download DB2 Express C - the FREE version of DB2 express and take
control of your XML. No limits. Just data. Click to get it now.
http://sourceforge.net/powerbar/db2/
_______________________________________________
kvm-devel mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to