From: Zhang Xiantao <[EMAIL PROTECTED]>
Date: Fri, 14 Dec 2007 00:22:42 +0800
Subject: [PATCH] kvm portability: Moving shadow_efer and pdptrs to archs.

Moving shadow_efer and pdptrs to kvm_vcpu_arch.
Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
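The change is mechanical: the two fields move from struct kvm_vcpu into the
per-arch struct kvm_vcpu_arch, and every user is updated to go through
vcpu->arch. No logic is changed. A minimal before/after sketch of the access
pattern, using only field names and statements that appear in the diff below:

    /* before: x86-specific state lived directly in struct kvm_vcpu */
    vcpu->shadow_efer |= EFER_LMA;
    root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;

    /* after: the same state lives in the per-arch struct kvm_vcpu_arch */
    vcpu->arch.shadow_efer |= EFER_LMA;
    root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
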
 drivers/kvm/mmu.c         |    6 +++---
 drivers/kvm/paging_tmpl.h |    2 +-
 drivers/kvm/svm.c         |    8 ++++----
 drivers/kvm/vmx.c         |   10 +++++-----
 drivers/kvm/x86.c         |   22 +++++++++++-----------
 drivers/kvm/x86.h         |    6 +++---
 6 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 20d5306..fcfc2fe 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -190,7 +190,7 @@ static int is_cpuid_PSE36(void)
 
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-       return vcpu->shadow_efer & EFER_NX;
+       return vcpu->arch.shadow_efer & EFER_NX;
 }
 
 static int is_present_pte(unsigned long pte)
@@ -1075,11 +1075,11 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-                       if (!is_present_pte(vcpu->pdptrs[i])) {
+                       if (!is_present_pte(vcpu->arch.pdptrs[i])) {
                                vcpu->mmu.pae_root[i] = 0;
                                continue;
                        }
-                       root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
+                       root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
                } else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index f64f414..956ecc6 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -133,7 +133,7 @@ walk:
        pte = vcpu->arch.cr3;
 #if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
-               pte = vcpu->pdptrs[(addr >> 30) & 3];
+               pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
                if (!is_present_pte(pte))
                        goto not_present;
                --walker->level;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 23fad6c..3650e35 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -185,7 +185,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
                efer &= ~EFER_LME;
 
        to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
-       vcpu->shadow_efer = efer;
+       vcpu->arch.shadow_efer = efer;
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -771,14 +771,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-       if (vcpu->shadow_efer & EFER_LME) {
+       if (vcpu->arch.shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-                       vcpu->shadow_efer |= EFER_LMA;
+                       vcpu->arch.shadow_efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }
 
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-                       vcpu->shadow_efer &= ~EFER_LMA;
+                       vcpu->arch.shadow_efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 2521b47..d94b925 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -661,7 +661,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                 * if efer.sce is enabled.
                 */
                index = __find_msr_index(vmx, MSR_K6_STAR);
-               if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+               if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
                        move_msr_up(vmx, index, save_nmsrs++);
        }
 #endif
@@ -1222,7 +1222,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
                             | AR_TYPE_BUSY_64_TSS);
        }
 
-       vcpu->shadow_efer |= EFER_LMA;
+       vcpu->arch.shadow_efer |= EFER_LMA;
 
        find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
        vmcs_write32(VM_ENTRY_CONTROLS,
@@ -1232,7 +1232,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-       vcpu->shadow_efer &= ~EFER_LMA;
+       vcpu->arch.shadow_efer &= ~EFER_LMA;
 
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
@@ -1258,7 +1258,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
-       if (vcpu->shadow_efer & EFER_LME) {
+       if (vcpu->arch.shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1297,7 +1297,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
-       vcpu->shadow_efer = efer;
+       vcpu->arch.shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                                     vmcs_read32(VM_ENTRY_CONTROLS) |
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 9867e12..f3adc15 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -179,7 +179,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
-       u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
        mutex_lock(&vcpu->kvm->lock);
        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
@@ -196,7 +196,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        }
        ret = 1;
 
-       memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
+       memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
        mutex_unlock(&vcpu->kvm->lock);
 
@@ -205,7 +205,7 @@ out:
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
-       u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;
 
@@ -216,7 +216,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
-       changed = memcmp(pdpte, vcpu->pdptrs, sizeof(pdpte)) != 0;
+       changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
        mutex_unlock(&vcpu->kvm->lock);
 
@@ -247,7 +247,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-               if ((vcpu->shadow_efer & EFER_LME)) {
+               if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
@@ -437,7 +437,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        }
 
        if (is_paging(vcpu)
-           && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
@@ -446,9 +446,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
-       efer |= vcpu->shadow_efer & EFER_LMA;
+       efer |= vcpu->arch.shadow_efer & EFER_LMA;
 
-       vcpu->shadow_efer = efer;
+       vcpu->arch.shadow_efer = efer;
 }
 
 #endif
@@ -554,7 +554,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                break;
 #ifdef CONFIG_X86_64
        case MSR_EFER:
-               data = vcpu->shadow_efer;
+               data = vcpu->arch.shadow_efer;
                break;
 #endif
        default:
@@ -2681,7 +2681,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = vcpu->arch.cr4;
        sregs->cr8 = get_cr8(vcpu);
-       sregs->efer = vcpu->shadow_efer;
+       sregs->efer = vcpu->arch.shadow_efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
        if (irqchip_in_kernel(vcpu->kvm)) {
@@ -2728,7 +2728,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        set_cr8(vcpu, sregs->cr8);
 
-       mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
+       mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
 #ifdef CONFIG_X86_64
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
 #endif
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 1120499..8f5b875 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -105,12 +105,12 @@ struct kvm_vcpu_arch {
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr8;
+       u64 pdptrs[4]; /* pae */
+       u64 shadow_efer;
 };
 
 struct kvm_vcpu {
        KVM_VCPU_COMM;
-       u64 pdptrs[4]; /* pae */
-       u64 shadow_efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
 #define VCPU_MP_STATE_RUNNABLE          0
@@ -352,7 +352,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-       return vcpu->shadow_efer & EFER_LME;
+       return vcpu->arch.shadow_efer & EFER_LME;
 #else
        return 0;
 #endif
-- 
1.5.1.2
