From: Zhang Xiantao <[EMAIL PROTECTED]>
Date: Fri, 14 Dec 2007 01:07:51 +0800
Subject: [PATCH] kvm portability: Moving mmu-related fields to arch.

This patch moves mmu-related fields to arch.
Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
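For reference, a rough sketch of the resulting layout, abbreviated from the
x86.h hunks at the end of this diff (all names are taken from the patch
itself; surrounding members are elided, so this is a sketch rather than a
compilable excerpt):

        struct kvm_vcpu_arch {
                /* ... existing x86 state (cr3, pdptrs, mp_state, ...) ... */
                u64 ia32_misc_enable_msr;

                struct kvm_mmu mmu;

                struct kvm_mmu_memory_cache mmu_pte_chain_cache;
                struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
                struct kvm_mmu_memory_cache mmu_page_cache;
                struct kvm_mmu_memory_cache mmu_page_header_cache;
        };

        struct kvm_vcpu {
                KVM_VCPU_COMM;  /* common, arch-independent fields */
                /* ... remaining x86 fields (last_pt_write_gfn, ...) ... */
        };

Every access site then changes mechanically, e.g. vcpu->mmu.root_hpa
becomes vcpu->arch.mmu.root_hpa and vcpu->mmu.gva_to_gpa(vcpu, gva)
becomes vcpu->arch.mmu.gva_to_gpa(vcpu, gva).
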
 drivers/kvm/mmu.c         |  114 ++++++++++++++++++++++----------------------
 drivers/kvm/paging_tmpl.h |    8 ++--
 drivers/kvm/x86.c         |   18 ++++----
 drivers/kvm/x86.h         |   11 ++--
 4 files changed, 76 insertions(+), 75 deletions(-)

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index fcfc2fe..9524bbf 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -292,18 +292,18 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
        int r;
 
        kvm_mmu_free_some_pages(vcpu);
-       r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                                   rmap_desc_cache, 1);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
+       r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
 out:
        return r;
@@ -311,10 +311,10 @@ out:
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-       mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-       mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-       mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+       mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -330,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+       return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
 }
 
@@ -341,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+       return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
 }
 
@@ -568,9 +568,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;
 
-       sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
-       sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-       sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+       sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+       sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+       sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(sp->spt));
@@ -692,11 +692,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        struct hlist_node *node;
 
        role.word = 0;
-       role.glevels = vcpu->mmu.root_level;
+       role.glevels = vcpu->arch.mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
        role.access = access;
-       if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+       if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
@@ -718,7 +718,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
-       vcpu->mmu.prefetch_page(vcpu, sp);
+       vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
        if (new_page)
@@ -875,7 +875,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        if (gpa == UNMAPPED_GVA)
                return NULL;
@@ -972,7 +972,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
        int level = PT32E_ROOT_LEVEL;
-       hpa_t table_addr = vcpu->mmu.root_hpa;
+       hpa_t table_addr = vcpu->arch.mmu.root_hpa;
        int pt_write = 0;
 
        for (; ; level--) {
@@ -1024,29 +1024,29 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        int i;
        struct kvm_mmu_page *sp;
 
-       if (!VALID_PAGE(vcpu->mmu.root_hpa))
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
 #ifdef CONFIG_X86_64
-       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->mmu.root_hpa;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
 
                sp = page_header(root);
                --sp->root_count;
-               vcpu->mmu.root_hpa = INVALID_PAGE;
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                return;
        }
 #endif
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->mmu.pae_root[i];
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                if (root) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        --sp->root_count;
                }
-               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+               vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
-       vcpu->mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
@@ -1058,38 +1058,38 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
        root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
-       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->mmu.root_hpa;
+       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->arch.mmu.root_hpa;
 
                ASSERT(!VALID_PAGE(root));
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
-               vcpu->mmu.root_hpa = root;
+               vcpu->arch.mmu.root_hpa = root;
                return;
        }
 #endif
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->mmu.pae_root[i];
+               hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                ASSERT(!VALID_PAGE(root));
-               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
+               if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
                        if (!is_present_pte(vcpu->arch.pdptrs[i])) {
-                               vcpu->mmu.pae_root[i] = 0;
+                               vcpu->arch.mmu.pae_root[i] = 0;
                                continue;
                        }
                        root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
-               } else if (vcpu->mmu.root_level == 0)
+               } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, !is_paging(vcpu),
                                      ACC_ALL, NULL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
-               vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+               vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
-       vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+       vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -1109,7 +1109,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                return r;
 
        ASSERT(vcpu);
-       ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        gfn = gva >> PAGE_SHIFT;
 
@@ -1124,7 +1124,7 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
@@ -1171,7 +1171,7 @@ static void paging_free(struct kvm_vcpu *vcpu)
 
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
@@ -1192,7 +1192,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu *context = &vcpu->mmu;
+       struct kvm_mmu *context = &vcpu->arch.mmu;
 
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
@@ -1213,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
@@ -1228,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-               vcpu->mmu.free(vcpu);
-               vcpu->mmu.root_hpa = INVALID_PAGE;
+       if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               vcpu->arch.mmu.free(vcpu);
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        }
 }
 
@@ -1250,7 +1250,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        mmu_alloc_roots(vcpu);
-       kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+       kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
        mutex_unlock(&vcpu->kvm->lock);
@@ -1420,7 +1420,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
        return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
@@ -1443,7 +1443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
        enum emulation_result er;
 
        mutex_lock(&vcpu->kvm->lock);
-       r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+       r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
        if (r < 0)
                goto out;
 
@@ -1486,7 +1486,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                                  struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, sp);
        }
-       free_page((unsigned long)vcpu->mmu.pae_root);
+       free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1508,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
-       vcpu->mmu.pae_root = page_address(page);
+       vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
-               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+               vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 
        return 0;
 
@@ -1522,7 +1522,7 @@ error_1:
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        return alloc_mmu_pages(vcpu);
 }
@@ -1530,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
-       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        return init_kvm_mmu(vcpu);
 }
@@ -1659,11 +1659,11 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                printk(KERN_ERR "audit: (%s) nontrapping pte"
                                       " in nonleaf level: levels %d gva %lx"
                                       " level %d pte %llx\n", audit_msg,
-                                      vcpu->mmu.root_level, va, level, ent);
+                                      vcpu->arch.mmu.root_level, va, level, ent);
 
                        audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
-                       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+                       gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
                        struct page *page = gpa_to_page(vcpu, gpa);
                        hpa_t hpa = page_to_phys(page);
 
@@ -1671,7 +1671,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
                                printk(KERN_ERR "xx audit error: (%s) levels %d"
                                       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-                                      audit_msg, vcpu->mmu.root_level,
+                                      audit_msg, vcpu->arch.mmu.root_level,
                                       va, gpa, hpa, ent,
                                       is_shadow_present_pte(ent));
                        else if (ent == shadow_notrap_nonpresent_pte
@@ -1688,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 {
        unsigned i;
 
-       if (vcpu->mmu.root_level == 4)
-               audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+       if (vcpu->arch.mmu.root_level == 4)
+               audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
-                       if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+                       if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
-                                                   vcpu->mmu.pae_root[i],
+                                                   vcpu->arch.mmu.pae_root[i],
                                                    i << 30,
                                                    2);
 }
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 956ecc6..f18543e 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -129,7 +129,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
        pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
 walk:
-       walker->level = vcpu->mmu.root_level;
+       walker->level = vcpu->arch.mmu.root_level;
        pte = vcpu->arch.cr3;
 #if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
@@ -275,10 +275,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        if (!is_present_pte(walker->ptes[walker->level - 1]))
                return NULL;
 
-       shadow_addr = vcpu->mmu.root_hpa;
-       level = vcpu->mmu.shadow_root_level;
+       shadow_addr = vcpu->arch.mmu.root_hpa;
+       level = vcpu->arch.mmu.shadow_root_level;
        if (level == PT32E_ROOT_LEVEL) {
-               shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+               shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
                --level;
        }
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 94a3ca4..9136865 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -374,7 +374,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                kvm_inject_gp(vcpu, 0);
        else {
                vcpu->arch.cr3 = cr3;
-               vcpu->mmu.new_cr3(vcpu);
+               vcpu->arch.mmu.new_cr3(vcpu);
        }
        mutex_unlock(&vcpu->kvm->lock);
 }
@@ -496,7 +496,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
-               vcpu->ia32_misc_enable_msr = data;
+               vcpu->arch.ia32_misc_enable_msr = data;
                break;
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
@@ -550,7 +550,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MISC_ENABLE:
-               data = vcpu->ia32_misc_enable_msr;
+               data = vcpu->arch.ia32_misc_enable_msr;
                break;
 #ifdef CONFIG_X86_64
        case MSR_EFER:
@@ -1527,7 +1527,7 @@ int emulator_read_std(unsigned long addr,
        void *data = val;
 
        while (bytes) {
-               gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -1561,7 +1561,7 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1609,7 +1609,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
                                           struct kvm_vcpu *vcpu)
 {
        struct kvm_io_device *mmio_dev;
-       gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+       gpa_t                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
        if (gpa == UNMAPPED_GVA) {
                kvm_inject_page_fault(vcpu, addr, 2);
@@ -1678,7 +1678,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 #ifndef CONFIG_X86_64
        /* guests cmpxchg8b have to be emulated atomically */
        if (bytes == 8) {
-               gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                struct page *page;
                char *addr;
                u64 *val;
@@ -2829,7 +2829,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 
        vcpu_load(vcpu);
        mutex_lock(&vcpu->kvm->lock);
-       gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
+       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
@@ -3003,7 +3003,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        BUG_ON(vcpu->kvm == NULL);
        kvm = vcpu->kvm;
 
-       vcpu->mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
                vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
        else
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 3416fcd..ce0d2a9 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -116,10 +116,6 @@ struct kvm_vcpu_arch {
 #define VCPU_MP_STATE_HALTED            4
        int mp_state;
        int sipi_vector;
-};
-
-struct kvm_vcpu {
-       KVM_VCPU_COMM;
        u64 ia32_misc_enable_msr;
 
        struct kvm_mmu mmu;
@@ -129,6 +125,11 @@ struct kvm_vcpu {
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
+};
+
+struct kvm_vcpu {
+       KVM_VCPU_COMM;
+
        gfn_t last_pt_write_gfn;
        int   last_pt_write_count;
        u64  *last_pte_updated;
@@ -343,7 +344,7 @@ static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
-       if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+       if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
                return 0;
 
        return kvm_mmu_load(vcpu);
-- 
1.5.1.2
