From 30e5badef53e8e5edf72ba2dbc6b9f1ccfd2aaa0 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <[EMAIL PROTECTED]>
Date: Tue, 20 Nov 2007 23:06:06 +0800
Subject: [PATCH] KVM Portability: Splitting kvm structure

Moving fields related to mmu into kvm_x86.
Signed-off-by: Zhang Xiantao <[EMAIL PROTECTED]>
---
 drivers/kvm/kvm.h |    8 -----
 drivers/kvm/mmu.c |   75 +++++++++++++++++++++++++++++++++--------------------
 drivers/kvm/x86.c |   14 +++++++---
 drivers/kvm/x86.h |   12 ++++++++-
 4 files changed, 68 insertions(+), 41 deletions(-)
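
For context, the split relies on the standard container_of() embedding
idiom: struct kvm_x86 embeds the generic struct kvm as its first member,
and to_kvm_x86() steps back from the embedded member to the containing
arch structure. A minimal stand-alone sketch of the idiom (toy types,
not the kernel definitions; only the layout and the cast matter):

#include <stddef.h>

/* Toy stand-ins for the kernel types; only the embedding matters here. */
struct kvm {
        int nmemslots;
};

struct kvm_x86 {
        struct kvm kvm;                /* generic part, embedded first */
        unsigned int n_free_mmu_pages; /* x86-only MMU bookkeeping */
};

/* Same shape as the kernel's container_of(): recover the containing
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_x86, kvm);
}

Generic code keeps passing struct kvm * around; each x86-only path below
converts on entry with to_kvm_x86() before touching the moved MMU fields.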

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index c7fdcd6..1e1d515 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -301,14 +301,6 @@ struct kvm {
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
-       /*
-        * Hash table of struct kvm_mmu_page.
-        */
-       struct list_head active_mmu_pages;
-       unsigned int n_free_mmu_pages;
-       unsigned int n_requested_mmu_pages;
-       unsigned int n_alloc_mmu_pages;
-       struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        struct list_head vm_list;
        struct file *filp;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 101cd53..683ad72 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -530,12 +530,14 @@ static int is_empty_shadow_page(u64 *spt)
 static void kvm_mmu_free_page(struct kvm *kvm,
                              struct kvm_mmu_page *page_head)
 {
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
        ASSERT(is_empty_shadow_page(page_head->spt));
        list_del(&page_head->link);
        __free_page(virt_to_page(page_head->spt));
        __free_page(virt_to_page(page_head->gfns));
        kfree(page_head);
-       ++kvm->n_free_mmu_pages;
+       ++kvm_x86->n_free_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -547,8 +549,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
 {
        struct kvm_mmu_page *page;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
-       if (!vcpu->kvm->n_free_mmu_pages)
+       if (!kvm_x86->n_free_mmu_pages)
                return NULL;
 
        page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
@@ -556,12 +559,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                      sizeof *page);
        page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
        page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(page->spt), (unsigned long)page);
-       list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+       list_add(&page->link, &kvm_x86->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->spt));
        page->slot_bitmap = 0;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
-       --vcpu->kvm->n_free_mmu_pages;
+       --kvm_x86->n_free_mmu_pages;
        return page;
 }
 
@@ -647,10 +650,12 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
 
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &kvm->mmu_page_hash[index];
+       bucket = &kvm_x86->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
@@ -674,6 +679,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
 
        role.word = 0;
        role.glevels = vcpu->mmu.root_level;
@@ -688,7 +695,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
+       bucket = &kvm_x86->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && page->role.word == role.word) {
                        mmu_page_add_parent_pte(vcpu, page, parent_pte);
@@ -758,6 +765,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
                             struct kvm_mmu_page *page)
 {
        u64 *parent_pte;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
        ++kvm->stat.mmu_shadow_zapped;
        while (page->multimapped || page->parent_pte) {
@@ -779,7 +787,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
                hlist_del(&page->hash_link);
                kvm_mmu_free_page(kvm, page);
        } else
-               list_move(&page->link, &kvm->active_mmu_pages);
+               list_move(&page->link, &kvm_x86->active_mmu_pages);
        kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -794,27 +802,28 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
        /*
         * If we set the number of mmu pages to be smaller be than the
         * number of actived pages , we must to free some mmu pages before we
         * change the value
         */
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
-       if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+       if ((kvm_x86->n_alloc_mmu_pages - kvm_x86->n_free_mmu_pages) >
            kvm_nr_mmu_pages) {
-               int n_used_mmu_pages = kvm->n_alloc_mmu_pages
-                                      - kvm->n_free_mmu_pages;
+               int n_used_mmu_pages = kvm_x86->n_alloc_mmu_pages
+                                      - kvm_x86->n_free_mmu_pages;
 
                while (n_used_mmu_pages > kvm_nr_mmu_pages) {
                        struct kvm_mmu_page *page;
 
-                       page = container_of(kvm->active_mmu_pages.prev,
+                       page = container_of(kvm_x86->active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_zap_page(kvm, page);
                        n_used_mmu_pages--;
                }
-               kvm->n_free_mmu_pages = 0;
+               kvm_x86->n_free_mmu_pages = 0;
        }
        else
-               kvm->n_free_mmu_pages += kvm_nr_mmu_pages
-                                        - kvm->n_alloc_mmu_pages;
+               kvm_x86->n_free_mmu_pages += kvm_nr_mmu_pages
+                                        - kvm_x86->n_alloc_mmu_pages;
 
-       kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+       kvm_x86->n_alloc_mmu_pages = kvm_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -824,11 +833,12 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;
        int r;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &kvm->mmu_page_hash[index];
+       bucket = &kvm_x86->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__,
gfn,
@@ -1274,6 +1284,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int level;
        int flooded = 0;
        int npte;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        ++vcpu->kvm->stat.mmu_pte_write;
@@ -1289,7 +1300,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                vcpu->last_pte_updated = NULL;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
-       bucket = &vcpu->kvm->mmu_page_hash[index];
+       bucket = &kvm_x86->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
                if (page->gfn != gfn || page->role.metaphysical)
                        continue;
@@ -1353,10 +1364,12 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-       while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+       while (kvm_x86->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;
 
-               page = container_of(vcpu->kvm->active_mmu_pages.prev,
+               page = container_of(kvm_x86->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
                ++vcpu->kvm->stat.mmu_recycled;
@@ -1406,9 +1419,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu_page *page;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
-       while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-               page = container_of(vcpu->kvm->active_mmu_pages.next,
+       while (!list_empty(&kvm_x86->active_mmu_pages)) {
+               page = container_of(kvm_x86->active_mmu_pages.next,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
        }
@@ -1419,13 +1433,14 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 {
        struct page *page;
        int i;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
        ASSERT(vcpu);
 
-       if (vcpu->kvm->n_requested_mmu_pages)
-               vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+       if (kvm_x86->n_requested_mmu_pages)
+               kvm_x86->n_free_mmu_pages = kvm_x86->n_requested_mmu_pages;
        else
-               vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
+               kvm_x86->n_free_mmu_pages = kvm_x86->n_alloc_mmu_pages;
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
@@ -1473,8 +1488,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *page;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
-       list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+       list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
                int i;
                u64 *pt;
 
@@ -1492,8 +1508,9 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
        struct kvm_mmu_page *page, *node;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
 
-       list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+       list_for_each_entry_safe(page, node, &kvm_x86->active_mmu_pages, link)
                kvm_mmu_zap_page(kvm, page);
 
        kvm_flush_remote_tlbs(kvm);
@@ -1665,7 +1682,8 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
        struct kvm_mmu_page *page;
        int i;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
-       list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+       list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
                u64 *pt = page->spt;
 
                if (page->role.level != PT_PAGE_TABLE_LEVEL)
@@ -1700,8 +1717,9 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        gfn_t gfn;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
 
-       list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+       list_for_each_entry(page, &kvm_x86->active_mmu_pages, link) {
                if (page->role.metaphysical)
                        continue;
 
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 13db394..d41d962 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -816,13 +816,15 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                                          u32 kvm_nr_mmu_pages)
 {
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
 
        mutex_lock(&kvm->lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
-       kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+       kvm_x86->n_requested_mmu_pages = kvm_nr_mmu_pages;
 
        mutex_unlock(&kvm->lock);
        return 0;
@@ -830,7 +832,9 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 
 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
-       return kvm->n_alloc_mmu_pages;
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
+       return kvm_x86->n_alloc_mmu_pages;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -2612,7 +2616,7 @@ struct  kvm *kvm_arch_create_vm(void)
        if (!kvm_x86)
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&kvm_x86->kvm.active_mmu_pages);
+       INIT_LIST_HEAD(&kvm_x86->active_mmu_pages);
 
        return &kvm_x86->kvm;
 }
@@ -2659,6 +2663,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 {
        int npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(kvm);
+
 
        /*To keep backward compatibility with older userspace,
         *x86 needs to hanlde !user_alloc case.
@@ -2691,7 +2697,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                }
        }
 
-       if (!kvm->n_requested_mmu_pages) {
+       if (!kvm_x86->n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index f792bb9..788ec13 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -166,6 +166,14 @@ struct kvm_x86 {
        struct kvm kvm;
        int naliases;
        struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+       /*
+        * Hash table of struct kvm_mmu_page.
+        */
+       struct list_head active_mmu_pages;
+       unsigned int n_free_mmu_pages;
+       unsigned int n_requested_mmu_pages;
+       unsigned int n_alloc_mmu_pages;
+       struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 };
 
 static inline struct kvm_x86 *to_kvm_x86(struct kvm *kvm)
@@ -331,7 +339,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-       if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+       struct kvm_x86 *kvm_x86 = to_kvm_x86(vcpu->kvm);
+
+       if (unlikely(kvm_x86->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
 }
 
-- 
1.5.1.2
