Move the per-VCPU mmu_memory_cache structures to be per-VM. What is your opinion?
Eddie diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h index 0632d0b..77989b4 100644 --- a/drivers/kvm/kvm.h +++ b/drivers/kvm/kvm.h @@ -300,11 +300,6 @@ struct kvm_vcpu { struct kvm_mmu mmu; - struct kvm_mmu_memory_cache mmu_pte_chain_cache; - struct kvm_mmu_memory_cache mmu_rmap_desc_cache; - struct kvm_mmu_memory_cache mmu_page_cache; - struct kvm_mmu_memory_cache mmu_page_header_cache; - gfn_t last_pt_write_gfn; int last_pt_write_count; @@ -383,6 +378,11 @@ struct kvm { unsigned long rmap_overflow; struct list_head vm_list; struct file *filp; + + struct kvm_mmu_memory_cache mmu_pte_chain_cache; + struct kvm_mmu_memory_cache mmu_rmap_desc_cache; + struct kvm_mmu_memory_cache mmu_page_cache; + struct kvm_mmu_memory_cache mmu_page_header_cache; }; struct descriptor_table { diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c index 46491b4..b2578a8 100644 --- a/drivers/kvm/mmu.c +++ b/drivers/kvm/mmu.c @@ -231,19 +231,19 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) { int r; - r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache, + r = mmu_topup_memory_cache(&vcpu->kvm->mmu_pte_chain_cache, pte_chain_cache, 4, gfp_flags); if (r) goto out; - r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache, + r = mmu_topup_memory_cache(&vcpu->kvm->mmu_rmap_desc_cache, rmap_desc_cache, 1, gfp_flags); if (r) goto out; - r = mmu_topup_memory_cache(&vcpu->mmu_page_cache, + r = mmu_topup_memory_cache(&vcpu->kvm->mmu_page_cache, mmu_page_cache, 4, gfp_flags); if (r) goto out; - r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache, + r = mmu_topup_memory_cache(&vcpu->kvm->mmu_page_header_cache, mmu_page_header_cache, 4, gfp_flags); out: return r; @@ -266,10 +266,10 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) { - mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); - mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); - mmu_free_memory_cache(&vcpu->mmu_page_cache); - 
mmu_free_memory_cache(&vcpu->mmu_page_header_cache); + mmu_free_memory_cache(&vcpu->kvm->mmu_pte_chain_cache); + mmu_free_memory_cache(&vcpu->kvm->mmu_rmap_desc_cache); + mmu_free_memory_cache(&vcpu->kvm->mmu_page_cache); + mmu_free_memory_cache(&vcpu->kvm->mmu_page_header_cache); } static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, @@ -293,26 +293,26 @@ static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj) static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu) { - return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache, + return mmu_memory_cache_alloc(&vcpu->kvm->mmu_pte_chain_cache, sizeof(struct kvm_pte_chain)); } static void mmu_free_pte_chain(struct kvm_vcpu *vcpu, struct kvm_pte_chain *pc) { - mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc); + mmu_memory_cache_free(&vcpu->kvm->mmu_pte_chain_cache, pc); } static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu) { - return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache, + return mmu_memory_cache_alloc(&vcpu->kvm->mmu_rmap_desc_cache, sizeof(struct kvm_rmap_desc)); } static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu, struct kvm_rmap_desc *rd) { - mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd); + mmu_memory_cache_free(&vcpu->kvm->mmu_rmap_desc_cache, rd); } /* @@ -471,8 +471,8 @@ static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, { ASSERT(is_empty_shadow_page(page_head->spt)); list_del(&page_head->link); - mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt); - mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head); + mmu_memory_cache_free(&vcpu->kvm->mmu_page_cache, page_head->spt); + mmu_memory_cache_free(&vcpu->kvm->mmu_page_header_cache, page_head); ++vcpu->kvm->n_free_mmu_pages; } @@ -489,9 +489,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, if (!vcpu->kvm->n_free_mmu_pages) return NULL; - page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, + page = 
mmu_memory_cache_alloc(&vcpu->kvm->mmu_page_header_cache, sizeof *page); - page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE); + page->spt = mmu_memory_cache_alloc(&vcpu->kvm->mmu_page_cache, PAGE_SIZE); set_page_private(virt_to_page(page->spt), (unsigned long)page); list_add(&page->link, &vcpu->kvm->active_mmu_pages); ASSERT(is_empty_shadow_page(page->spt));
global_cache.patch
Description: global_cache.patch
------------------------------------------------------------------------- This SF.net email is sponsored by DB2 Express Download DB2 Express C - the FREE version of DB2 express and take control of your XML. No limits. Just data. Click to get it now. http://sourceforge.net/powerbar/db2/
_______________________________________________ kvm-devel mailing list kvm-devel@lists.sourceforge.net https://lists.sourceforge.net/lists/listinfo/kvm-devel