Some members of kvm_memory_slot are not used by every architecture.

This patch is the first step toward making this difference clear by
introducing kvm_memory_slot::arch; lpage_info is moved into it.

Signed-off-by: Takuya Yoshikawa <yoshikawa.tak...@oss.ntt.co.jp>
---
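A note for reviewers (not part of the commit message): the sizing arithmetic
that kvm_arch_create_memslot() inherits from create_lpage_info() can be seen
in isolation in the small user-space sketch below.  It mirrors gfn_to_index()
under the assumption of x86's 9-bits-per-level paging geometry; the helper
names (hpage_gfn_shift, lpage_index) and the example slot values are
illustrative only, not KVM APIs.

/* Illustrative sketch -- how the per-level lpage_info arrays that now live
 * in kvm_memory_slot::arch are sized, assuming 9 gfn bits per level. */
#include <stdio.h>

/* level 2 = 2M pages, level 3 = 1G pages (4K pages need no lpage_info) */
static unsigned long hpage_gfn_shift(int level)
{
        return (unsigned long)(level - 1) * 9;
}

/* mirrors gfn_to_index(): index of gfn's large page relative to the slot */
static unsigned long lpage_index(unsigned long gfn, unsigned long base_gfn,
                                 int level)
{
        return (gfn >> hpage_gfn_shift(level)) -
               (base_gfn >> hpage_gfn_shift(level));
}

int main(void)
{
        unsigned long base_gfn = 0x100;         /* example slot start */
        unsigned long npages = 0x1000;          /* 4096 guest pages */
        int level;

        for (level = 2; level <= 3; level++) {
                /* same formula kvm_arch_create_memslot() uses for the
                 * vzalloc() size of slot->arch.lpage_info[level - 2] */
                unsigned long lpages =
                        lpage_index(base_gfn + npages - 1, base_gfn, level) + 1;

                printf("level %d: %lu lpage_info entries\n", level, lpages);
        }
        return 0;
}
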
 arch/ia64/include/asm/kvm_host.h    |    3 ++
 arch/ia64/kvm/kvm-ia64.c            |   10 +++++
 arch/powerpc/include/asm/kvm_host.h |    3 ++
 arch/powerpc/kvm/powerpc.c          |   10 +++++
 arch/s390/include/asm/kvm_host.h    |    3 ++
 arch/s390/kvm/kvm-s390.c            |   10 +++++
 arch/x86/include/asm/kvm_host.h     |    9 +++++
 arch/x86/kvm/mmu.c                  |    2 +-
 arch/x86/kvm/x86.c                  |   59 ++++++++++++++++++++++++++++++
 include/linux/kvm_host.h            |   11 +++---
 virt/kvm/kvm_main.c                 |   67 ++++------------------------------
 11 files changed, 121 insertions(+), 66 deletions(-)

diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2689ee5..e35b3a8 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -459,6 +459,9 @@ struct kvm_sal_data {
        unsigned long boot_gp;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch {
        spinlock_t dirty_log_lock;
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8ca7261..d8ddbba 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1571,6 +1571,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af438b1..b9188aa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -212,6 +212,9 @@ struct revmap_entry {
 #define KVMPPC_PAGE_WRITETHRU  HPTE_R_W        /* 0x40 */
 #define KVMPPC_GOT_PAGE                0x80
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch {
 #ifdef CONFIG_KVM_BOOK3S_64_HV
        unsigned long hpt_virt;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e21d15..00d7e34 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -281,6 +281,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
        return -EINVAL;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
                                    struct kvm_memory_slot old,
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e630426..7343872 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -245,6 +245,9 @@ struct kvm_vm_stat {
        u32 remote_tlb_flush;
 };
 
+struct kvm_arch_memory_slot {
+};
+
 struct kvm_arch{
        struct sca_block *sca;
        debug_info_t *dbf;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0b91679..418a69c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -807,6 +807,16 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4610166..de3aa43 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -479,6 +479,15 @@ struct kvm_vcpu_arch {
        } osvw;
 };
 
+struct kvm_lpage_info {
+       unsigned long rmap_pde;
+       int write_count;
+};
+
+struct kvm_arch_memory_slot {
+       struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
 struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 37e7f10..ff053ca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -689,7 +689,7 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
        unsigned long idx;
 
        idx = gfn_to_index(gfn, slot->base_gfn, level);
-       return &slot->lpage_info[level - 2][idx];
+       return &slot->arch.lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2bd77a3..45e4197 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6120,6 +6120,65 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                put_page(kvm->arch.ept_identity_pagetable);
 }
 
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+       int i;
+
+       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+               if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
+                       vfree(free->arch.lpage_info[i]);
+                       free->arch.lpage_info[i] = NULL;
+               }
+       }
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       int i;
+
+       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+               unsigned long ugfn;
+               int lpages;
+               int level = i + 2;
+
+               lpages = gfn_to_index(slot->base_gfn + npages - 1,
+                                     slot->base_gfn, level) + 1;
+
+               slot->arch.lpage_info[i] =
+                       vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
+               if (!slot->arch.lpage_info[i])
+                       goto out_free;
+
+               if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+                       slot->arch.lpage_info[i][0].write_count = 1;
+               if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+                       slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+               ugfn = slot->userspace_addr >> PAGE_SHIFT;
+               /*
+                * If the gfn and userspace address are not aligned wrt each
+                * other, or if explicitly asked to, disable large page
+                * support for this slot
+                */
+               if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+                   !kvm_largepages_enabled()) {
+                       unsigned long j;
+
+                       for (j = 0; j < lpages; ++j)
+                               slot->arch.lpage_info[i][j].write_count = 1;
+               }
+       }
+
+       return 0;
+
+out_free:
+       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+               vfree(slot->arch.lpage_info[i]);
+               slot->arch.lpage_info[i] = NULL;
+       }
+       return -ENOMEM;
+}
+
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 06d4e41..1d833d7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -171,11 +171,6 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
-struct kvm_lpage_info {
-       unsigned long rmap_pde;
-       int write_count;
-};
-
 struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
@@ -184,7 +179,7 @@ struct kvm_memory_slot {
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
        unsigned long nr_dirty_pages;
-       struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+       struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        int user_alloc;
        int id;
@@ -376,6 +371,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont);
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
@@ -385,6 +383,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
+bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b6bc866..cef760e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -535,21 +535,13 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
 {
-       int i;
-
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);
 
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                kvm_destroy_dirty_bitmap(free);
 
-
-       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-               if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
-                       vfree(free->lpage_info[i]);
-                       free->lpage_info[i] = NULL;
-               }
-       }
+       kvm_arch_free_memslot(free, dont);
 
        free->npages = 0;
        free->rmap = NULL;
@@ -698,53 +690,6 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
        slots->generation++;
 }
 
-#ifndef CONFIG_S390
-static int create_lpage_info(struct kvm_memory_slot *slot, unsigned long npages)
-{
-       int i;
-
-       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-               unsigned long ugfn;
-               int lpages;
-               int level = i + 2;
-
-               lpages = gfn_to_index(slot->base_gfn + npages - 1,
-                                     slot->base_gfn, level) + 1;
-
-               slot->lpage_info[i] = vzalloc(lpages * sizeof(*slot->lpage_info[i]));
-               if (!slot->lpage_info[i])
-                       goto out_free;
-
-               if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-                       slot->lpage_info[i][0].write_count = 1;
-               if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-                       slot->lpage_info[i][lpages - 1].write_count = 1;
-               ugfn = slot->userspace_addr >> PAGE_SHIFT;
-               /*
-                * If the gfn and userspace address are not aligned wrt each
-                * other, or if explicitly asked to, disable large page
-                * support for this slot
-                */
-               if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
-                   !largepages_enabled) {
-                       unsigned long j;
-
-                       for (j = 0; j < lpages; ++j)
-                               slot->lpage_info[i][j].write_count = 1;
-               }
-       }
-
-       return 0;
-
-out_free:
-       for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-               vfree(slot->lpage_info[i]);
-               slot->lpage_info[i] = NULL;
-       }
-       return -ENOMEM;
-}
-#endif /* not defined CONFIG_S390 */
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -833,7 +778,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                if (!new.rmap)
                        goto out_free;
 
-               if (create_lpage_info(&new, npages))
+               if (kvm_arch_create_memslot(&new, npages))
                        goto out_free;
 #endif /* not defined CONFIG_S390 */
        }
@@ -893,8 +838,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (!npages) {
                new.rmap = NULL;
                new.dirty_bitmap = NULL;
-               for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
-                       new.lpage_info[i] = NULL;
+               memset(&new.arch, 0, sizeof(new.arch));
        }
 
        update_memslots(slots, &new);
@@ -981,6 +925,11 @@ out:
        return r;
 }
 
+bool kvm_largepages_enabled(void)
+{
+       return largepages_enabled;
+}
+
 void kvm_disable_largepages(void)
 {
        largepages_enabled = false;
-- 
1.7.5.4
