[PATCH 01/10] KVM: x86: MMU: Encapsulate the type of rmap-chain head in a new struct

2015-11-20 Thread Takuya Yoshikawa
New struct kvm_rmap_head makes the code type-safe to some extent.
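
For context only, not part of the patch itself: a minimal, self-contained C
sketch of the bit-zero tagging scheme that kvm_rmap_head wraps. The names
rmap_head_demo, desc_demo and show() are made up for illustration and do not
exist in the kernel; the sketch ignores locking and allocation.

#include <stdint.h>
#include <stdio.h>

struct rmap_head_demo {
	unsigned long val;	/* tagged: bit 0 clear = single spte pointer */
};

struct desc_demo {
	uint64_t *sptes[3];	/* stand-in for struct pte_list_desc */
};

static void show(const struct rmap_head_demo *head)
{
	if (!head->val) {
		printf("empty chain\n");
	} else if (!(head->val & 1)) {
		/* single-spte case: val is the spte pointer itself */
		printf("one spte = %llx\n",
		       (unsigned long long)*(uint64_t *)head->val);
	} else {
		/* multi-spte case: strip the tag bit to reach the descriptor */
		struct desc_demo *d = (struct desc_demo *)(head->val & ~1ul);

		printf("first two sptes = %llx, %llx\n",
		       (unsigned long long)*d->sptes[0],
		       (unsigned long long)*d->sptes[1]);
	}
}

int main(void)
{
	uint64_t spte_a = 0x1111, spte_b = 0x2222;
	struct desc_demo d = { .sptes = { &spte_a, &spte_b, NULL } };
	struct rmap_head_demo head = { .val = 0 };

	show(&head);				/* empty */

	head.val = (unsigned long)&spte_a;	/* 0 -> 1: store spte directly */
	show(&head);

	head.val = (unsigned long)&d | 1;	/* 1 -> many: tagged descriptor */
	show(&head);

	return 0;
}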

Signed-off-by: Takuya Yoshikawa 
---
 arch/x86/include/asm/kvm_host.h |   8 +-
 arch/x86/kvm/mmu.c  | 196 
 arch/x86/kvm/mmu_audit.c|  13 +--
 3 files changed, 113 insertions(+), 104 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f608e17..8140077 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -214,6 +214,10 @@ union kvm_mmu_page_role {
};
 };
 
+struct kvm_rmap_head {
+   unsigned long val;
+};
+
 struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
@@ -231,7 +235,7 @@ struct kvm_mmu_page {
bool unsync;
int root_count;  /* Currently serving as active root */
unsigned int unsync_children;
-   unsigned long parent_ptes;  /* Reverse mapping for parent_pte */
+   struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
 
/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
unsigned long mmu_valid_gen;
@@ -606,7 +610,7 @@ struct kvm_lpage_info {
 };
 
 struct kvm_arch_memory_slot {
-   unsigned long *rmap[KVM_NR_PAGE_SIZES];
+   struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 276d2f2..d9a6801 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -909,36 +909,35 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 }
 
 /*
- * Pte mapping structures:
+ * About rmap_head encoding:
  *
- * If pte_list bit zero is zero, then pte_list point to the spte.
- *
- * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
+ * If the bit zero of rmap_head->val is clear, then it points to the only spte
+ * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
  * pte_list_desc containing more mappings.
- *
- * Returns the number of pte entries before the spte was added or zero if
- * the spte was not added.
- *
+ */
+
+/*
+ * Returns the number of pointers in the rmap chain, not counting the new one.
  */
 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
-   unsigned long *pte_list)
+   struct kvm_rmap_head *rmap_head)
 {
struct pte_list_desc *desc;
int i, count = 0;
 
-   if (!*pte_list) {
+   if (!rmap_head->val) {
rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
-   *pte_list = (unsigned long)spte;
-   } else if (!(*pte_list & 1)) {
+   rmap_head->val = (unsigned long)spte;
+   } else if (!(rmap_head->val & 1)) {
rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
desc = mmu_alloc_pte_list_desc(vcpu);
-   desc->sptes[0] = (u64 *)*pte_list;
+   desc->sptes[0] = (u64 *)rmap_head->val;
desc->sptes[1] = spte;
-   *pte_list = (unsigned long)desc | 1;
+   rmap_head->val = (unsigned long)desc | 1;
++count;
} else {
rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
-   desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+   desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
desc = desc->more;
count += PTE_LIST_EXT;
@@ -955,8 +954,9 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 }
 
 static void
-pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
-  int i, struct pte_list_desc *prev_desc)
+pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+  struct pte_list_desc *desc, int i,
+  struct pte_list_desc *prev_desc)
 {
int j;
 
@@ -967,43 +967,43 @@ pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
if (j != 0)
return;
if (!prev_desc && !desc->more)
-   *pte_list = (unsigned long)desc->sptes[0];
+   rmap_head->val = (unsigned long)desc->sptes[0];
else
if (prev_desc)
prev_desc->more = desc->more;
else
-   *pte_list = (unsigned long)desc->more | 1;
+   rmap_head->val = (unsigned long)desc->more | 1;
mmu_free_pte_list_desc(desc);
 }
 
-static void pte_list_remove(u64 *spte, unsigned long *pte_list)
+static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
 {
struct pte_list_desc *desc;
struct pte_list_desc *prev_desc;
int i;
 
-   if (!*pte_list) {
+   if (!rmap_head->val) {
