Add a new stage2 function, kvm_pgtable_stage2_split(), for splitting a
range of huge pages. This will be used for eager-splitting huge pages into
PAGE_SIZE pages. The goal is to avoid having to split huge pages on
write-protection faults, and instead use this function to do it ahead of
time for large ranges (e.g., all guest memory in 1G chunks at a time).
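
For illustration, a caller could drive the split in 1GiB chunks roughly
as below. This is a sketch only, not part of this patch: the helper name
and the top-up amount of 513 pages (which assumes 4KiB granules) are
illustrative, and locking is omitted.

static int eager_split_range(struct kvm_pgtable *pgt,
			     struct kvm_mmu_memory_cache *cache,
			     u64 start, u64 end)
{
	u64 addr, chunk_end;
	int ret;

	for (addr = start; addr < end; addr = chunk_end) {
		/* Stop each chunk at the next 1GiB boundary (or at @end). */
		chunk_end = min(end, ALIGN_DOWN(addr, SZ_1G) + SZ_1G);

		/* 513 pages fully split one 1GiB block with 4KiB granules. */
		ret = kvm_mmu_topup_memory_cache(cache, 513);
		if (ret)
			return ret;

		ret = kvm_pgtable_stage2_split(pgt, addr, chunk_end - addr,
					       cache);
		if (ret)
			return ret;
	}

	return 0;
}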

No functional change intended. This new function will be used in a
subsequent commit.

Signed-off-by: Ricardo Koller <ricar...@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h | 29 +++++++++++
 arch/arm64/kvm/hyp/pgtable.c         | 74 ++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)
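
A rough sizing note for @mc (illustrative, not part of this patch):
with 4KiB granules, fully splitting a single 1GiB block consumes

	1 level-2 table page + 512 level-3 table pages = 513 pages

so pre-allocating 513 pages is enough for one 1GiB chunk at a time.
Smaller caches still make forward progress, as the split is best-effort
(see the kerneldoc comment below).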

diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index d2e4a5032146..396ebb0949fb 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -594,6 +594,35 @@ bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
  */
 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
 
+/**
+ * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
+ *                             to PAGE_SIZE guest pages.
+ * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @addr:      Intermediate physical address from which to split.
+ * @size:      Size of the range.
+ * @mc:                Cache of pre-allocated and zeroed memory from which to allocate
+ *             page-table pages.
+ *
+ * @addr and the end (@addr + @size) are effectively aligned down and up to
+ * the top level huge-page block size. This is an example using 1GB
+ * huge-pages and 4KB granules.
+ *
+ *                          [---input range---]
+ *                          :                 :
+ * [--1G block pte--][--1G block pte--][--1G block pte--][--1G block pte--]
+ *                          :                 :
+ *                   [--2MB--][--2MB--][--2MB--][--2MB--]
+ *                          :                 :
+ *                   [ ][ ][:][ ][ ][ ][ ][ ][:][ ][ ][ ]
+ *                          :                 :
+ *
+ * Return: 0 on success, negative error code on failure. Note that
+ * kvm_pgtable_stage2_split() is best effort: it tries to break as many
+ * blocks in the input range as allowed by the size of the memcache. It
+ * will fail if it wasn't able to break any block.
+ */
+int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc);
+
 /**
  * kvm_pgtable_walk() - Walk a page-table.
  * @pgt:       Page-table structure initialised by kvm_pgtable_*_init().
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index d1f309128118..9c42eff6d42e 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1267,6 +1267,80 @@ static int stage2_create_removed(kvm_pte_t *ptep, u64 phys, u32 level,
        return __kvm_pgtable_visit(&data, mm_ops, ptep, level);
 }
 
+struct stage2_split_data {
+       struct kvm_s2_mmu               *mmu;
+       void                            *memcache;
+       struct kvm_pgtable_mm_ops       *mm_ops;
+};
+
+static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
+                              enum kvm_pgtable_walk_flags visit)
+{
+       struct stage2_split_data *data = ctx->arg;
+       struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+       kvm_pte_t pte = ctx->old, attr, new;
+       enum kvm_pgtable_prot prot;
+       void *mc = data->memcache;
+       u32 level = ctx->level;
+       u64 phys;
+
+       if (WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx)))
+               return -EINVAL;
+
+       /* Nothing to split at the last level */
+       if (level == KVM_PGTABLE_MAX_LEVELS - 1)
+               return 0;
+
+       /* We only split valid block mappings */
+       if (!kvm_pte_valid(pte) || kvm_pte_table(pte, ctx->level))
+               return 0;
+
+       phys = kvm_pte_to_phys(pte);
+       prot = kvm_pgtable_stage2_pte_prot(pte);
+       stage2_set_prot_attr(data->mmu->pgt, prot, &attr);
+
+       /*
+        * Eager page splitting is best-effort, so we can ignore the error.
+        * The returned PTE (new) will be valid even if this call returns
+        * an error: new will be a single (big) block PTE.  The only
+        * downside is that dirty logging performance will suffer, as the
+        * huge pages will have to be split on write faults, hence the WARN.
+        */
+       WARN_ON(stage2_create_removed(&new, phys, level, attr, mc, mm_ops));
+
+       stage2_put_pte(ctx, data->mmu, mm_ops);
+
+       /*
+        * Note, the contents of the page table are guaranteed to be made
+        * visible before the new PTE is assigned because
+        * stage2_make_pte() writes the PTE using smp_store_release().
+        */
+       stage2_make_pte(ctx, new);
+       dsb(ishst);
+       return 0;
+}
+
+int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt,
+                            u64 addr, u64 size, void *mc)
+{
+       int ret;
+
+       struct stage2_split_data split_data = {
+               .mmu            = pgt->mmu,
+               .memcache       = mc,
+               .mm_ops         = pgt->mm_ops,
+       };
+
+       struct kvm_pgtable_walker walker = {
+               .cb     = stage2_split_walker,
+               .flags  = KVM_PGTABLE_WALK_POST,
+               .arg    = &split_data,
+       };
+
+       ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+       return ret;
+}
+
 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                              struct kvm_pgtable_mm_ops *mm_ops,
                              enum kvm_pgtable_stage2_flags flags,
-- 
2.38.1.431.g37b22c650d-goog
