Stage2 page tables are currently not RCU safe against unmapping or VM
destruction. The existing mmu_notifier_ops members rely on
kvm->mmu_lock to synchronize with those operations.

However, the new mmu_notifier_ops member test_clear_young() provides
a fast path that does not take kvm->mmu_lock. To implement
kvm_arch_test_clear_young() for that path, unmapped page tables need
to be freed via RCU, and kvm_free_stage2_pgd() needs to be called
after mmu_notifier_unregister().

Remapping, specifically stage2_free_removed_table(), is already RCU
safe.

Signed-off-by: Yu Zhao <yuz...@google.com>
---
 arch/arm64/include/asm/kvm_pgtable.h |  2 ++
 arch/arm64/kvm/arm.c                 |  1 +
 arch/arm64/kvm/hyp/pgtable.c         |  8 ++++++--
 arch/arm64/kvm/mmu.c                 | 17 ++++++++++++++++-
 4 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_pgtable.h 
b/arch/arm64/include/asm/kvm_pgtable.h
index ff520598b62c..5cab52e3a35f 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -153,6 +153,7 @@ static inline bool kvm_level_supports_block_mapping(u32 
level)
  * @put_page:                  Decrement the refcount on a page. When the
  *                             refcount reaches 0 the page is automatically
  *                             freed.
+ * @put_page_rcu:              RCU variant of the above.
  * @page_count:                        Return the refcount of a page.
  * @phys_to_virt:              Convert a physical address into a virtual
  *                             address mapped in the current context.
@@ -170,6 +171,7 @@ struct kvm_pgtable_mm_ops {
        void            (*free_removed_table)(void *addr, u32 level);
        void            (*get_page)(void *addr);
        void            (*put_page)(void *addr);
+       void            (*put_page_rcu)(void *addr);
        int             (*page_count)(void *addr);
        void*           (*phys_to_virt)(phys_addr_t phys);
        phys_addr_t     (*virt_to_phys)(void *addr);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14391826241c..ee93271035d9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -191,6 +191,7 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, 
struct vm_fault *vmf)
  */
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+       kvm_free_stage2_pgd(&kvm->arch.mmu);
        bitmap_free(kvm->arch.pmu_filter);
        free_cpumask_var(kvm->arch.supported_cpus);
 
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 24678ccba76a..dbace4c6a841 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -988,8 +988,12 @@ static int stage2_unmap_walker(const struct 
kvm_pgtable_visit_ctx *ctx,
                mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
                                               kvm_granule_size(ctx->level));
 
-       if (childp)
-               mm_ops->put_page(childp);
+       if (childp) {
+               if (mm_ops->put_page_rcu)
+                       mm_ops->put_page_rcu(childp);
+               else
+                       mm_ops->put_page(childp);
+       }
 
        return 0;
 }
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 3b9d4d24c361..c3b3e2afe26f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -172,6 +172,21 @@ static int kvm_host_page_count(void *addr)
        return page_count(virt_to_page(addr));
 }
 
+static void kvm_s2_rcu_put_page(struct rcu_head *head)
+{
+       put_page(container_of(head, struct page, rcu_head));
+}
+
+static void kvm_s2_put_page_rcu(void *addr)
+{
+       struct page *page = virt_to_page(addr);
+
+       if (kvm_host_page_count(addr) == 1)
+               kvm_account_pgtable_pages(addr, -1);
+
+       call_rcu(&page->rcu_head, kvm_s2_rcu_put_page);
+}
+
 static phys_addr_t kvm_host_pa(void *addr)
 {
        return __pa(addr);
@@ -704,6 +719,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
        .free_removed_table     = stage2_free_removed_table,
        .get_page               = kvm_host_get_page,
        .put_page               = kvm_s2_put_page,
+       .put_page_rcu           = kvm_s2_put_page_rcu,
        .page_count             = kvm_host_page_count,
        .phys_to_virt           = kvm_host_va,
        .virt_to_phys           = kvm_host_pa,
@@ -1877,7 +1893,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
-       kvm_free_stage2_pgd(&kvm->arch.mmu);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-- 
2.41.0.rc0.172.g3f132b7071-goog

Reply via email to