Hi Oliver,

On 11/8/22 5:56 AM, Oliver Upton wrote:
In order to tear down page tables from outside the context of
kvm_pgtable (such as an RCU callback), stop passing a pointer through
kvm_pgtable_walk_data.

No functional change intended.

Signed-off-by: Oliver Upton <oliver.up...@linux.dev>
---
  arch/arm64/kvm/hyp/pgtable.c | 18 +++++-------------
  1 file changed, 5 insertions(+), 13 deletions(-)
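
The motivation looks sound to me. Once nothing in the walk dereferences a
kvm_pgtable back-pointer, a detached subtree can be freed after a grace
period with only the mm_ops in hand. A sketch of what such an RCU callback
might look like (the callback name and kvm_pgtable_stage2_free_removed()
are my guesses at where the series is heading, not part of this patch):

  /* Sketch: free a detached table from an RCU callback, where no
   * struct kvm_pgtable exists any more. The page carries everything
   * needed: the table via its mapping, the level via page_private().
   */
  static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
  {
          struct page *page = container_of(head, struct page, rcu_head);
          void *pgtable = page_to_virt(page);
          u32 level = page_private(page);

          kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
  }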


Reviewed-by: Gavin Shan <gs...@redhat.com>

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index db25e81a9890..93989b750a26 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -50,7 +50,6 @@
  #define KVM_MAX_OWNER_ID              1
 
  struct kvm_pgtable_walk_data {
-       struct kvm_pgtable              *pgt;
        struct kvm_pgtable_walker       *walker;
 
        u64                             addr;
Ok. This answers why data->pgt->mm_ops is no longer reachable from the
walker and visitor, and why @mm_ops needs to be passed down explicitly.
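
For readers following the series, the shape after this change is roughly
(declaration inferred from the call site quoted below; a sketch, not the
full definition):

  /* Before: any visitor could reach the ops via the back-pointer. */
  struct kvm_pgtable_mm_ops *ops = data->pgt->mm_ops;

  /* After: no back-pointer, so the recursion carries the ops
   * explicitly, matching the quoted call
   * __kvm_pgtable_walk(data, pgt->mm_ops, ptep, pgt->start_level).
   */
  static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
                                struct kvm_pgtable_mm_ops *mm_ops,
                                kvm_pte_t *pgtable, u32 level);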

@@ -88,7 +87,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
        return (data->addr >> shift) & mask;
  }
 
-static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
  {
        u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
        u64 mask = BIT(pgt->ia_bits) - 1;
@@ -96,11 +95,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
        return (addr & mask) >> shift;
  }
 
-static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
-{
-       return __kvm_pgd_page_idx(data->pgt, data->addr);
-}
-
  static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
  {
        struct kvm_pgtable pgt = {
@@ -108,7 +102,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
                .start_level    = start_level,
        };
 
-       return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+       return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
  }
 
  static bool kvm_pte_table(kvm_pte_t pte, u32 level)
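
The rename is fine. To sanity-check the index arithmetic, a worked
example (assuming a 4K granule, where kvm_granule_shift(0) == 39):

  /* Worked example: ia_bits = 40, start_level = 1, 4K granule.
   *
   *   shift = kvm_granule_shift(1 - 1)         = 39
   *   mask  = BIT(40) - 1
   *   kvm_pgd_page_idx(&pgt, -1ULL)
   *         = ((1ULL << 40) - 1) >> 39         = 1
   *
   * So kvm_pgd_pages() returns 2: a 40-bit IPA space starting at
   * level 1 needs two concatenated PGD pages, since each level-1
   * table covers 2^39 bytes.
   */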
@@ -255,11 +249,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
        return ret;
  }
 
-static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
+static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
  {
        u32 idx;
        int ret = 0;
-       struct kvm_pgtable *pgt = data->pgt;
        u64 limit = BIT(pgt->ia_bits);
 
        if (data->addr > limit || data->end > limit)
@@ -268,7 +261,7 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
        if (!pgt->pgd)
                return -EINVAL;
 
-       for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
+       for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
                kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
 
                ret = __kvm_pgtable_walk(data, pgt->mm_ops, ptep, pgt->start_level);
@@ -283,13 +276,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
                     struct kvm_pgtable_walker *walker)
  {
        struct kvm_pgtable_walk_data walk_data = {
-               .pgt    = pgt,
                .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
                .end    = PAGE_ALIGN(walk_data.addr + size),
                .walker = walker,
        };
 
-       return _kvm_pgtable_walk(&walk_data);
+       return _kvm_pgtable_walk(pgt, &walk_data);
  }
 
  struct leaf_walk_data {
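
Also worth noting the external API is untouched, so existing callers keep
the same shape. A sketch (my_visitor and my_cookie are placeholders of
mine, not names from the tree):

  /* A caller still hands the kvm_pgtable in by pointer; only the
   * internal plumbing changed.
   */
  struct kvm_pgtable_walker walker = {
          .cb     = my_visitor,           /* hypothetical visitor callback */
          .arg    = &my_cookie,           /* hypothetical private cookie   */
          .flags  = KVM_PGTABLE_WALK_LEAF,
  };

  ret = kvm_pgtable_walk(pgt, addr, size, &walker);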


Thanks,
Gavin
