Re: [PATCH v5 04/14] KVM: arm64: Don't pass kvm_pgtable through kvm_pgtable_walk_data

2022-11-10 Thread Ben Gardon
On Mon, Nov 7, 2022 at 1:57 PM Oliver Upton  wrote:
>
> In order to tear down page tables from outside the context of
> kvm_pgtable (such as an RCU callback), stop passing a pointer through
> kvm_pgtable_walk_data.
>
> No functional change intended.
>
> Signed-off-by: Oliver Upton 

Reviewed-by: Ben Gardon 


> ---
>  arch/arm64/kvm/hyp/pgtable.c | 18 +++++-------------
>  1 file changed, 5 insertions(+), 13 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index db25e81a9890..93989b750a26 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -50,7 +50,6 @@
>  #define KVM_MAX_OWNER_ID		1
>  
>  struct kvm_pgtable_walk_data {
> -	struct kvm_pgtable		*pgt;
>  	struct kvm_pgtable_walker	*walker;
>  
>  	u64				addr;
> @@ -88,7 +87,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
>  	return (data->addr >> shift) & mask;
>  }
>  
> -static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
> +static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
>  {
>  	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
>  	u64 mask = BIT(pgt->ia_bits) - 1;
> @@ -96,11 +95,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
>  	return (addr & mask) >> shift;
>  }
>  
> -static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
> -{
> -	return __kvm_pgd_page_idx(data->pgt, data->addr);
> -}
> -
>  static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
>  {
>  	struct kvm_pgtable pgt = {
> @@ -108,7 +102,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
>  		.start_level	= start_level,
>  	};
>  
> -	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
> +	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
>  }
>  
>  static bool kvm_pte_table(kvm_pte_t pte, u32 level)
> @@ -255,11 +249,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
>  	return ret;
>  }
>  
> -static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
> +static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
>  {
>  	u32 idx;
>  	int ret = 0;
> -	struct kvm_pgtable *pgt = data->pgt;
>  	u64 limit = BIT(pgt->ia_bits);
>  
>  	if (data->addr > limit || data->end > limit)
> @@ -268,7 +261,7 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
>  	if (!pgt->pgd)
>  		return -EINVAL;
>  
> -	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
> +	for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
>  		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
>  
>  		ret = __kvm_pgtable_walk(data, pgt->mm_ops, ptep, pgt->start_level);
> @@ -283,13 +276,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
>  		     struct kvm_pgtable_walker *walker)
>  {
>  	struct kvm_pgtable_walk_data walk_data = {
> -		.pgt	= pgt,
>  		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
>  		.end	= PAGE_ALIGN(walk_data.addr + size),
>  		.walker	= walker,
>  	};
>  
> -	return _kvm_pgtable_walk(&walk_data);
> +	return _kvm_pgtable_walk(pgt, &walk_data);
>  }
>  
>  struct leaf_walk_data {
> -- 
> 2.38.1.431.g37b22c650d-goog
>
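
As an aside, here is a worked example of the kvm_pgd_page_idx()
arithmetic above, assuming a 4K granule and a 40-bit IPA space (i.e.
two concatenated level-1 PGD pages); the numbers are illustrative and
not taken from the patch:

  start_level = 1, so shift = kvm_granule_shift(1 - 1) = 39
  mask = BIT(40) - 1

  kvm_pgd_page_idx(pgt, addr) = (addr & mask) >> 39    /* 0 or 1 */
  kvm_pgd_pages(40, 1) = ((-1ULL & mask) >> 39) + 1 = 2

The index simply selects which concatenated PGD page an address lands
in, which is why it needs only ia_bits and start_level from pgt rather
than the rest of the walk context; that is also what lets kvm_pgd_pages()
call it with a stack-local kvm_pgtable.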


Re: [PATCH v5 04/14] KVM: arm64: Don't pass kvm_pgtable through kvm_pgtable_walk_data

2022-11-09 Thread Oliver Upton
On Thu, Nov 10, 2022 at 01:30:08PM +0800, Gavin Shan wrote:
> Hi Oliver,
> 
> On 11/8/22 5:56 AM, Oliver Upton wrote:
> > In order to tear down page tables from outside the context of
> > kvm_pgtable (such as an RCU callback), stop passing a pointer through
> > kvm_pgtable_walk_data.
> > 
> > No functional change intended.
> > 
> > Signed-off-by: Oliver Upton 
> > ---
> >   arch/arm64/kvm/hyp/pgtable.c | 18 +++++-------------
> >   1 file changed, 5 insertions(+), 13 deletions(-)
> > 
> 
> Reviewed-by: Gavin Shan 

Appreciated :)

> > diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> > index db25e81a9890..93989b750a26 100644
> > --- a/arch/arm64/kvm/hyp/pgtable.c
> > +++ b/arch/arm64/kvm/hyp/pgtable.c
> > @@ -50,7 +50,6 @@
> >  #define KVM_MAX_OWNER_ID		1
> >  
> >  struct kvm_pgtable_walk_data {
> > -	struct kvm_pgtable		*pgt;
> >  	struct kvm_pgtable_walker	*walker;
> >  
> >  	u64				addr;
> 
> Ok. Here is the answer why data->pgt->mm_ops isn't reachable in the walker
> and visitor, and @mm_ops needs to be passed down.

Yup, the reason for unhitching all of this from kvm_pgtable is explained
in the cover letter as well:

  Patches 1-4 clean up the context associated with a page table walk / PTE
  visit. This is helpful for:
   - Extending the context passed through for a visit
   - Building page table walkers that operate outside of a kvm_pgtable
     context (e.g. RCU callback)
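
To make the RCU case concrete, here is a minimal sketch of such a
teardown path. Everything below is hypothetical (the context struct,
the callback, and stage2_free_walker are stand-ins, not code from this
series); the point is that the callback has only the detached subtree,
its level, and the mm_ops in hand, so the walk context cannot be
required to carry a kvm_pgtable:

/* Hypothetical sketch: freeing a detached subtree from an RCU callback. */
struct stage2_free_ctx {
	struct rcu_head			rcu;
	struct kvm_pgtable_mm_ops	*mm_ops;
	kvm_pte_t			*ptep;
	u32				level;
};

static void stage2_free_table_rcu_cb(struct rcu_head *head)
{
	struct stage2_free_ctx *ctx =
		container_of(head, struct stage2_free_ctx, rcu);
	struct kvm_pgtable_walk_data data = {
		.walker	= &stage2_free_walker,	/* stand-in free-walker */
		/* .addr/.end bound the subtree; details elided */
	};

	/* Possible only because the walk no longer needs data->pgt. */
	__kvm_pgtable_walk(&data, ctx->mm_ops, ctx->ptep, ctx->level);
}

A call_rcu(&ctx->rcu, stage2_free_table_rcu_cb) from the unmap path
would then defer the free past any concurrent walkers.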

--
Thanks,
Oliver


Re: [PATCH v5 04/14] KVM: arm64: Don't pass kvm_pgtable through kvm_pgtable_walk_data

2022-11-09 Thread Gavin Shan

Hi Oliver,

On 11/8/22 5:56 AM, Oliver Upton wrote:

> In order to tear down page tables from outside the context of
> kvm_pgtable (such as an RCU callback), stop passing a pointer through
> kvm_pgtable_walk_data.
> 
> No functional change intended.
> 
> Signed-off-by: Oliver Upton 
> ---
>  arch/arm64/kvm/hyp/pgtable.c | 18 +++++-------------
>  1 file changed, 5 insertions(+), 13 deletions(-)

Reviewed-by: Gavin Shan 

> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index db25e81a9890..93989b750a26 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -50,7 +50,6 @@
>  #define KVM_MAX_OWNER_ID		1
>  
>  struct kvm_pgtable_walk_data {
> -	struct kvm_pgtable		*pgt;
>  	struct kvm_pgtable_walker	*walker;
>  
>  	u64				addr;
Ok. Here is the answer why data->pgt->mm_ops isn't reachable in the walker
and visitor, and @mm_ops needs to be passed down.
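
To illustrate that point with a stand-in helper (hypothetical, not from
the patch): anything below the top-level walk that used to reach
data->pgt->mm_ops must now take mm_ops from its caller, for example:

static int stage2_install_table(struct kvm_pgtable_walk_data *data,
				kvm_pte_t *ptep,
				struct kvm_pgtable_mm_ops *mm_ops)
{
	/* Was data->pgt->mm_ops->zalloc_page(); no longer reachable. */
	void *childp = mm_ops->zalloc_page(NULL);

	if (!childp)
		return -ENOMEM;

	/* ... format childp as a table and install it at ptep ... */
	return 0;
}

This matches how _kvm_pgtable_walk() now forwards pgt->mm_ops into
__kvm_pgtable_walk() in the hunks below.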


> @@ -88,7 +87,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
>  	return (data->addr >> shift) & mask;
>  }
>  
> -static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
> +static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
>  {
>  	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
>  	u64 mask = BIT(pgt->ia_bits) - 1;
> @@ -96,11 +95,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
>  	return (addr & mask) >> shift;
>  }
>  
> -static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
> -{
> -	return __kvm_pgd_page_idx(data->pgt, data->addr);
> -}
> -
>  static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
>  {
>  	struct kvm_pgtable pgt = {
> @@ -108,7 +102,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
>  		.start_level	= start_level,
>  	};
>  
> -	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
> +	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
>  }
>  
>  static bool kvm_pte_table(kvm_pte_t pte, u32 level)
> @@ -255,11 +249,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
>  	return ret;
>  }
>  
> -static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
> +static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
>  {
>  	u32 idx;
>  	int ret = 0;
> -	struct kvm_pgtable *pgt = data->pgt;
>  	u64 limit = BIT(pgt->ia_bits);
>  
>  	if (data->addr > limit || data->end > limit)
> @@ -268,7 +261,7 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
>  	if (!pgt->pgd)
>  		return -EINVAL;
>  
> -	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
> +	for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
>  		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
>  
>  		ret = __kvm_pgtable_walk(data, pgt->mm_ops, ptep, pgt->start_level);
> @@ -283,13 +276,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
>  		     struct kvm_pgtable_walker *walker)
>  {
>  	struct kvm_pgtable_walk_data walk_data = {
> -		.pgt	= pgt,
>  		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
>  		.end	= PAGE_ALIGN(walk_data.addr + size),
>  		.walker	= walker,
>  	};
>  
> -	return _kvm_pgtable_walk(&walk_data);
> +	return _kvm_pgtable_walk(pgt, &walk_data);
>  }
>  
>  struct leaf_walk_data {

Thanks,
Gavin
