Re: [PATCH v7 3/7] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf

2023-02-14 Thread Christophe Leroy


Le 15/02/2023 à 03:01, Rohan McLure a écrit :
> Replace occurrences of p{u,m,4}d_is_leaf with p{u,m,4}d_leaf, as the
> latter is the name given to checking that a higher-level entry in
> multi-level paging contains a page translation entry (pte) throughout
> all other archs.
> 
> A future patch will implement p{u,m,4}d_leaf stubs on all platforms so
> that they may be referenced in generic code.
> 
> Signed-off-by: Rohan McLure 

Reviewed-by: Christophe Leroy 

> ---
> V4: New patch
> V5: Previously replaced stub definition for *_is_leaf with *_leaf. Do
> that in a later patch
> ---
>   arch/powerpc/kvm/book3s_64_mmu_radix.c   | 12 ++--
>   arch/powerpc/mm/book3s64/radix_pgtable.c | 14 +++---
>   arch/powerpc/mm/pgtable.c|  6 +++---
>   arch/powerpc/mm/pgtable_64.c |  6 +++---
>   arch/powerpc/xmon/xmon.c |  6 +++---
>   5 files changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c 
> b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> index 9d3743ca16d5..0d24fd984d16 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> @@ -497,7 +497,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t 
> *pmd, bool full,
>   for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
>   if (!pmd_present(*p))
>   continue;
> - if (pmd_is_leaf(*p)) {
> + if (pmd_leaf(*p)) {
>   if (full) {
>   pmd_clear(p);
>   } else {
> @@ -526,7 +526,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t 
> *pud,
>   for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
>   if (!pud_present(*p))
>   continue;
> - if (pud_is_leaf(*p)) {
> + if (pud_leaf(*p)) {
>   pud_clear(p);
>   } else {
>   pmd_t *pmd;
> @@ -629,12 +629,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>   new_pud = pud_alloc_one(kvm->mm, gpa);
>   
>   pmd = NULL;
> - if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
> + if (pud && pud_present(*pud) && !pud_leaf(*pud))
>   pmd = pmd_offset(pud, gpa);
>   else if (level <= 1)
>   new_pmd = kvmppc_pmd_alloc();
>   
> - if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
> + if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
>   new_ptep = kvmppc_pte_alloc();
>   
>   /* Check if we might have been invalidated; let the guest retry if so */
> @@ -652,7 +652,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>   new_pud = NULL;
>   }
>   pud = pud_offset(p4d, gpa);
> - if (pud_is_leaf(*pud)) {
> + if (pud_leaf(*pud)) {
>   unsigned long hgpa = gpa & PUD_MASK;
>   
>   /* Check if we raced and someone else has set the same thing */
> @@ -703,7 +703,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>   new_pmd = NULL;
>   }
>   pmd = pmd_offset(pud, gpa);
> - if (pmd_is_leaf(*pmd)) {
> + if (pmd_leaf(*pmd)) {
>   unsigned long lgpa = gpa & PMD_MASK;
>   
>   /* Check if we raced and someone else has set the same thing */
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c 
> b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index 26245aaf12b8..4e46e001c3c3 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -205,14 +205,14 @@ static void radix__change_memory_range(unsigned long 
> start, unsigned long end,
>   pudp = pud_alloc(_mm, p4dp, idx);
>   if (!pudp)
>   continue;
> - if (pud_is_leaf(*pudp)) {
> + if (pud_leaf(*pudp)) {
>   ptep = (pte_t *)pudp;
>   goto update_the_pte;
>   }
>   pmdp = pmd_alloc(_mm, pudp, idx);
>   if (!pmdp)
>   continue;
> - if (pmd_is_leaf(*pmdp)) {
> + if (pmd_leaf(*pmdp)) {
>   ptep = pmdp_ptep(pmdp);
>   goto update_the_pte;
>   }
> @@ -786,7 +786,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, 
> unsigned long addr,
>   if (!pmd_present(*pmd))
>   continue;
>   
> - if (pmd_is_leaf(*pmd)) {
> + if (pmd_leaf(*pmd)) {
>   if (!IS_ALIGNED(addr, PMD_SIZE) ||
>   !IS_ALIGNED(next, PMD_SIZE)) {
>   WARN_ONCE(1, "%s: unaligned range\n", __func__);
> @@ -816,7 +816,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, 
> unsigned long addr,
>   if (!pud_present(*pud))
>   

[PATCH v7 3/7] powerpc: mm: Replace p{u,m,4}d_is_leaf with p{u,m,4}d_leaf

2023-02-14 Thread Rohan McLure
Replace occurrences of p{u,m,4}d_is_leaf with p{u,m,4}d_leaf, as the
latter is the name given to checking that a higher-level entry in
multi-level paging contains a page translation entry (pte) throughout
all other archs.

A future patch will implement p{u,m,4}d_leaf stubs on all platforms so
that they may be referenced in generic code.

Signed-off-by: Rohan McLure 
---
V4: New patch
V5: Previously replaced stub definition for *_is_leaf with *_leaf. Do
that in a later patch
---
 arch/powerpc/kvm/book3s_64_mmu_radix.c   | 12 ++--
 arch/powerpc/mm/book3s64/radix_pgtable.c | 14 +++---
 arch/powerpc/mm/pgtable.c|  6 +++---
 arch/powerpc/mm/pgtable_64.c |  6 +++---
 arch/powerpc/xmon/xmon.c |  6 +++---
 5 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c 
b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 9d3743ca16d5..0d24fd984d16 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -497,7 +497,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t 
*pmd, bool full,
for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
if (!pmd_present(*p))
continue;
-   if (pmd_is_leaf(*p)) {
+   if (pmd_leaf(*p)) {
if (full) {
pmd_clear(p);
} else {
@@ -526,7 +526,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t 
*pud,
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
if (!pud_present(*p))
continue;
-   if (pud_is_leaf(*p)) {
+   if (pud_leaf(*p)) {
pud_clear(p);
} else {
pmd_t *pmd;
@@ -629,12 +629,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
pte_t pte,
new_pud = pud_alloc_one(kvm->mm, gpa);
 
pmd = NULL;
-   if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
+   if (pud && pud_present(*pud) && !pud_leaf(*pud))
pmd = pmd_offset(pud, gpa);
else if (level <= 1)
new_pmd = kvmppc_pmd_alloc();
 
-   if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+   if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
new_ptep = kvmppc_pte_alloc();
 
/* Check if we might have been invalidated; let the guest retry if so */
@@ -652,7 +652,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
pte_t pte,
new_pud = NULL;
}
pud = pud_offset(p4d, gpa);
-   if (pud_is_leaf(*pud)) {
+   if (pud_leaf(*pud)) {
unsigned long hgpa = gpa & PUD_MASK;
 
/* Check if we raced and someone else has set the same thing */
@@ -703,7 +703,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
pte_t pte,
new_pmd = NULL;
}
pmd = pmd_offset(pud, gpa);
-   if (pmd_is_leaf(*pmd)) {
+   if (pmd_leaf(*pmd)) {
unsigned long lgpa = gpa & PMD_MASK;
 
/* Check if we raced and someone else has set the same thing */
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c 
b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 26245aaf12b8..4e46e001c3c3 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -205,14 +205,14 @@ static void radix__change_memory_range(unsigned long 
start, unsigned long end,
pudp = pud_alloc(_mm, p4dp, idx);
if (!pudp)
continue;
-   if (pud_is_leaf(*pudp)) {
+   if (pud_leaf(*pudp)) {
ptep = (pte_t *)pudp;
goto update_the_pte;
}
pmdp = pmd_alloc(_mm, pudp, idx);
if (!pmdp)
continue;
-   if (pmd_is_leaf(*pmdp)) {
+   if (pmd_leaf(*pmdp)) {
ptep = pmdp_ptep(pmdp);
goto update_the_pte;
}
@@ -786,7 +786,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, 
unsigned long addr,
if (!pmd_present(*pmd))
continue;
 
-   if (pmd_is_leaf(*pmd)) {
+   if (pmd_leaf(*pmd)) {
if (!IS_ALIGNED(addr, PMD_SIZE) ||
!IS_ALIGNED(next, PMD_SIZE)) {
WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -816,7 +816,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, 
unsigned long addr,
if (!pud_present(*pud))
continue;
 
-   if (pud_is_leaf(*pud)) {
+   if (pud_leaf(*pud)) {
if (!IS_ALIGNED(addr, PUD_SIZE) ||
!IS_ALIGNED(next,