On Mon Sep 5, 2022 at 7:38 PM AEST, Christophe Leroy wrote:
> update_mmu_cache() is a no-op when hash page tables are not used.
> On PPC32 that means when MMU_FTR_HPTE_TABLE is not set.
> On PPC64 that means when RADIX is enabled.
>
> Rename the core part of update_mmu_cache() to __update_mmu_cache()
> and perform the initial checks in an inline caller.

Reviewed-by: Nicholas Piggin <npig...@gmail.com>
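
As an aside for anyone reading along: the shape of the change is the usual
inline-wrapper pattern, where a cheap feature check is hoisted into a static
inline so the out-of-line body is only reached when the hash MMU is actually
in use. A minimal stand-alone sketch of that pattern (plain C, illustrative
names only, not kernel code):

	/* Illustrative only -- not powerpc code. Shows the inline-wrapper
	 * pattern this patch applies: a cheap predicate is checked in an
	 * inline caller so the out-of-line __do_work() is only called when
	 * the feature is in use. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool feature_enabled;		/* stands in for mmu_has_feature() */

	static void __do_work(int arg)		/* stands in for __update_mmu_cache() */
	{
		printf("expensive work for %d\n", arg);
	}

	static inline void do_work(int arg)	/* stands in for update_mmu_cache() */
	{
		if (!feature_enabled)
			return;			/* fast path: skip the call */
		__do_work(arg);
	}

	int main(void)
	{
		do_work(1);			/* skipped: feature off */
		feature_enabled = true;
		do_work(2);			/* reaches __do_work() */
		return 0;
	}

With the check inlined in the header, configurations that never use hash page
tables pay only the predicate test at each call site instead of an
unconditional out-of-line call.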

>
> Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
> ---
>  arch/powerpc/include/asm/book3s/pgtable.h | 15 ++++++++++-----
>  arch/powerpc/mm/book3s32/mmu.c            |  4 +---
>  arch/powerpc/mm/book3s64/hash_utils.c     |  5 +----
>  3 files changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
> index e8269434ecbe..d18b748ea3ae 100644
> --- a/arch/powerpc/include/asm/book3s/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/pgtable.h
> @@ -25,7 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>                                    unsigned long size, pgprot_t vma_prot);
>  #define __HAVE_PHYS_MEM_ACCESS_PROT
>  
> -#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
> +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
> +
>  /*
>   * This gets called at the end of handling a page fault, when
>   * the kernel has put a new PTE into the page table for the process.
> @@ -35,10 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>   * corresponding HPTE into the hash table ahead of time, instead of
>   * waiting for the inevitable extra hash-table miss exception.
>   */
> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
> -#else
> -static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
> -#endif
> +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
> +{
> +     if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
> +             return;
> +     if (radix_enabled())
> +             return;
> +     __update_mmu_cache(vma, address, ptep);
> +}
>  
>  #endif /* __ASSEMBLY__ */
>  #endif
> diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
> index a96b73006dfb..7053eb229b4f 100644
> --- a/arch/powerpc/mm/book3s32/mmu.c
> +++ b/arch/powerpc/mm/book3s32/mmu.c
> @@ -314,11 +314,9 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
>   *
>   * This must always be called with the pte lock held.
>   */
> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
> +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>                     pte_t *ptep)
>  {
> -     if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
> -             return;
>       /*
>        * We don't need to worry about _PAGE_PRESENT here because we are
>        * called with either mm->page_table_lock held or ptl lock held
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index 363a9447d63d..ced1107b1677 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -1781,7 +1781,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
>   *
>   * This must always be called with the pte lock held.
>   */
> -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
> +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>                     pte_t *ptep)
>  {
>       /*
> @@ -1791,9 +1791,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
>       unsigned long trap;
>       bool is_exec;
>  
> -     if (radix_enabled())
> -             return;
> -
>       /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
>       if (!pte_young(*ptep) || address >= TASK_SIZE)
>               return;
> -- 
> 2.37.1
