On Tue, Jul 11, 2023 at 09:38:35PM -0700, Hugh Dickins wrote:
> Add s390-specific pte_free_defer(), to free table page via call_rcu().
> pte_free_defer() will be called inside khugepaged's retract_page_tables()
> loop, where allocating extra memory cannot be relied upon.  This precedes
> the generic version to avoid build breakage from incompatible pgtable_t.
> 
> This version is more complicated than others: because s390 fits two 2K
> page tables into one 4K page (so page->rcu_head must be shared between
> both halves), and already uses page->lru (which page->rcu_head overlays)
> to list any free halves; with clever management by page->_refcount bits.
> 
> Build upon the existing management, adjusted to follow a new rule: that
> a page is never on the free list if pte_free_defer() was used on either
> half (marked by PageActive).  And for simplicity, delay calling RCU until
> both halves are freed.
> 
> Not adding back unallocated fragments to the list in pte_free_defer()
> can result in wasting some amount of memory for pagetables, depending
> on how long the allocated fragment will stay in use. In practice, this
> effect is expected to be insignificant, and not justify a far more
> complex approach, which might allow to add the fragments back later
> in __tlb_remove_table(), where we might not have a stable mm any more.
> 
> Signed-off-by: Hugh Dickins <hu...@google.com>
> Reviewed-by: Gerald Schaefer <gerald.schae...@linux.ibm.com>
> ---
>  arch/s390/include/asm/pgalloc.h |  4 ++
>  arch/s390/mm/pgalloc.c          | 80 +++++++++++++++++++++++++++++------
>  2 files changed, 72 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
> index 17eb618f1348..89a9d5ef94f8 100644
> --- a/arch/s390/include/asm/pgalloc.h
> +++ b/arch/s390/include/asm/pgalloc.h
> @@ -143,6 +143,10 @@ static inline void pmd_populate(struct mm_struct *mm,
>  #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
>  #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
>  
> +/* arch use pte_free_defer() implementation in arch/s390/mm/pgalloc.c */
> +#define pte_free_defer pte_free_defer
> +void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
> +
>  void vmem_map_init(void);
>  void *vmem_crst_alloc(unsigned long val);
>  pte_t *vmem_pte_alloc(void);
> diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
> index 66ab68db9842..760b4ace475e 100644
> --- a/arch/s390/mm/pgalloc.c
> +++ b/arch/s390/mm/pgalloc.c
> @@ -229,6 +229,15 @@ void page_table_free_pgste(struct page *page)
>   * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
>   * while the PP bits are never used, nor such a page is added to or removed
>   * from mm_context_t::pgtable_list.
> + *
> + * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
> + * and prevents both 2K fragments from being reused. pte_free_defer() has to
> + * guarantee that its pgtable cannot be reused before the RCU grace period
> + * has elapsed (which page_table_free_rcu() does not actually guarantee).
> + * But for simplicity, because page->rcu_head overlays page->lru, and because
> + * the RCU callback might not be called before the mm_context_t has been freed,
> + * pte_free_defer() in this implementation prevents both fragments from being
> + * reused, and delays making the call to RCU until both fragments are freed.
>   */
>  unsigned long *page_table_alloc(struct mm_struct *mm)
>  {
> @@ -261,7 +270,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
>                                       table += PTRS_PER_PTE;
>                               atomic_xor_bits(&page->_refcount,
>                                                       0x01U << (bit + 24));
> -                             list_del(&page->lru);
> +                             list_del_init(&page->lru);
>                       }
>               }
>               spin_unlock_bh(&mm->context.lock);
> @@ -281,6 +290,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
>       table = (unsigned long *) page_to_virt(page);
>       if (mm_alloc_pgste(mm)) {
>               /* Return 4K page table with PGSTEs */
> +             INIT_LIST_HEAD(&page->lru);
>               atomic_xor_bits(&page->_refcount, 0x03U << 24);
>               memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
>               memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
> @@ -300,7 +310,9 @@ static void page_table_release_check(struct page *page, void *table,
>  {
>       char msg[128];
>  
> -     if (!IS_ENABLED(CONFIG_DEBUG_VM) || !mask)
> +     if (!IS_ENABLED(CONFIG_DEBUG_VM))
> +             return;
> +     if (!mask && list_empty(&page->lru))
>               return;
>       snprintf(msg, sizeof(msg),
>                "Invalid pgtable %p release half 0x%02x mask 0x%02x",
> @@ -308,6 +320,15 @@ static void page_table_release_check(struct page *page, void *table,
>       dump_page(page, msg);
>  }
>  
> +static void pte_free_now(struct rcu_head *head)
> +{
> +     struct page *page;
> +
> +     page = container_of(head, struct page, rcu_head);
> +     pgtable_pte_page_dtor(page);
> +     __free_page(page);
> +}
> +
>  void page_table_free(struct mm_struct *mm, unsigned long *table)
>  {
>       unsigned int mask, bit, half;
> @@ -325,10 +346,17 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
>                */
>               mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
>               mask >>= 24;
> -             if (mask & 0x03U)
> +             if ((mask & 0x03U) && !PageActive(page)) {
> +                     /*
> +                      * Other half is allocated, and neither half has had
> +                      * its free deferred: add page to head of list, to make
> +                      * this freed half available for immediate reuse.
> +                      */
>                       list_add(&page->lru, &mm->context.pgtable_list);
> -             else
> -                     list_del(&page->lru);
> +             } else {
> +                     /* If page is on list, now remove it. */
> +                     list_del_init(&page->lru);
> +             }
>               spin_unlock_bh(&mm->context.lock);
>               mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
>               mask >>= 24;
> @@ -342,8 +370,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
>       }
>  
>       page_table_release_check(page, table, half, mask);
> -     pgtable_pte_page_dtor(page);
> -     __free_page(page);
> +     if (TestClearPageActive(page))
> +             call_rcu(&page->rcu_head, pte_free_now);
> +     else
> +             pte_free_now(&page->rcu_head);
>  }
>  
>  void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
> @@ -370,10 +400,18 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
>        */
>       mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
>       mask >>= 24;
> -     if (mask & 0x03U)
> +     if ((mask & 0x03U) && !PageActive(page)) {
> +             /*
> +              * Other half is allocated, and neither half has had
> +              * its free deferred: add page to end of list, to make
> +              * this freed half available for reuse once its pending
> +              * bit has been cleared by __tlb_remove_table().
> +              */
>               list_add_tail(&page->lru, &mm->context.pgtable_list);
> -     else
> -             list_del(&page->lru);
> +     } else {
> +             /* If page is on list, now remove it. */
> +             list_del_init(&page->lru);
> +     }
>       spin_unlock_bh(&mm->context.lock);
>       table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
>       tlb_remove_table(tlb, table);
> @@ -403,10 +441,28 @@ void __tlb_remove_table(void *_table)
>       }
>  
>       page_table_release_check(page, table, half, mask);
> -     pgtable_pte_page_dtor(page);
> -     __free_page(page);
> +     if (TestClearPageActive(page))
> +             call_rcu(&page->rcu_head, pte_free_now);
> +     else
> +             pte_free_now(&page->rcu_head);
>  }
>  
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
> +{
> +     struct page *page;
> +
> +     page = virt_to_page(pgtable);
> +     SetPageActive(page);
> +     page_table_free(mm, (unsigned long *)pgtable);
> +     /*
> +      * page_table_free() does not do the pgste gmap_unlink() which
> +      * page_table_free_rcu() does: warn us if pgste ever reaches here.
> +      */
> +     WARN_ON_ONCE(mm_alloc_pgste(mm));
> +}
> +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> +
>  /*
>   * Base infrastructure required to generate basic asces, region, segment,
>   * and page tables that do not make use of enhanced features like EDAT1.

Tested-by: Alexander Gordeev <agord...@linux.ibm.com>
Acked-by: Alexander Gordeev <agord...@linux.ibm.com>

Reply via email to