On Mon, 24 Aug 2020, Alex Shi wrote:

> In munlock_vma_page() the page must be PageLocked, just as the pages
> handled in the split_huge_page() series of functions are; so PageLocked
> alone is enough to serialize the two paths.
> 
> Thus the TestClearPageMlocked/hpage_nr_pages steps can be relieved of
> the lru lock, which is not necessary for them.
> 
> As for the other munlock function, __munlock_pagevec(): it has no
> PageLocked protection, so it must keep the lru lock protection.
> 
> Signed-off-by: Alex Shi <alex....@linux.alibaba.com>

I made some comments on the mlock+munlock situation last week:
I won't review this 24/32 and 25/32 now, but will take a look
at your github tree tomorrow instead.  Perhaps I'll find you have
already done the fixes, perhaps I'll find you have merged these back
into earlier patches.  And I won't be reviewing beyond this point:
this is enough for now, I think.

Hugh

> Cc: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
> Cc: Vlastimil Babka <vba...@suse.cz>
> Cc: Andrew Morton <a...@linux-foundation.org>
> Cc: linux...@kvack.org
> Cc: linux-kernel@vger.kernel.org
> ---
>  mm/mlock.c | 41 +++++++++++++++--------------------------
>  1 file changed, 15 insertions(+), 26 deletions(-)
> 
> diff --git a/mm/mlock.c b/mm/mlock.c
> index 0448409184e3..46a05e6ec5ba 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -69,9 +69,9 @@ void clear_page_mlock(struct page *page)
>        *
>        * See __pagevec_lru_add_fn for more explanation.
>        */
> -     if (!isolate_lru_page(page)) {
> +     if (!isolate_lru_page(page))
>               putback_lru_page(page);
> -     } else {
> +     else {
>               /*
>                * We lost the race. the page already moved to evictable list.
>                */
> @@ -178,7 +178,6 @@ static void __munlock_isolation_failed(struct page *page)
>  unsigned int munlock_vma_page(struct page *page)
>  {
>       int nr_pages;
> -     struct lruvec *lruvec;
>  
>       /* For try_to_munlock() and to serialize with page migration */
>       BUG_ON(!PageLocked(page));
> @@ -186,37 +185,22 @@ unsigned int munlock_vma_page(struct page *page)
>       VM_BUG_ON_PAGE(PageTail(page), page);
>  
>       /*
> -      * Serialize split tail pages in __split_huge_page_tail() which
> -      * might otherwise copy PageMlocked to part of the tail pages before
> -      * we clear it in the head page. It also stabilizes thp_nr_pages().
> -      * TestClearPageLRU can't be used here to block page isolation, since
> -      * out of lock clear_page_mlock may interfer PageLRU/PageMlocked
> -      * sequence, same as __pagevec_lru_add_fn, and lead the page place to
> -      * wrong lru list here. So relay on PageLocked to stop lruvec change
> -      * in mem_cgroup_move_account().
> +      * Serialize split tail pages in __split_huge_page_tail() by
> +      * lock_page(); Do TestClearPageMlocked/PageLRU sequence like
> +      * clear_page_mlock().
>        */
> -     lruvec = lock_page_lruvec_irq(page);
> -
> -     if (!TestClearPageMlocked(page)) {
> +     if (!TestClearPageMlocked(page))
>               /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
> -             nr_pages = 1;
> -             goto unlock_out;
> -     }
> +             return 0;
>  
>       nr_pages = thp_nr_pages(page);
>       __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
>  
> -     if (__munlock_isolate_lru_page(page, lruvec, true)) {
> -             unlock_page_lruvec_irq(lruvec);
> +     if (!isolate_lru_page(page))
>               __munlock_isolated_page(page);
> -             goto out;
> -     }
> -     __munlock_isolation_failed(page);
> -
> -unlock_out:
> -     unlock_page_lruvec_irq(lruvec);
> +     else
> +             __munlock_isolation_failed(page);
>  
> -out:
>       return nr_pages - 1;
>  }
>  
> @@ -305,6 +289,11 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
>  
>               /* block memcg change in mem_cgroup_move_account */
>               lock_page_memcg(page);
> +             /*
> +              * Serialize split tail pages in __split_huge_page_tail() which
> +              * might otherwise copy PageMlocked to part of the tail pages
> +              * before we clear it in the head page.
> +              */
>               lruvec = relock_page_lruvec_irq(page, lruvec);
>               if (TestClearPageMlocked(page)) {
>                       /*
> -- 
> 1.8.3.1
> 
> 
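
For reference, piecing the quoted hunks together, munlock_vma_page()
after this patch would read roughly as below; this is a sketch
reconstructed from the diff, not copied from the tree:

	unsigned int munlock_vma_page(struct page *page)
	{
		int nr_pages;

		/* For try_to_munlock() and to serialize with page migration */
		BUG_ON(!PageLocked(page));

		VM_BUG_ON_PAGE(PageTail(page), page);

		/*
		 * Serialize split tail pages in __split_huge_page_tail() by
		 * lock_page(); do the TestClearPageMlocked/PageLRU sequence
		 * like clear_page_mlock().
		 */
		if (!TestClearPageMlocked(page))
			/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
			return 0;

		nr_pages = thp_nr_pages(page);
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

		/* PageLocked serializes against split, so no lru lock taken here */
		if (!isolate_lru_page(page))
			__munlock_isolated_page(page);
		else
			__munlock_isolation_failed(page);

		return nr_pages - 1;
	}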
