On Tue, Jul 23, 2019 at 01:08:42PM +0800, Huang, Ying wrote:
> @@ -2489,6 +2491,14 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>       /* complete memcg works before add pages to LRU */
>       mem_cgroup_split_huge_fixup(head);
>  
> +     if (PageAnon(head) && PageSwapCache(head)) {
> +             swp_entry_t entry = { .val = page_private(head) };
> +
> +             offset = swp_offset(entry);
> +             swap_cache = swap_address_space(entry);
> +             xa_lock(&swap_cache->i_pages);
> +     }
> +
>       for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
>               __split_huge_page_tail(head, i, lruvec, list);
>               /* Some pages can be beyond i_size: drop them from page cache */
> @@ -2501,6 +2511,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>               } else if (!PageAnon(page)) {
>                       __xa_store(&head->mapping->i_pages, head[i].index,
>                                       head + i, 0);
> +             } else if (swap_cache) {
> +                     __xa_store(&swap_cache->i_pages, offset + i,
> +                                head + i, 0);

I tried something along these lines (though I think I messed up the
offset calculation, which is why it wasn't working for me).  My other
concern was the case where SWAPFILE_CLUSTER is less than HPAGE_PMD_NR:
don't we need to drop the lock and look up a new swap_cache once
offset + i crosses a SWAPFILE_CLUSTER boundary?
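
For concreteness, here's roughly what I had in mind.  This is an
untested sketch, not the posted patch; it assumes 'entry' is hoisted
out of the PageSwapCache block so it's visible inside the loop:

	} else if (swap_cache) {
		/* A tail page may land in a different swap address
		 * space than the head, so re-derive the mapping per
		 * tail and re-take the lock when we cross a boundary.
		 * Untested. */
		swp_entry_t tail = swp_entry(swp_type(entry), offset + i);
		struct address_space *cache = swap_address_space(tail);

		if (cache != swap_cache) {
			xa_unlock(&swap_cache->i_pages);
			swap_cache = cache;
			xa_lock(&swap_cache->i_pages);
		}
		__xa_store(&swap_cache->i_pages, offset + i, head + i, 0);
	}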
