Currently, page_lru() uses Page{Active,Unevictable} to determine which
lru list a page belongs to. Page{Active,Unevictable} contain
compound_head() and therefore page_lru() essentially tests
PG_{active,unevictable} against compound_head(page)->flags. Once an
lru list is determined, page->lru, rather than
compound_head(page)->lru, is added to or deleted from it.
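
For reference, Page{Active,Unevictable} are generated by the PAGEFLAG()
macros with the PF_HEAD policy in include/linux/page-flags.h, so
PageActive() roughly expands to the sketch below (simplified, poison
checks omitted):

  static __always_inline int PageActive(struct page *page)
  {
          /* PF_HEAD policy: the flag is tracked on the head page */
          return test_bit(PG_active, &compound_head(page)->flags);
  }

Every inlined caller therefore also inlines a compound_head() lookup.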

Though not a bug, having compound_head() in page_lru() increases the
size of vmlinux by O(KB) because page_lru() is inlined in many places.
And removing compound_head() entirely from Page{Active,Unevictable}
may not be the best option (for the moment) either, because there may
be other cases that need compound_head(). This patch makes page_lru()
and __clear_page_lru_flags(), which are used immediately before and
after operations on page->lru, test PG_{active,unevictable} directly
against page->flags instead.
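
To illustrate the arithmetic used in the new page_lru() (assuming the
usual enum lru_list layout, i.e. LRU_BASE == 0, LRU_ACTIVE == 1 and
LRU_FILE == 2), the list index is derived directly from the flag bits,
roughly as in this sketch (not part of the patch):

  unsigned long flags = READ_ONCE(page->flags);
  enum lru_list lru;

  if (flags & BIT(PG_unevictable))
          lru = LRU_UNEVICTABLE;
  else
          lru = LRU_FILE * !(flags & BIT(PG_swapbacked)) + /* anon +0, file +2 */
                !!(flags & BIT(PG_active));                /* inactive +0, active +1 */

For example, an active file page (PG_active set, PG_swapbacked clear)
maps to 2 * 1 + 1 == LRU_ACTIVE_FILE.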

scripts/bloat-o-meter results before and after the entire series:
  Clang: add/remove: 0/1 grow/shrink: 7/10 up/down: 191/-1189 (-998)
  GCC: add/remove: 0/1 grow/shrink: 9/9 up/down: 1010/-783 (227)

Signed-off-by: Yu Zhao <yuz...@google.com>
---
 include/linux/mm_inline.h | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 355ea1ee32bd..1b8df9e6f63f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -46,14 +46,12 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageLRU(page), page);
 
-       __ClearPageLRU(page);
-
        /* this shouldn't happen, so leave the flags to bad_page() */
-       if (PageActive(page) && PageUnevictable(page))
+       if ((page->flags & (BIT(PG_active) | BIT(PG_unevictable))) ==
+           (BIT(PG_active) | BIT(PG_unevictable)))
                return;
 
-       __ClearPageActive(page);
-       __ClearPageUnevictable(page);
+       page->flags &= ~(BIT(PG_lru) | BIT(PG_active) | BIT(PG_unevictable));
 }
 
 /**
@@ -65,18 +63,13 @@ static __always_inline void __clear_page_lru_flags(struct page *page)
  */
 static __always_inline enum lru_list page_lru(struct page *page)
 {
-       enum lru_list lru;
+       unsigned long flags = READ_ONCE(page->flags);
 
        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
 
-       if (PageUnevictable(page))
-               return LRU_UNEVICTABLE;
-
-       lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
-       if (PageActive(page))
-               lru += LRU_ACTIVE;
-
-       return lru;
+       /* test page->flags directly to avoid unnecessary compound_head() */
+       return (flags & BIT(PG_unevictable)) ? LRU_UNEVICTABLE :
+              (LRU_FILE * !(flags & BIT(PG_swapbacked)) + !!(flags & BIT(PG_active)));
 }
 
 static __always_inline void add_page_to_lru_list(struct page *page,
-- 
2.30.0.617.g56c4b15f3c-goog
