lock_folio_killable() is like lock_page_killable(), but for use by callers
who know they have a folio.  Convert __lock_page_killable() to
__lock_folio_killable().  This saves one call to compound_head() per
contended call to lock_page_killable().

__lock_folio_killable() is 20 bytes smaller than __lock_page_killable()
was.  lock_page_maybe_drop_mmap() shrinks by 68 bytes and
__lock_page_or_retry() shrinks by 66 bytes.  That's a total of 154 bytes
of text saved.

Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
Reviewed-by: Christoph Hellwig <h...@lst.de>
Acked-by: Jeff Layton <jlay...@kernel.org>
---
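Illustrative note, not part of the patch: a hypothetical caller that
already has a folio in hand can use the new helper directly and skip the
compound_head() lookup that the page-based path performs.  The function
below is made up for illustration; the return convention (0 on success,
-EINTR when interrupted by a fatal signal) follows the existing
lock_page_killable() documentation.

	/* Hypothetical caller sketch, assuming a folio is already in hand. */
	static int example_lock_folio(struct folio *folio)
	{
		int err;

		/*
		 * Sleeps until the folio lock is acquired or a fatal
		 * signal arrives.  No compound_head() call is needed
		 * because the folio is already the head page.
		 */
		err = lock_folio_killable(folio);
		if (err)
			return err;	/* -EINTR: give up */

		/* ... operate on the locked folio ... */

		unlock_page(&folio->page);
		return 0;
	}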
 include/linux/pagemap.h | 15 ++++++++++-----
 mm/filemap.c            | 17 +++++++++--------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c59b19f1cb0f..b23b95f771f7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -715,7 +715,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 }
 
 void __lock_folio(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+int __lock_folio_killable(struct folio *folio);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
@@ -755,6 +755,14 @@ static inline void lock_page(struct page *page)
                __lock_folio(folio);
 }
 
+static inline int lock_folio_killable(struct folio *folio)
+{
+       might_sleep();
+       if (!trylock_folio(folio))
+               return __lock_folio_killable(folio);
+       return 0;
+}
+
 /*
  * lock_page_killable is like lock_page but can be interrupted by fatal
  * signals.  It returns 0 if it locked the page and -EINTR if it was
@@ -762,10 +770,7 @@ static inline void lock_page(struct page *page)
  */
 static inline int lock_page_killable(struct page *page)
 {
-       might_sleep();
-       if (!trylock_page(page))
-               return __lock_page_killable(page);
-       return 0;
+       return lock_folio_killable(page_folio(page));
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 36289a9816b5..2a4fa0b5fa88 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1585,14 +1585,13 @@ void __lock_folio(struct folio *folio)
 }
 EXPORT_SYMBOL(__lock_folio);
 
-int __lock_page_killable(struct page *__page)
+int __lock_folio_killable(struct folio *folio)
 {
-       struct page *page = compound_head(__page);
-       wait_queue_head_t *q = page_waitqueue(page);
-       return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+       wait_queue_head_t *q = page_waitqueue(&folio->page);
+       return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
                                        EXCLUSIVE);
 }
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__lock_folio_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
@@ -1634,6 +1633,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                         unsigned int flags)
 {
+       struct folio *folio = page_folio(page);
+
        if (fault_flag_allow_retry_first(flags)) {
                /*
                 * CAUTION! In this case, mmap_lock is not released
@@ -1652,13 +1653,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
        if (flags & FAULT_FLAG_KILLABLE) {
                int ret;
 
-               ret = __lock_page_killable(page);
+               ret = __lock_folio_killable(folio);
                if (ret) {
                        mmap_read_unlock(mm);
                        return 0;
                }
        } else {
-               __lock_folio(page_folio(page));
+               __lock_folio(folio);
        }
 
        return 1;
@@ -2820,7 +2821,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
        *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
        if (vmf->flags & FAULT_FLAG_KILLABLE) {
-               if (__lock_page_killable(&folio->page)) {
+               if (__lock_folio_killable(folio)) {
                        /*
                         * We didn't have the right flags to drop the mmap_lock,
                         * but all fault_handlers only check for fatal signals
-- 
2.30.2
