lock_page() must operate on the whole compound page. It doesn't make
much sense to lock only part of a compound page. Change the code to use
the head page's PG_locked if a tail page is passed.
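
A minimal illustration of the intended behaviour (not part of the patch;
'head' is assumed to be the head page of an order >= 1 compound page):

        /* Illustration only: locking a tail page serializes on the head. */
        static void example_lock_via_tail(struct page *head)
        {
                struct page *tail = head + 1;   /* first tail page */

                lock_page(tail);                /* takes PG_locked on 'head' */
                VM_BUG_ON_PAGE(!PageLocked(head), head);
                unlock_page(tail);              /* releases 'head' */
        }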

This patch also gets rid of the custom helper functions
__set_page_locked() and __clear_page_locked(). They are replaced with
the helpers generated by __SETPAGEFLAG/__CLEARPAGEFLAG. Passing a tail
page to these helpers triggers VM_BUG_ON().
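
As a rough model (illustration only -- the real definitions come from the
page-flags macro machinery and may differ in detail), the generated helpers
behave roughly like this:

        /* Rough model of the NO_TAIL helpers; not the actual expansion. */
        static inline void __SetPageLocked(struct page *page)
        {
                VM_BUG_ON_PAGE(PageTail(page), page);   /* no tail pages here */
                __set_bit(PG_locked, &page->flags);     /* non-atomic: new pages only */
        }

        static inline void __ClearPageLocked(struct page *page)
        {
                VM_BUG_ON_PAGE(PageTail(page), page);
                __clear_bit(PG_locked, &page->flags);
        }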

SLUB uses PG_locked as a bit spin lock. IIUC, tail pages should never
end up there. A VM_BUG_ON() is added to make sure that this assumption
is correct.

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
 fs/cifs/file.c             |  8 ++++----
 include/linux/page-flags.h |  2 +-
 include/linux/pagemap.h    | 25 ++++++++-----------------
 mm/filemap.c               | 15 +++++++++------
 mm/ksm.c                   |  2 +-
 mm/memory-failure.c        |  2 +-
 mm/migrate.c               |  2 +-
 mm/shmem.c                 |  4 ++--
 mm/slub.c                  |  2 ++
 mm/swap_state.c            |  4 ++--
 mm/vmscan.c                |  4 ++--
 mm/zswap.c                 |  4 ++--
 12 files changed, 35 insertions(+), 39 deletions(-)

diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ca30c391a894..b9fd85dfee9b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3413,13 +3413,13 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
         * should have access to this page, we're safe to simply set
         * PG_locked without checking it first.
         */
-       __set_page_locked(page);
+       __SetPageLocked(page);
        rc = add_to_page_cache_locked(page, mapping,
                                      page->index, GFP_KERNEL);
 
        /* give up if we can't stick it in the cache */
        if (rc) {
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
                return rc;
        }
 
@@ -3440,10 +3440,10 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                if (*bytes + PAGE_CACHE_SIZE > rsize)
                        break;
 
-               __set_page_locked(page);
+               __SetPageLocked(page);
                if (add_to_page_cache_locked(page, mapping, page->index,
                                                                GFP_KERNEL)) {
-                       __clear_page_locked(page);
+                       __ClearPageLocked(page);
                        break;
                }
                list_move_tail(&page->lru, tmplist);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 32ea62c0ad30..10bdde20b14c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -269,7 +269,7 @@ static inline struct page *compound_head_fast(struct page *page)
        return page;
 }
 
-TESTPAGEFLAG(Locked, locked, ANY)
+__PAGEFLAG(Locked, locked, NO_TAIL)
 PAGEFLAG(Error, error, ANY) TESTCLEARFLAG(Error, error, ANY)
 PAGEFLAG(Referenced, referenced, ANY) TESTCLEARFLAG(Referenced, referenced, ANY)
        __SETPAGEFLAG(Referenced, referenced, ANY)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f7065c..7c3790764795 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -426,18 +426,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
 extern void unlock_page(struct page *page);
 
-static inline void __set_page_locked(struct page *page)
-{
-       __set_bit(PG_locked, &page->flags);
-}
-
-static inline void __clear_page_locked(struct page *page)
-{
-       __clear_bit(PG_locked, &page->flags);
-}
-
 static inline int trylock_page(struct page *page)
 {
+       page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
@@ -490,9 +481,9 @@ extern int wait_on_page_bit_killable_timeout(struct page *page,
 
 static inline int wait_on_page_locked_killable(struct page *page)
 {
-       if (PageLocked(page))
-               return wait_on_page_bit_killable(page, PG_locked);
-       return 0;
+       if (!PageLocked(page))
+               return 0;
+       return wait_on_page_bit_killable(compound_head(page), PG_locked);
 }
 
 extern wait_queue_head_t *page_waitqueue(struct page *page);
@@ -511,7 +502,7 @@ static inline void wake_up_page(struct page *page, int bit)
 static inline void wait_on_page_locked(struct page *page)
 {
        if (PageLocked(page))
-               wait_on_page_bit(page, PG_locked);
+               wait_on_page_bit(compound_head(page), PG_locked);
 }
 
 /* 
@@ -656,17 +647,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __set_page_locked() against it.
+ * the page is new, so we can just run __SetPageLocked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
        int error;
 
-       __set_page_locked(page);
+       __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
        return error;
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 12548d03c11d..467768d4263b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -615,11 +615,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
        void *shadow = NULL;
        int ret;
 
-       __set_page_locked(page);
+       __SetPageLocked(page);
        ret = __add_to_page_cache_locked(page, mapping, offset,
                                         gfp_mask, &shadow);
        if (unlikely(ret))
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
        else {
                /*
                 * The page might have been evicted from cache only
@@ -742,6 +742,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
+       page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        clear_bit_unlock(PG_locked, &page->flags);
        smp_mb__after_atomic();
@@ -806,18 +807,20 @@ EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_page(struct page *page)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       struct page *page_head = compound_head(page);
+       DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-       __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
+       __wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
 
 int __lock_page_killable(struct page *page)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       struct page *page_head = compound_head(page);
+       DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+       return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
                                        bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
diff --git a/mm/ksm.c b/mm/ksm.c
index 4162dce2eb44..23138e99a531 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1884,7 +1884,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
        }
 
        return new_page;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d487f8dc6d39..399eee44d13d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1136,7 +1136,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        /*
         * We ignore non-LRU pages for good reasons.
         * - PG_locked is only well defined for LRU pages and a few others
-        * - to avoid races with __set_page_locked()
+        * - to avoid races with __SetPageLocked()
         * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
diff --git a/mm/migrate.c b/mm/migrate.c
index 6aa9a4222ea9..114602a68111 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1734,7 +1734,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                flush_tlb_range(vma, mmun_start, mmun_end);
 
        /* Prepare a page as a migration target */
-       __set_page_locked(new_page);
+       __SetPageLocked(new_page);
        SetPageSwapBacked(new_page);
 
        /* anon mapping, we can simply copy page->mapping to the new page: */
diff --git a/mm/shmem.c b/mm/shmem.c
index 80b360c7bcd1..2e2b943c8e62 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -981,7 +981,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        copy_highpage(newpage, oldpage);
        flush_dcache_page(newpage);
 
-       __set_page_locked(newpage);
+       __SetPageLocked(newpage);
        SetPageUptodate(newpage);
        SetPageSwapBacked(newpage);
        set_page_private(newpage, swap_index);
@@ -1173,7 +1173,7 @@ repeat:
                }
 
                __SetPageSwapBacked(page);
-               __set_page_locked(page);
+               __SetPageLocked(page);
                if (sgp == SGP_WRITE)
                        __SetPageReferenced(page);
 
diff --git a/mm/slub.c b/mm/slub.c
index 2584d4ff02eb..f33ae2b7a5e7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -338,11 +338,13 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  */
 static __always_inline void slab_lock(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        bit_spin_lock(PG_locked, &page->flags);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        __bit_spin_unlock(PG_locked, &page->flags);
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 405923f77334..d1c4a25b4362 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -357,7 +357,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                }
 
                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
@@ -371,7 +371,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
-               __clear_page_locked(new_page);
+               __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 260c413d39cd..dc6cd51577a6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1062,7 +1062,7 @@ unmap:
                                VM_BUG_ON_PAGE(PageSwapCache(page), page);
                                if (!page_freeze_refs(page, 1))
                                        goto keep_locked;
-                               __clear_page_locked(page);
+                               __ClearPageLocked(page);
                                count_vm_event(PGLAZYFREED);
                                goto free_it;
                        }
@@ -1174,7 +1174,7 @@ unmap:
                 * we obviously don't have to worry about waking up a process
                 * waiting on the page lock, because there are no references.
                 */
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
 free_it:
                nr_reclaimed++;
 
diff --git a/mm/zswap.c b/mm/zswap.c
index 4249e82ff934..f8583f1fc938 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -490,7 +490,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
                }
 
                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
@@ -501,7 +501,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
-               __clear_page_locked(new_page);
+               __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
-- 
2.1.4
