1) Move the poisoned-page check to the beginning of the function.
2) Take the page lock to prevent unpoison from clearing the HWPoison flag concurrently.

Signed-off-by: Xishi Qiu <qiuxi...@huawei.com>
Signed-off-by: Jiang Liu <jiang....@huawei.com>
---
 mm/memory-failure.c |   43 ++++++++++++++++++++++---------------------
 1 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8b20278..9b74983 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1419,18 +1419,17 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unsigned long pfn = page_to_pfn(page);
        struct page *hpage = compound_head(page);

+       if (PageHWPoison(hpage)) {
+               pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
+               return -EBUSY;
+       }
+
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
        if (ret == 0)
                goto done;

-       if (PageHWPoison(hpage)) {
-               put_page(hpage);
-               pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
-               return -EBUSY;
-       }
-
        /* Keep page count to indicate a given hugepage is isolated. */
        ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL, false,
                                MIGRATE_SYNC);
@@ -1441,12 +1440,14 @@ static int soft_offline_huge_page(struct page *page, int flags)
                return ret;
        }
 done:
-       if (!PageHWPoison(hpage))
-               atomic_long_add(1 << compound_trans_order(hpage),
-                               &mce_bad_pages);
+       /* keep elevated page count for bad page */
+       lock_page(hpage);
+       atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
        set_page_hwpoison_huge_page(hpage);
+       unlock_page(hpage);
+
        dequeue_hwpoisoned_huge_page(hpage);
-       /* keep elevated page count for bad page */
+
        return ret;
 }

@@ -1488,6 +1489,11 @@ int soft_offline_page(struct page *page, int flags)
                }
        }

+       if (PageHWPoison(page)) {
+               pr_info("soft offline: %#lx page already poisoned\n", pfn);
+               return -EBUSY;
+       }
+
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
@@ -1519,19 +1525,11 @@ int soft_offline_page(struct page *page, int flags)
                return -EIO;
        }

-       lock_page(page);
-       wait_on_page_writeback(page);
-
        /*
         * Synchronized using the page lock with memory_failure()
         */
-       if (PageHWPoison(page)) {
-               unlock_page(page);
-               put_page(page);
-               pr_info("soft offline: %#lx page already poisoned\n", pfn);
-               return -EBUSY;
-       }
-
+       lock_page(page);
+       wait_on_page_writeback(page);
        /*
         * Try to invalidate first. This should work for
         * non dirty unmapped page cache pages.
@@ -1582,8 +1580,11 @@ int soft_offline_page(struct page *page, int flags)
                return ret;

 done:
+       /* keep elevated page count for bad page */
+       lock_page(page);
        atomic_long_add(1, &mce_bad_pages);
        SetPageHWPoison(page);
-       /* keep elevated page count for bad page */
+       unlock_page(page);
+
        return ret;
 }
-- 
1.7.1


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to