From: Naoya Horiguchi <[email protected]>

Currently soft_offline_page() receives a struct page, while its sibling
memory_failure() receives a pfn. This discrepancy looks weird and makes the
precheck on pfn validity tricky, so let's align them by converting
soft_offline_page() to take a pfn as well.

Signed-off-by: Naoya Horiguchi <[email protected]>
Signed-off-by: Oscar Salvador <[email protected]>
---
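Note: a minimal caller-side sketch (not part of the patch itself, purely
illustrative of the new calling convention). A caller that previously had to
validate the pfn and convert it to a struct page now simply passes the pfn,
and the prechecks live inside soft_offline_page():

        /* before: caller validates the pfn and converts it to a page */
        if (!pfn_valid(pfn))
                return -ENXIO;
        if (!pfn_to_online_page(pfn))
                return -EIO;
        ret = soft_offline_page(pfn_to_page(pfn));

        /* after: just hand over the pfn; validation happens in the callee */
        ret = soft_offline_page(pfn);
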
 drivers/base/memory.c |  7 +------
 include/linux/mm.h    |  2 +-
 mm/madvise.c          |  2 +-
 mm/memory-failure.c   | 17 +++++++++--------
 4 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b3cae2eb1c4f..b510b4d176c9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -538,12 +538,7 @@ static ssize_t soft_offline_page_store(struct device *dev,
        if (kstrtoull(buf, 0, &pfn) < 0)
                return -EINVAL;
        pfn >>= PAGE_SHIFT;
-       if (!pfn_valid(pfn))
-               return -ENXIO;
-       /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
-       if (!pfn_to_online_page(pfn))
-               return -EIO;
-       ret = soft_offline_page(pfn_to_page(pfn));
+       ret = soft_offline_page(pfn);
        return ret == 0 ? count : ret;
 }
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0f80a1ce4e86..40722854d357 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2791,7 +2791,7 @@ extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages __read_mostly;
-extern int soft_offline_page(struct page *page);
+extern int soft_offline_page(unsigned long pfn);
 
 
 /*
diff --git a/mm/madvise.c b/mm/madvise.c
index 9ca48345ce45..f83b7d4c68c1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -890,7 +890,7 @@ static int madvise_inject_error(int behavior,
                if (behavior == MADV_SOFT_OFFLINE) {
                        pr_info("Soft offlining pfn %#lx at process virtual 
address %#lx\n",
                                 pfn, start);
-                       ret = soft_offline_page(page);
+                       ret = soft_offline_page(pfn);
                } else {
                        pr_info("Injecting memory failure for pfn %#lx at 
process virtual address %#lx\n",
                                 pfn, start);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c038896bedf0..bfecb61fc064 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1521,7 +1521,7 @@ static void memory_failure_work_func(struct work_struct *work)
                if (!gotten)
                        break;
                if (entry.flags & MF_SOFT_OFFLINE)
-                       soft_offline_page(pfn_to_page(entry.pfn));
+                       soft_offline_page(entry.pfn);
                else
                        memory_failure(entry.pfn, entry.flags);
        }
@@ -1834,7 +1834,7 @@ static int soft_offline_free_page(struct page *page)
 
 /**
  * soft_offline_page - Soft offline a page.
- * @page: page to offline
+ * @pfn: pfn to soft-offline
  *
  * Returns 0 on success, otherwise negated errno.
  *
@@ -1853,16 +1853,17 @@ static int soft_offline_free_page(struct page *page)
  * This is not a 100% solution for all memory, but tries to be
  * ``good enough'' for the majority of memory.
  */
-int soft_offline_page(struct page *page)
+int soft_offline_page(unsigned long pfn)
 {
        int ret;
-       unsigned long pfn = page_to_pfn(page);
+       struct page *page;
 
-       if (is_zone_device_page(page)) {
-               pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
-                               pfn);
+       if (!pfn_valid(pfn))
+               return -ENXIO;
+       /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+       page = pfn_to_online_page(pfn);
+       if (!page)
                return -EIO;
-       }
 
        if (PageHWPoison(page)) {
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
-- 
2.12.3
