A collapse operation allocates a new large folio and migrates the
smaller folios into it. This is an issue for private nodes:
1. The private node service may not support migration.
2. Collapse may promote pages from the private node to a local node,
   which may result in an LRU inversion that defeats memory tiering.
Handle this just like zone_device for now.
It may be possible to support this later for some private node services
that report explicit support for collapse (and migration).
Signed-off-by: Gregory Price <[email protected]>
---
mm/khugepaged.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
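
For reviewers, a minimal sketch (not part of this patch) of how
page_is_private_managed() might be composed, assuming
<linux/node_private.h> provides a node_is_private() predicate
introduced earlier in this series; node_is_private() is an assumption
here, not an in-tree API. Folding the is_zone_device_page() check in
preserves the old behavior at these call sites:

/*
 * Hypothetical composition: treat both ZONE_DEVICE pages and pages
 * resident on a private-managed node as ineligible for collapse.
 * node_is_private() is assumed from earlier in the series.
 */
static inline bool page_is_private_managed(struct page *page)
{
	if (is_zone_device_page(page))
		return true;
	return node_is_private(page_to_nid(page));
}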
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 97d1b2824386..36f6bc5da53c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -21,6 +21,7 @@
#include <linux/shmem_fs.h>
#include <linux/dax.h>
#include <linux/ksm.h>
+#include <linux/node_private.h>
#include <linux/pgalloc.h>

#include <asm/tlb.h>
@@ -571,7 +572,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
goto out;
}
page = vm_normal_page(vma, addr, pteval);
- if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+ if (unlikely(!page) || unlikely(page_is_private_managed(page))) {
result = SCAN_PAGE_NULL;
goto out;
}
@@ -1323,7 +1324,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
}
page = vm_normal_page(vma, addr, pteval);
- if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+ if (unlikely(!page) || unlikely(page_is_private_managed(page))) {
result = SCAN_PAGE_NULL;
goto out_unmap;
}
@@ -1575,7 +1576,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
}
page = vm_normal_page(vma, addr, ptent);
- if (WARN_ON_ONCE(page && is_zone_device_page(page)))
+ if (WARN_ON_ONCE(page && page_is_private_managed(page)))
page = NULL;
/*
* Note that uprobe, debugger, or MAP_PRIVATE may change the
--
2.53.0