Private node folios should not participate in KSM merging by default. The driver manages the memory lifecycle and KSM's page sharing can interfere with driver operations.
Extend the existing zone_device checks in get_mergeable_page and
ksm_next_page_pmd_entry to cover private node folios as well.

Signed-off-by: Gregory Price <[email protected]>
---
 mm/ksm.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 2d89a7c8b4eb..c48e95a6fff9 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -40,6 +40,7 @@
 #include <linux/oom.h>
 #include <linux/numa.h>
 #include <linux/pagewalk.h>
+#include <linux/node_private.h>
 #include <asm/tlbflush.h>

 #include "internal.h"
@@ -808,7 +809,7 @@ static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)

 	folio = folio_walk_start(&fw, vma, addr, 0);
 	if (folio) {
-		if (!folio_is_zone_device(folio) &&
+		if (!folio_is_private_managed(folio) &&
 		    folio_test_anon(folio)) {
 			folio_get(folio);
 			page = fw.page;
@@ -2521,7 +2522,8 @@ static int ksm_next_page_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned lon
 			goto not_found_unlock;

 		folio = page_folio(page);
-		if (folio_is_zone_device(folio) || !folio_test_anon(folio))
+		if (unlikely(folio_is_private_managed(folio)) ||
+		    !folio_test_anon(folio))
 			goto not_found_unlock;

 		page += ((addr & (PMD_SIZE - 1)) >> PAGE_SHIFT);
@@ -2545,7 +2547,8 @@ static int ksm_next_page_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned lon
 			continue;

 		folio = page_folio(page);
-		if (folio_is_zone_device(folio) || !folio_test_anon(folio))
+		if (unlikely(folio_is_private_managed(folio)) ||
+		    !folio_test_anon(folio))
 			continue;
 		goto found_unlock;
 	}
--
2.53.0
