When UFFD_FEATURE_MINOR_ASYNC is enabled, skip handle_userfault() in
the shmem and hugetlbfs minor fault paths. The normal fault path
installs the PTE from page cache directly.

Signed-off-by: Kiryl Shutsemau (Meta) <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 mm/hugetlb.c | 3 ++-
 mm/shmem.c   | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 327eaa4074d3..c10d2432768c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5847,7 +5847,8 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                }
 
                /* Check for page in userfault range. */
-               if (userfaultfd_minor(vma)) {
+               if (userfaultfd_minor(vma) &&
+                   !userfaultfd_minor_async(vma)) {
                        folio_unlock(folio);
                        folio_put(folio);
                        /* See comment in userfaultfd_missing() block above */
diff --git a/mm/shmem.c b/mm/shmem.c
index b40f3cd48961..ce47e77fc090 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2489,7 +2489,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
        fault_mm = vma ? vma->vm_mm : NULL;
 
        folio = filemap_get_entry(inode->i_mapping, index);
-       if (folio && vma && userfaultfd_minor(vma)) {
+       if (folio && vma && userfaultfd_minor(vma) &&
+           !userfaultfd_minor_async(vma)) {
                if (!xa_is_value(folio))
                        folio_put(folio);
                *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
-- 
2.51.2


Reply via email to