The behaviour of try_to_unmap_one() is difficult to follow because it
performs different operations based on a fairly large set of flags used
in different combinations.

TTU_MUNLOCK is one such flag. However, it is used exclusively by
try_to_munlock(), which specifies no other flags. Therefore, rather than
overload try_to_unmap_one() with unrelated behaviour, split this out into
its own function and remove the flag.

Signed-off-by: Alistair Popple <apop...@nvidia.com>

---

Given the comments on not needing to hold mmap_lock, it was not 100% clear
to me whether it is safe to check vma->vm_flags & VM_LOCKED without the
lock, and whether re-checking it under the ptl is significant. I left the
extra check in case it is, but it seems one of the checks is redundant:
either the first check is racy or the second check is unnecessary.
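
To make the question concrete, here is a condensed sketch of the two checks
(the full version is try_to_munlock_one() in the diff below); the function
name and the trimmed body are for illustration only:

static bool munlock_one_sketch(struct page *page, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	bool ret = true;

	/*
	 * Check 1: no pte lock is held yet, so in principle vm_flags could
	 * change between here and the walk below (the possibly racy check).
	 */
	if (!(vma->vm_flags & VM_LOCKED))
		return true;

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * Check 2: the walk holds the pte lock here. Is re-reading
		 * vm_flags under the ptl needed, or is check 1 sufficient?
		 */
		if (vma->vm_flags & VM_LOCKED) {
			/* PTE-mapped THP are never mlocked */
			if (!PageTransCompound(page))
				mlock_vma_page(page);
			ret = false;
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	return ret;
}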
---
 include/linux/rmap.h |  1 -
 mm/rmap.c            | 47 ++++++++++++++++++++++++++++++++++++--------
 2 files changed, 39 insertions(+), 9 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 70085ca1a3fc..7f1ee411bd7b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -87,7 +87,6 @@ struct anon_vma_chain {
 
 enum ttu_flags {
        TTU_MIGRATION           = 0x1,  /* migration mode */
-       TTU_MUNLOCK             = 0x2,  /* munlock mode */
 
        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
diff --git a/mm/rmap.c b/mm/rmap.c
index ef9ef2694c58..850eecdd866a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1391,10 +1391,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        struct mmu_notifier_range range;
        enum ttu_flags flags = (enum ttu_flags)(long)arg;
 
-       /* munlock has nothing to gain from examining un-locked vmas */
-       if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-               return true;
-
        if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
            is_zone_device_page(page) && !is_device_private_page(page))
                return true;
@@ -1455,8 +1451,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
-                       if (flags & TTU_MUNLOCK)
-                               continue;
                }
 
                /* Unexpected PMD-mapped THP? */
@@ -1775,6 +1769,44 @@ static int page_not_mapped(struct page *page)
        return !page_mapped(page);
 };
 
+static bool try_to_munlock_one(struct page *page, struct vm_area_struct *vma,
+                    unsigned long address, void *arg)
+{
+       struct page_vma_mapped_walk pvmw = {
+               .page = page,
+               .vma = vma,
+               .address = address,
+       };
+       bool ret = true;
+
+       /* munlock has nothing to gain from examining un-locked vmas */
+       if (!(vma->vm_flags & VM_LOCKED))
+               return true;
+
+       while (page_vma_mapped_walk(&pvmw)) {
+               /*
+                * If the page is mlock()d, we cannot swap it out.
+                * If it's recently referenced (perhaps page_referenced
+                * skipped over this mm) then we should reactivate it.
+                */
+               if (vma->vm_flags & VM_LOCKED) {
+                       /* PTE-mapped THP are never mlocked */
+                       if (!PageTransCompound(page)) {
+                               /*
+                                * Holding pte lock, we do *not* need
+                                * mmap_lock here
+                                */
+                               mlock_vma_page(page);
+                       }
+                       ret = false;
+                       page_vma_mapped_walk_done(&pvmw);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1787,8 +1819,7 @@ static int page_not_mapped(struct page *page)
 void try_to_munlock(struct page *page)
 {
        struct rmap_walk_control rwc = {
-               .rmap_one = try_to_unmap_one,
-               .arg = (void *)TTU_MUNLOCK,
+               .rmap_one = try_to_munlock_one,
                .done = page_not_mapped,
                .anon_lock = page_lock_anon_vma_read,
 
-- 
2.20.1

_______________________________________________
Nouveau mailing list
Nouveau@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/nouveau

Reply via email to