Provide vm_flags_reset_once() and use it for the vm_flags updates that
relied on WRITE_ONCE() to prevent compiler optimizations before the
conversion to vm_flags modifier calls.

Fixes: 0cce31a0aa0e ("mm: replace vma->vm_flags direct modifications with modifier calls")
Reported-by: Hyeonggon Yoo <42.hye...@gmail.com>
Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
Notes:
- The patch applies cleanly over mm-unstable
- The SHA in the Fixes: line is from mm-unstable, so it is... unstable
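
For readers unfamiliar with the pattern, here is a minimal standalone
sketch (hypothetical userspace code, not part of this patch) of why the
marked store matters: a thread polling the flags word concurrently needs
both the store and the load to go through volatile accesses so the
compiler can neither tear the store nor hoist the load out of the loop.
The WRITE_ONCE()/READ_ONCE() macros below only mimic the kernel's
versions from include/asm-generic/rwonce.h; the VM_IO value is borrowed
from include/linux/mm.h.

#include <pthread.h>
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))

#define VM_IO 0x00004000UL	/* same value as in include/linux/mm.h */

static unsigned long vm_flags = VM_IO;

static void *reader(void *arg)
{
	/* Without READ_ONCE() the compiler may load vm_flags once and
	 * spin on the cached value forever. */
	while (READ_ONCE(vm_flags) & VM_IO)
		;
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	/* Marked store pairs with the marked load in reader(). */
	WRITE_ONCE(vm_flags, vm_flags & ~VM_IO);
	pthread_join(t, NULL);
	printf("reader observed VM_IO cleared\n");
	return 0;
}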

 include/linux/mm.h | 7 +++++++
 mm/mlock.c         | 4 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5bf0ad48faaa..23ce04f6e91e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -648,6 +648,13 @@ static inline void vm_flags_reset(struct vm_area_struct *vma,
        vm_flags_init(vma, flags);
 }
 
+static inline void vm_flags_reset_once(struct vm_area_struct *vma,
+                                      vm_flags_t flags)
+{
+       mmap_assert_write_locked(vma->vm_mm);
+       WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
+}
+
 static inline void vm_flags_set(struct vm_area_struct *vma,
                                vm_flags_t flags)
 {
diff --git a/mm/mlock.c b/mm/mlock.c
index ed49459e343e..617469fce96d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -380,7 +380,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
         */
        if (newflags & VM_LOCKED)
                newflags |= VM_IO;
-       vm_flags_reset(vma, newflags);
+       vm_flags_reset_once(vma, newflags);
 
        lru_add_drain();
        walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
@@ -388,7 +388,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
 
        if (newflags & VM_IO) {
                newflags &= ~VM_IO;
-               vm_flags_reset(vma, newflags);
+               vm_flags_reset_once(vma, newflags);
        }
 }
 
-- 
2.39.1.456.gfc5497dd1b-goog
