In order to remain consistent between functions which manipulate a
vm_flags_t argument, of the form vma_flags_...(), and those which
manipulate a VMA (in this case a VMA's flags field), rename
vma_flag_[test/set]_atomic() to vma_[test/set]_atomic_flag().

This lays the groundwork for adding VMA flag manipulation functions in a
subsequent commit.
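
For illustration, a rough sketch of a hypothetical caller of the renamed
helpers (the locking and race rules restate the comments in mm.h; the
function below is not part of this series):

	/* Hypothetical caller, sketching the renamed helpers only. */
	static bool example(struct vm_area_struct *vma)
	{
		/* Set a single flag bit atomically; mmap/VMA read lock suffices. */
		vma_set_atomic_flag(vma, VMA_MAYBE_GUARD_BIT);

		/* Necessarily racy - serialise externally or tolerate races. */
		return vma_test_atomic_flag(vma, VMA_MAYBE_GUARD_BIT);
	}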

Signed-off-by: Lorenzo Stoakes <[email protected]>
---
 include/linux/mm.h | 13 +++++--------
 mm/khugepaged.c    |  2 +-
 mm/madvise.c       |  2 +-
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a18ade628c8e..25f7679df55c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -987,8 +987,7 @@ static inline void vm_flags_mod(struct vm_area_struct *vma,
        __vm_flags_mod(vma, set, clear);
 }
 
-static inline bool __vma_flag_atomic_valid(struct vm_area_struct *vma,
-                                          vma_flag_t bit)
+static inline bool __vma_atomic_valid_flag(struct vm_area_struct *vma, vma_flag_t bit)
 {
        const vm_flags_t mask = BIT((__force int)bit);
 
@@ -1003,8 +1002,7 @@ static inline bool __vma_flag_atomic_valid(struct vm_area_struct *vma,
  * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
  * valid flags are allowed to do this.
  */
-static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
-                                      vma_flag_t bit)
+static inline void vma_set_atomic_flag(struct vm_area_struct *vma, vma_flag_t bit)
 {
        unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
 
@@ -1012,7 +1010,7 @@ static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
        if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
                vma_assert_locked(vma);
 
-       if (__vma_flag_atomic_valid(vma, bit))
+       if (__vma_atomic_valid_flag(vma, bit))
                set_bit((__force int)bit, bitmap);
 }
 
@@ -1023,10 +1021,9 @@ static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
  * This is necessarily racey, so callers must ensure that serialisation is
  * achieved through some other means, or that races are permissible.
  */
-static inline bool vma_flag_test_atomic(struct vm_area_struct *vma,
-                                       vma_flag_t bit)
+static inline bool vma_test_atomic_flag(struct vm_area_struct *vma, vma_flag_t bit)
 {
-       if (__vma_flag_atomic_valid(vma, bit))
+       if (__vma_atomic_valid_flag(vma, bit))
                return test_bit((__force int)bit, &vma->vm_flags);
 
        return false;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f790ec34400..e4fe9144667a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1749,7 +1749,7 @@ static bool file_backed_vma_is_retractable(struct vm_area_struct *vma)
         * obtained on guard region installation after the flag is set, so this
         * check being performed under this lock excludes races.
         */
-       if (vma_flag_test_atomic(vma, VMA_MAYBE_GUARD_BIT))
+       if (vma_test_atomic_flag(vma, VMA_MAYBE_GUARD_BIT))
                return false;
 
        return true;
diff --git a/mm/madvise.c b/mm/madvise.c
index 4bf4c8c38fd3..98d0fddcc165 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1142,7 +1142,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
         * acquire an mmap/VMA write lock to read it. All remaining readers may
         * or may not see the flag set, but we don't care.
         */
-       vma_flag_set_atomic(vma, VMA_MAYBE_GUARD_BIT);
+       vma_set_atomic_flag(vma, VMA_MAYBE_GUARD_BIT);
 
        /*
         * If anonymous and we are establishing page tables the VMA ought to
-- 
2.52.0
