From: Suren Baghdasaryan <sur...@google.com>

Add helper functions to speculatively perform operations without
read-locking mmap_lock, expecting that mmap_lock will not be
write-locked and the mm will not be modified from under us.
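
The expected usage pattern is roughly the following sketch, where
do_speculative_lookup() is a placeholder for whatever mm state the
caller wants to read, not a function added by this patch:

	int seq;

	if (mmap_lock_speculation_start(mm, &seq)) {
		/* read mm state without taking mmap_lock */
		result = do_speculative_lookup(mm);
		/* validate that mmap_lock was not write-locked meanwhile */
		if (mmap_lock_speculation_end(mm, seq))
			return result;
	}
	/* speculation failed, fall back to taking mmap_lock */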

Suggested-by: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Suren Baghdasaryan <sur...@google.com>
Signed-off-by: Andrii Nakryiko <and...@kernel.org>
Link: https://lore.kernel.org/bpf/20240912210222.186542-1-sur...@google.com
---
 include/linux/mm_types.h  |  3 ++
 include/linux/mmap_lock.h | 72 ++++++++++++++++++++++++++++++++-------
 kernel/fork.c             |  3 --
 3 files changed, 63 insertions(+), 15 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 485424979254..d5e3f907eea4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -876,6 +876,9 @@ struct mm_struct {
                 * Roughly speaking, incrementing the sequence number is
                 * equivalent to releasing locks on VMAs; reading the sequence
                 * number can be part of taking a read lock on a VMA.
+                * Incremented every time mmap_lock is write-locked/unlocked.
+                * Initialized to 0, therefore odd values indicate mmap_lock
+                * is write-locked and even values indicate it is released.
                 *
                 * Can be modified under write mmap_lock using RELEASE
                 * semantics.
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index de9dc20b01ba..9d23635bc701 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -71,39 +71,84 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
+static inline void init_mm_lock_seq(struct mm_struct *mm)
+{
+       mm->mm_lock_seq = 0;
+}
+
 /*
- * Drop all currently-held per-VMA locks.
- * This is called from the mmap_lock implementation directly before releasing
- * a write-locked mmap_lock (or downgrading it to read-locked).
- * This should normally NOT be called manually from other places.
- * If you want to call this manually anyway, keep in mind that this will release
- * *all* VMA write locks, including ones from further up the stack.
+ * Increment mm->mm_lock_seq when mmap_lock is write-locked (ACQUIRE semantics)
+ * or write-unlocked (RELEASE semantics).
  */
-static inline void vma_end_write_all(struct mm_struct *mm)
+static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire)
 {
        mmap_assert_write_locked(mm);
        /*
         * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
         * mmap_lock being held.
-        * We need RELEASE semantics here to ensure that preceding stores into
-        * the VMA take effect before we unlock it with this store.
-        * Pairs with ACQUIRE semantics in vma_start_read().
         */
-       smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
+
+       if (acquire) {
+               WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+               /*
+                * For ACQUIRE semantics we should ensure no following stores are
+                * reordered to appear before the mm->mm_lock_seq modification.
+                */
+               smp_wmb();
+       } else {
+               /*
+                * We need RELEASE semantics here to ensure that preceding stores
+                * into the VMA take effect before we unlock it with this store.
+                * Pairs with ACQUIRE semantics in vma_start_read().
+                */
+               smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
+       }
+}
+
+static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq)
+{
+       /* Pairs with RELEASE semantics in inc_mm_lock_seq(). */
+       *seq = smp_load_acquire(&mm->mm_lock_seq);
+       /* Allow speculation if mmap_lock is not write-locked */
+       return (*seq & 1) == 0;
+}
+
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq)
+{
+       /* Pairs with ACQUIRE semantics in inc_mm_lock_seq(). */
+       smp_rmb();
+       return seq == READ_ONCE(mm->mm_lock_seq);
 }
+
 #else
-static inline void vma_end_write_all(struct mm_struct *mm) {}
+static inline void init_mm_lock_seq(struct mm_struct *mm) {}
+static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire) {}
+static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq) { return false; }
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq) { return false; }
 #endif
 
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should NOT be called manually from other places.
+ */
+static inline void vma_end_write_all(struct mm_struct *mm)
+{
+       inc_mm_lock_seq(mm, false);
+}
+
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
        init_rwsem(&mm->mmap_lock);
+       init_mm_lock_seq(mm);
 }
 
 static inline void mmap_write_lock(struct mm_struct *mm)
 {
        __mmap_lock_trace_start_locking(mm, true);
        down_write(&mm->mmap_lock);
+       inc_mm_lock_seq(mm, true);
        __mmap_lock_trace_acquire_returned(mm, true, true);
 }
 
@@ -111,6 +156,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
 {
        __mmap_lock_trace_start_locking(mm, true);
        down_write_nested(&mm->mmap_lock, subclass);
+       inc_mm_lock_seq(mm, true);
        __mmap_lock_trace_acquire_returned(mm, true, true);
 }
 
@@ -120,6 +166,8 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
 
        __mmap_lock_trace_start_locking(mm, true);
        ret = down_write_killable(&mm->mmap_lock);
+       if (!ret)
+               inc_mm_lock_seq(mm, true);
        __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
        return ret;
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 18bdc87209d0..c44b71d354ee 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1259,9 +1259,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        seqcount_init(&mm->write_protect_seq);
        mmap_init_lock(mm);
        INIT_LIST_HEAD(&mm->mmlist);
-#ifdef CONFIG_PER_VMA_LOCK
-       mm->mm_lock_seq = 0;
-#endif
        mm_pgtables_bytes_init(mm);
        mm->map_count = 0;
        mm->locked_vm = 0;
-- 
2.43.5