Currently the percpu-rwsem has two issues:

 - it switches to (global) atomic ops while a writer is waiting, which
   can be for quite a while, and this slows down releasing the readers.

 - it employs synchronize_sched_expedited() _twice_, which is evil and
   should die -- it shoots IPIs around the machine.

This patch cures the first problem by ordering the reader state against
the reader count (see the comments in __percpu_down_read() and
percpu_down_write()). This replaces a global atomic op with a full
memory barrier, which avoids the global cacheline contention.
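
In sketch form, using only names from the patch below (this ordering matters
on the slow path, i.e. while a writer is around), the pairing is:

  /* reader, preemption disabled */
  __this_cpu_inc(*sem->refcount);
  smp_mb();                                  /* A, pairs with D */
  if (unlikely(sem->state == readers_block))
          __percpu_up_read(sem);             /* undo, then block on ->rw_sem */

  /* writer */
  sem->state = readers_block;
  smp_mb();                                  /* D, pairs with A */
  wait_event(sem->writer, readers_active_check(sem));

Either the reader observes readers_block and falls back to the rw_sem slow
path, or the writer observes the reader's increment and waits for it; each
side only needs a full barrier, no shared atomic variable is touched.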

It cures the second problem by employing Oleg's rcu-sync primitives,
which reduce this to zero sync_sched() calls in the 'normal' case of
no write contention -- global locks had better be rare -- and to at
most one sync_sched() call in case of contention.
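
The user-visible interface is unchanged; a minimal usage sketch, where
'my_sem' is just an example name and error handling is omitted:

  static struct percpu_rw_semaphore my_sem;  /* example only */

  percpu_init_rwsem(&my_sem);

  /* reader: a per-cpu increment while no writer is around */
  percpu_down_read(&my_sem);
  /* ... read-side critical section ... */
  percpu_up_read(&my_sem);

  /* writer: at most one sync_sched(), via rcu_sync_enter() */
  percpu_down_write(&my_sem);
  /* ... write-side critical section ... */
  percpu_up_write(&my_sem);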

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 include/linux/percpu-rwsem.h  |   62 +++++++++-
 kernel/locking/percpu-rwsem.c |  238 +++++++++++++++++++++---------------------
 2 files changed, 176 insertions(+), 124 deletions(-)

--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -5,18 +5,64 @@
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
 #include <linux/wait.h>
+#include <linux/rcusync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
-       unsigned int __percpu   *fast_read_ctr;
-       atomic_t                write_ctr;
+       unsigned int __percpu   *refcount;
+       int                     state;
+       struct rcu_sync_struct  rss;
+       wait_queue_head_t       writer;
        struct rw_semaphore     rw_sem;
-       atomic_t                slow_read_ctr;
-       wait_queue_head_t       write_waitq;
 };
 
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+extern void __percpu_down_read(struct percpu_rw_semaphore *);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+       might_sleep();
+
+       rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+       preempt_disable();
+       /*
+        * We are in an RCU-sched read-side critical section, so the writer
+        * cannot both take sem->rss out of its idle (fast-path) state and
+        * start checking counters while we are here. So if we see the idle
+        * state, we know that the writer won't be checking until we're past
+        * the preempt_enable() and that once its synchronize_sched() is
+        * done, the writer will see anything we did within this RCU-sched
+        * read-side critical section.
+        */
+       __this_cpu_inc(*sem->refcount);
+       if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+               __percpu_down_read(sem); /* Unconditional memory barrier. */
+       preempt_enable();
+       /*
+        * The barrier() from preempt_enable() prevents the compiler from
+        * bleeding the critical section out.
+        */
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+       /*
+        * The barrier() in preempt_disable() prevents the compiler from
+        * bleeding the critical section out.
+        */
+       preempt_disable();
+       /*
+        * Same as in percpu_down_read().
+        */
+       if (likely(rcu_sync_is_idle(&sem->rss)))
+               __this_cpu_dec(*sem->refcount);
+       else
+               __percpu_up_read(sem); /* Unconditional memory barrier. */
+       preempt_enable();
+
+       rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
@@ -25,10 +71,10 @@ extern int __percpu_init_rwsem(struct pe
                                const char *, struct lock_class_key *);
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
-#define percpu_init_rwsem(brw) \
+#define percpu_init_rwsem(sem)                                 \
 ({                                                             \
        static struct lock_class_key rwsem_key;                 \
-       __percpu_init_rwsem(brw, #brw, &rwsem_key);             \
+       __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
 })
 
 #endif
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -8,158 +8,164 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 
-int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+enum { readers_slow, readers_block };
+
+int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
                        const char *name, struct lock_class_key *rwsem_key)
 {
-       brw->fast_read_ctr = alloc_percpu(int);
-       if (unlikely(!brw->fast_read_ctr))
+       sem->refcount = alloc_percpu(unsigned int);
+       if (unlikely(!sem->refcount))
                return -ENOMEM;
 
-       /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
-       __init_rwsem(&brw->rw_sem, name, rwsem_key);
-       atomic_set(&brw->write_ctr, 0);
-       atomic_set(&brw->slow_read_ctr, 0);
-       init_waitqueue_head(&brw->write_waitq);
+       sem->state = readers_slow;
+       rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+       init_waitqueue_head(&sem->writer);
+       __init_rwsem(&sem->rw_sem, name, rwsem_key);
+
        return 0;
 }
 
-void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 {
-       free_percpu(brw->fast_read_ctr);
-       brw->fast_read_ctr = NULL; /* catch use after free bugs */
+       rcu_sync_dtor(&sem->rss);
+       free_percpu(sem->refcount);
+       sem->refcount = NULL; /* catch use after free bugs */
 }
 
-/*
- * This is the fast-path for down_read/up_read, it only needs to ensure
- * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
- * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
- * serialize with the preempt-disabled section below.
- *
- * The nontrivial part is that we should guarantee acquire/release semantics
- * in case when
- *
- *     R_W: down_write() comes after up_read(), the writer should see all
- *          changes done by the reader
- * or
- *     W_R: down_read() comes after up_write(), the reader should see all
- *          changes done by the writer
- *
- * If this helper fails the callers rely on the normal rw_semaphore and
- * atomic_dec_and_test(), so in this case we have the necessary barriers.
- *
- * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
- * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
- * reader inside the critical section. See the comments in down_write and
- * up_write below.
- */
-static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
+void __percpu_down_read(struct percpu_rw_semaphore *sem)
 {
-       bool success = false;
+       /*
+        * Due to having preemption disabled the decrement happens on
+        * the same CPU as the increment, avoiding the
+        * increment-on-one-CPU-and-decrement-on-another problem.
+        *
+        * And yes, if the reader misses the writer's assignment of
+        * readers_block to sem->state, then the writer is
+        * guaranteed to see the reader's increment.  Conversely, any
+        * readers that increment their sem->refcount after the
+        * writer looks are guaranteed to see the readers_block value,
+        * which in turn means that they are guaranteed to immediately
+        * decrement their sem->refcount, so that it doesn't matter
+        * that the writer missed them.
+        */
+
+       smp_mb(); /* A matches D */
+
+       if (likely(sem->state != readers_block))
+               return;
+
+       /*
+        * Per the above comment; we still have preemption disabled and
+        * will thus decrement on the same CPU as we incremented.
+        */
+       __percpu_up_read(sem);
+
+       /*
+        * We either call schedule() in the wait, or we'll fall through
+        * and reschedule on the preempt_enable() in percpu_down_read().
+        */
+       preempt_enable_no_resched();
+
+       /*
+        * Avoid lockdep for the down/up_read(); we already have the annotation.
+        */
+       __down_read(&sem->rw_sem);
+       __this_cpu_inc(*sem->refcount);
+       __up_read(&sem->rw_sem);
 
        preempt_disable();
-       if (likely(!atomic_read(&brw->write_ctr))) {
-               __this_cpu_add(*brw->fast_read_ctr, val);
-               success = true;
-       }
-       preempt_enable();
+}
+
+void __percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+       smp_mb(); /* B matches C */
+       /*
+        * In other words, if they see our decrement (presumably to aggregate
+        * zero, as that is the only time it matters) they will also see our
+        * critical section.
+        */
+       this_cpu_dec(*sem->refcount);
 
-       return success;
+       /* Prod writer to recheck readers_active */
+       wake_up(&sem->writer);
 }
 
+
+#define per_cpu_sum(var)                                               \
+({                                                                     \
+       typeof(var) __sum = 0;                                          \
+       int cpu;                                                        \
+       for_each_possible_cpu(cpu)                                      \
+               __sum += per_cpu(var, cpu);                             \
+       __sum;                                                          \
+})
+
 /*
- * Like the normal down_read() this is not recursive, the writer can
- * come after the first percpu_down_read() and create the deadlock.
- *
- * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
- * percpu_up_read() does rwsem_release(). This pairs with the usage
- * of ->rw_sem in percpu_down/up_write().
+ * Return true if the modular sum of the sem->refcount per-CPU variable is
+ * zero.  If this sum is zero, then it is stable due to the fact that if any
+ * newly arriving readers increment a given counter, they will immediately
+ * decrement that same counter.
  */
-void percpu_down_read(struct percpu_rw_semaphore *brw)
+static bool readers_active_check(struct percpu_rw_semaphore *sem)
 {
-       might_sleep();
-       if (likely(update_fast_ctr(brw, +1))) {
-               rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
-               return;
-       }
+       if (per_cpu_sum(*sem->refcount) != 0)
+               return false;
+
+       /*
+        * If we observed the decrement, ensure we see the entire critical
+        * section.
+        */
 
-       down_read(&brw->rw_sem);
-       atomic_inc(&brw->slow_read_ctr);
-       /* avoid up_read()->rwsem_release() */
-       __up_read(&brw->rw_sem);
+       smp_mb(); /* C matches B */
+
+       return true;
 }
 
-void percpu_up_read(struct percpu_rw_semaphore *brw)
+void percpu_down_write(struct percpu_rw_semaphore *sem)
 {
-       rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
+       down_write(&sem->rw_sem);
 
-       if (likely(update_fast_ctr(brw, -1)))
-               return;
+       /* Notify readers to take the slow path. */
+       rcu_sync_enter(&sem->rss);
 
-       /* false-positive is possible but harmless */
-       if (atomic_dec_and_test(&brw->slow_read_ctr))
-               wake_up_all(&brw->write_waitq);
-}
+       /*
+        * Notify new readers to block; up until now, and thus throughout the
+        * longish rcu_sync_enter() above, new readers could still come in.
+        */
+       sem->state = readers_block;
 
-static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
-{
-       unsigned int sum = 0;
-       int cpu;
+       smp_mb(); /* D matches A */
 
-       for_each_possible_cpu(cpu) {
-               sum += per_cpu(*brw->fast_read_ctr, cpu);
-               per_cpu(*brw->fast_read_ctr, cpu) = 0;
-       }
+       /*
+        * If they don't see our write of readers_block to sem->state,
+        * then we are guaranteed to see their sem->refcount increment, and
+        * therefore will wait for them.
+        */
 
-       return sum;
+       /* Wait for all now active readers to complete. */
+       wait_event(sem->writer, readers_active_check(sem));
 }
 
-/*
- * A writer increments ->write_ctr to force the readers to switch to the
- * slow mode, note the atomic_read() check in update_fast_ctr().
- *
- * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
- * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
- * counter it represents the number of active readers.
- *
- * Finally the writer takes ->rw_sem for writing and blocks the new readers,
- * then waits until the slow counter becomes zero.
- */
-void percpu_down_write(struct percpu_rw_semaphore *brw)
+void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
-       /* tell update_fast_ctr() there is a pending writer */
-       atomic_inc(&brw->write_ctr);
        /*
-        * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
-        *    so that update_fast_ctr() can't succeed.
-        *
-        * 2. Ensures we see the result of every previous this_cpu_add() in
-        *    update_fast_ctr().
+        * Signal that the writer is done; the fast path is not re-enabled yet.
         *
-        * 3. Ensures that if any reader has exited its critical section via
-        *    fast-path, it executes a full memory barrier before we return.
-        *    See R_W case in the comment above update_fast_ctr().
+        * One reason that we cannot immediately re-enable the fast path is
+        * that new readers might fail to see the results of this writer's
+        * critical section.
         */
-       synchronize_sched_expedited();
-
-       /* exclude other writers, and block the new readers completely */
-       down_write(&brw->rw_sem);
+       sem->state = readers_slow;
 
-       /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
-       atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
-
-       /* wait for all readers to complete their percpu_up_read() */
-       wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
-}
+       /*
+        * Release the write lock, this will allow readers back in the game.
+        */
+       up_write(&sem->rw_sem);
 
-void percpu_up_write(struct percpu_rw_semaphore *brw)
-{
-       /* release the lock, but the readers can't use the fast-path */
-       up_write(&brw->rw_sem);
        /*
-        * Insert the barrier before the next fast-path in down_read,
-        * see W_R case in the comment above update_fast_ctr().
+        * Once this completes (at least one RCU grace period hence) the reader
+        * fast path will be available again. Safe to use outside the exclusive
+        * write lock because it's counting.
         */
-       synchronize_sched_expedited();
-       /* the last writer unblocks update_fast_ctr() */
-       atomic_dec(&brw->write_ctr);
+       rcu_sync_exit(&sem->rss);
 }

