On Fri, Nov 22, 2013 at 02:04:46PM -0500, Waiman Long wrote:
> There is a pending MCS lock patch series that adds generic MCS lock
> helper functions to do MCS-style locking. This patch enables the
> queue rwlock to use those generic MCS lock/unlock primitives for its
> internal queuing. This patch should only be merged after that generic
> MCS locking patch series has been merged.
> 
> Signed-off-by: Waiman Long <waiman.l...@hp.com>

Reviewed-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
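
For anyone reading along who has not seen MCS-style queuing before: each
waiter spins on a flag in its own stack-allocated node rather than on the
shared lock word, which is what the wait_in_queue()/signal_next() pair
removed below open-coded. A minimal user-space sketch of the technique,
using C11 atomics (the names mcs_node/mcs_lock/mcs_unlock are illustrative
only, not the kernel's mcs_spinlock API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_bool locked;		/* true while this waiter must spin */
};

static void mcs_lock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, true, memory_order_relaxed);

	/* Swap ourselves in as the new tail of the wait queue. */
	prev = atomic_exchange_explicit(tail, node, memory_order_acq_rel);
	if (prev) {
		/* Link in behind the previous waiter, then spin locally. */
		atomic_store_explicit(&prev->next, node, memory_order_release);
		while (atomic_load_explicit(&node->locked, memory_order_acquire))
			;	/* a cpu_relax() equivalent belongs here */
	}
}

static void mcs_unlock(struct mcs_node *_Atomic *tail, struct mcs_node *node)
{
	struct mcs_node *next;

	next = atomic_load_explicit(&node->next, memory_order_acquire);
	if (!next) {
		/* No visible successor: try to empty the queue. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(tail, &expected, NULL))
			return;
		/* A successor is mid-enqueue; wait for its next link. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}
	/* Pass queue-head status to the successor. */
	atomic_store_explicit(&next->locked, false, memory_order_release);
}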

> ---
>  include/asm-generic/qrwlock.h |    7 +--
>  kernel/locking/qrwlock.c      |   83 +++-------------------------------------
>  2 files changed, 9 insertions(+), 81 deletions(-)
> 
> diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
> index 9d085cb..335473a 100644
> --- a/include/asm-generic/qrwlock.h
> +++ b/include/asm-generic/qrwlock.h
> @@ -37,10 +37,7 @@
>   * the writer field. The least significant 8 bits is the writer field
>   * whereas the remaining 24 bits is the reader count.
>   */
> -struct qrwnode {
> -     struct qrwnode *next;
> -     bool            wait;   /* Waiting flag */
> -};
> +struct mcs_spinlock;
> 
>  typedef struct qrwlock {
>       union qrwcnts {
> @@ -55,7 +52,7 @@ typedef struct qrwlock {
>               };
>               u32     rw;             /* Reader/writer number pair */
>       } cnts;
> -     struct qrwnode *waitq;          /* Tail of waiting queue */
> +     struct mcs_spinlock *waitq;     /* Tail of waiting queue */
>  } arch_rwlock_t;
> 
>  /*
> diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
> index ea5553d..b3e5c2b 100644
> --- a/kernel/locking/qrwlock.c
> +++ b/kernel/locking/qrwlock.c
> @@ -20,6 +20,7 @@
>  #include <linux/cpumask.h>
>  #include <linux/percpu.h>
>  #include <linux/hardirq.h>
> +#include <linux/mcs_spinlock.h>
>  #include <asm-generic/qrwlock.h>
> 
>  /*
> @@ -55,76 +56,6 @@
>  # endif
>  #endif
> 
> -#ifndef smp_mb__store_release
> -# ifdef CONFIG_X86
> -#   define smp_mb__store_release()   barrier()
> -# else
> -#   define smp_mb__store_release()   smp_mb()
> -# endif
> -#endif
> -
> -/**
> - * wait_in_queue - Add to queue and wait until it is at the head
> - * @lock: Pointer to queue rwlock structure
> - * @node: Node pointer to be added to the queue
> - */
> -static __always_inline void
> -wait_in_queue(struct qrwlock *lock, struct qrwnode *node)
> -{
> -     struct qrwnode *prev;
> -
> -     node->next = NULL;
> -     node->wait = true;
> -     prev = xchg(&lock->waitq, node);
> -     if (prev) {
> -             prev->next = node;
> -             /*
> -              * Wait until the waiting flag is off
> -              */
> -             while (ACCESS_ONCE(node->wait))
> -                     arch_mutex_cpu_relax();
> -             smp_mb__load_acquire();
> -     }
> -}
> -
> -/**
> - * signal_next - Signal the next one in queue to be at the head
> - * @lock: Pointer to queue rwlock structure
> - * @node: Node pointer to the current head of queue
> - */
> -static __always_inline void
> -signal_next(struct qrwlock *lock, struct qrwnode *node)
> -{
> -     struct qrwnode *next;
> -
> -     /*
> -      * Try to notify the next node first without disturbing the cacheline
> -      * of the lock. If that fails, check to see if it is the last node
> -      * and so should clear the wait queue.
> -      */
> -     next = ACCESS_ONCE(node->next);
> -     if (likely(next))
> -             goto notify_next;
> -
> -     /*
> -      * Clear the wait queue if it is the last node
> -      */
> -     if ((ACCESS_ONCE(lock->waitq) == node) &&
> -         (cmpxchg(&lock->waitq, node, NULL) == node))
> -                     return;
> -     /*
> -      * Wait until the next one in queue set up the next field
> -      */
> -     while (likely(!(next = ACCESS_ONCE(node->next))))
> -             arch_mutex_cpu_relax();
> -     /*
> -      * The next one in queue is now at the head
> -      */
> -notify_next:
> -     smp_mb__store_release();
> -     ACCESS_ONCE(next->wait) = false;
> -}
> -
>  /**
>   * rspin_until_writer_unlock - inc reader count & spin until writer is gone
>   * @lock: Pointer to queue rwlock structure
> @@ -148,7 +79,7 @@ rspin_until_writer_unlock(struct qrwlock *lock, union qrwcnts cnts)
>   */
>  void queue_read_lock_slowpath(struct qrwlock *lock)
>  {
> -     struct qrwnode node;
> +     struct mcs_spinlock node;
>       union qrwcnts cnts;
> 
>       /*
> @@ -168,7 +99,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
>       /*
>        * Put the reader into the wait queue
>        */
> -     wait_in_queue(lock, &node);
> +     mcs_spin_lock(&lock->waitq, &node);
> 
>       /*
>        * At the head of the wait queue now, wait until the writer state
> @@ -183,7 +114,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
>        * Need to have a barrier with read-acquire semantics
>        */
>       smp_mb__load_acquire();
> -     signal_next(lock, &node);
> +     mcs_spin_unlock(&lock->waitq, &node);
>  }
>  EXPORT_SYMBOL(queue_read_lock_slowpath);
> 
> @@ -247,12 +178,12 @@ static noinline int queue_write_3step_lock(struct qrwlock *lock)
>   */
>  void queue_write_lock_slowpath(struct qrwlock *lock)
>  {
> -     struct qrwnode node;
> +     struct mcs_spinlock node;
> 
>       /*
>        * Put the writer into the wait queue
>        */
> -     wait_in_queue(lock, &node);
> +     mcs_spin_lock(&lock->waitq, &node);
> 
>       /*
>        * At the head of the wait queue now, call queue_write_3step_lock()
> @@ -260,6 +191,6 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
>        */
>       while (!queue_write_3step_lock(lock))
>               arch_mutex_cpu_relax();
> -     signal_next(lock, &node);
> +     mcs_spin_unlock(&lock->waitq, &node);
>  }
>  EXPORT_SYMBOL(queue_write_lock_slowpath);
> -- 
> 1.7.1
> 
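
As an aside, the 8-bit writer / 24-bit reader split that the qrwlock.h
comment above describes can be pictured with a short stand-alone sketch
(the constant names below are made up for illustration, not taken from
the patch):

#include <stdint.h>
#include <stdio.h>

#define WRITER_MASK	0x000000ffu	/* least significant 8 bits       */
#define READER_BIAS	0x00000100u	/* one reader == +1 in bits 8..31 */

int main(void)
{
	uint32_t rw = 0;

	rw += READER_BIAS;		/* first reader arrives  */
	rw += READER_BIAS;		/* second reader arrives */
	printf("writer byte: 0x%02x, readers: %u\n",
	       rw & WRITER_MASK, rw >> 8);
	return 0;
}

This prints "writer byte: 0x00, readers: 2": reader increments never
disturb the low byte, so a writer can test that byte in isolation.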
