[tip:locking/urgent] locking/qrwlock: Rename functions to queued_*()

2015-07-06 Thread tip-bot for Waiman Long
Commit-ID:  f7d71f2052555ae57b47322f2c2f6c29ff2438ae
Gitweb: http://git.kernel.org/tip/f7d71f2052555ae57b47322f2c2f6c29ff2438ae
Author: Waiman Long 
AuthorDate: Fri, 19 Jun 2015 11:50:00 -0400
Committer:  Ingo Molnar 
CommitDate: Mon, 6 Jul 2015 14:11:27 +0200

locking/qrwlock: Rename functions to queued_*()

To sync up with the naming convention used in qspinlock, all the
qrwlock functions were renamed to start with "queued" instead of
"queue".

Signed-off-by: Waiman Long 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Arnd Bergmann 
Cc: Douglas Hatch 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Scott J Norton 
Cc: Thomas Gleixner 
Cc: Will Deacon 
Link: http://lkml.kernel.org/r/1434729002-57724-2-git-send-email-waiman.l...@hp.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/qrwlock.h |  4 +--
 include/asm-generic/qrwlock.h  | 58 +-
 kernel/locking/qrwlock.c   | 12 -
 3 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index ae0e241..a8810bf 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -4,8 +4,8 @@
 #include <asm-generic/qrwlock_types.h>
 
 #ifndef CONFIG_X86_PPRO_FENCE
-#define queue_write_unlock queue_write_unlock
-static inline void queue_write_unlock(struct qrwlock *lock)
+#define queued_write_unlock queued_write_unlock
+static inline void queued_write_unlock(struct qrwlock *lock)
 {
 barrier();
 ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54..55e3ee1 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,33 +36,33 @@
 /*
  * External function declarations
  */
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock- would read_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
 {
 return !(atomic_read(&lock->cnts) & _QW_WMASK);
 }
 
 /**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock- would write_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
 {
 return !atomic_read(&lock->cnts);
 }
 
 /**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
 {
u32 cnts;
 
@@ -77,11 +77,11 @@ static inline int queue_read_trylock(struct qrwlock *lock)
 }
 
 /**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
 {
u32 cnts;
 
@@ -93,10 +93,10 @@ static inline int queue_write_trylock(struct qrwlock *lock)
 cnts, cnts | _QW_LOCKED) == cnts);
 }
 /**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
  */
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
 {
u32 cnts;
 
@@ -105,27 +105,27 @@ static inline void queue_read_lock(struct qrwlock *lock)
return;
 
/* The slowpath will decrement the reader count, if necessary. */
-   queue_read_lock_slowpath(lock);
+   queued_read_lock_slowpath(lock);
 }
 
 /**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
 {
/* Optimize for the unfair lock case where the fair flag is 0. */
 if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
return;
 
-   queue_write_lock_slowpath(lock);
+   queued_write_lock_slowpath(lock);
 }
 
 /**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
