Commit-ID:  916633a403702549d37ea353e63a68e5b0dc27ad
Gitweb:     http://git.kernel.org/tip/916633a403702549d37ea353e63a68e5b0dc27ad
Author:     Michal Hocko <mho...@suse.com>
AuthorDate: Thu, 7 Apr 2016 17:12:31 +0200
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 22 Apr 2016 08:58:33 +0200

locking/rwsem: Provide down_write_killable()

Now that all the architectures implement the necessary glue code
we can introduce down_write_killable(). The only difference wrt. regular
down_write() is that the slow path waits in TASK_KILLABLE state and the
interruption by the fatal signal is reported as -EINTR to the caller.

Signed-off-by: Michal Hocko <mho...@suse.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Chris Zankel <ch...@zankel.net>
Cc: David S. Miller <da...@davemloft.net>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Max Filippov <jcmvb...@gmail.com>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Davidlohr Bueso <dbu...@suse.de>
Cc: Jason Low <jason.l...@hp.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Tony Luck <tony.l...@intel.com>
Cc: linux-al...@vger.kernel.org
Cc: linux-a...@vger.kernel.org
Cc: linux-i...@vger.kernel.org
Cc: linux-s...@vger.kernel.org
Cc: linux...@vger.kernel.org
Cc: linux-xte...@linux-xtensa.org
Cc: sparcli...@vger.kernel.org
Link: http://lkml.kernel.org/r/1460041951-22347-12-git-send-email-mho...@kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/rwsem.h |  6 +++---
 include/linux/lockdep.h      | 15 +++++++++++++++
 include/linux/rwsem.h        |  1 +
 kernel/locking/rwsem.c       | 19 +++++++++++++++++++
 4 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index d759c5f..453744c 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -102,9 +102,9 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 #define ____down_write(sem, slow_path)                 \
 ({                                                     \
        long tmp;                                       \
-       struct rw_semaphore* ret = sem;                 \
+       struct rw_semaphore* ret;                       \
        asm volatile("# beginning down_write\n\t"       \
-                    LOCK_PREFIX "  xadd      %1,(%2)\n\t"      \
+                    LOCK_PREFIX "  xadd      %1,(%3)\n\t"      \
                     /* adds 0xffff0001, returns the old value */ \
                     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
                     /* was the active mask 0 before? */\
@@ -112,7 +112,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                     "  call " slow_path "\n"           \
                     "1:\n"                             \
                     "# ending down_write"              \
-                    : "+m" (sem->count), "=d" (tmp), "+a" (ret)        \
+                    : "+m" (sem->count), "=d" (tmp), "=a" (ret)        \
                     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
                     : "memory", "cc");                 \
        ret;                                            \
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b19..accfe56 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -444,6 +444,18 @@ do {                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);                     \
 } while (0)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock)                        \
+({                                                             \
+       int ____err = 0;                                        \
+       if (!try(_lock)) {                                      \
+               lock_contended(&(_lock)->dep_map, _RET_IP_);    \
+               ____err = lock(_lock);                          \
+       }                                                       \
+       if (!____err)                                           \
+               lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
+       ____err;                                                \
+})
+
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +464,9 @@ do {                                                       \
 #define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+       lock(_lock)
+
 #endif /* CONFIG_LOCK_STAT */
 
 #ifdef CONFIG_LOCKDEP
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 7d7ae02..d1c12d1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -118,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
  * lock for writing
  */
 extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 205be0c..c817216 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -55,6 +55,25 @@ void __sched down_write(struct rw_semaphore *sem)
 EXPORT_SYMBOL(down_write);
 
 /*
+ * lock for writing
+ */
+int __sched down_write_killable(struct rw_semaphore *sem)
+{
+       might_sleep();
+       rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+       if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+               rwsem_release(&sem->dep_map, 1, _RET_IP_);
+               return -EINTR;
+       }
+
+       rwsem_set_owner(sem);
+       return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable);
+
+/*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
 int down_write_trylock(struct rw_semaphore *sem)

Reply via email to