[tip:locking/core] locking/lockdep: Add new check to lock_downgrade()

2017-03-16 Thread tip-bot for J. R. Okajima
Commit-ID:  6419c4af777a773a45a1b1af735de0fcd9a7dcc7
Gitweb: http://git.kernel.org/tip/6419c4af777a773a45a1b1af735de0fcd9a7dcc7
Author: J. R. Okajima 
AuthorDate: Fri, 3 Feb 2017 01:38:17 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 16 Mar 2017 09:57:07 +0100

locking/lockdep: Add new check to lock_downgrade()

Commit:

  f8319483f57f ("locking/lockdep: Provide a type check for lock_is_held")

didn't fully cover rwsems as downgrade_write() was left out.

Introduce lock_downgrade() and use it to add new checks.

See-also: http://marc.info/?l=linux-kernel&m=148581164003149&w=2
Originally-written-by: Peter Zijlstra 
Signed-off-by: J. R. Okajima 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Andrew Morton 
Cc: Linus Torvalds 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/1486053497-9948-3-git-send-email-hooanon...@gmail.com
[ Rewrote the changelog. ]
Signed-off-by: Ingo Molnar 
---
 include/linux/lockdep.h  |  3 +++
 kernel/locking/lockdep.c | 55 
 kernel/locking/rwsem.c   |  6 ++
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1e327bb..fffe49f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -361,6 +361,8 @@ static inline void lock_set_subclass(struct lockdep_map 
*lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
+extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
+
 extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
@@ -411,6 +413,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
 # define lock_release(l, n, i) do { } while (0)
+# define lock_downgrade(l, i)  do { } while (0)
 # define lock_set_class(l, n, k, s, i) do { } while (0)
 # define lock_set_subclass(l, s, i)do { } while (0)
 # define lockdep_set_current_reclaim_state(g)  do { } while (0)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index da79548..b1a1cef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3533,6 +3533,44 @@ __lock_set_class(struct lockdep_map *lock, const char 
*name,
return 1;
 }
 
+static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+{
+   struct task_struct *curr = current;
+   struct held_lock *hlock;
+   unsigned int depth;
+   int i;
+
+   depth = curr->lockdep_depth;
+   /*
+* This function is about (re)setting the class of a held lock,
+* yet we're not actually holding any locks. Naughty user!
+*/
+   if (DEBUG_LOCKS_WARN_ON(!depth))
+   return 0;
+
+   hlock = find_held_lock(curr, lock, depth, &i);
+   if (!hlock)
+   return print_unlock_imbalance_bug(curr, lock, ip);
+
+   curr->lockdep_depth = i;
+   curr->curr_chain_key = hlock->prev_chain_key;
+
+   WARN(hlock->read, "downgrading a read lock");
+   hlock->read = 1;
+   hlock->acquire_ip = ip;
+
+   if (reacquire_held_locks(curr, depth, i))
+   return 0;
+
+   /*
+* I took it apart and put it back together again, except now I have
+* these 'spare' parts.. where shall I put them.
+*/
+   if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+   return 0;
+   return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks - this gets
  * called on mutex_unlock()/spin_unlock*() (or on a failed
@@ -3759,6 +3797,23 @@ void lock_set_class(struct lockdep_map *lock, const char 
*name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+{
+   unsigned long flags;
+
+   if (unlikely(current->lockdep_recursion))
+   return;
+
+   raw_local_irq_save(flags);
+   current->lockdep_recursion = 1;
+   check_flags(flags);
+   if (__lock_downgrade(lock, ip))
+   check_chain_key(current);
+   current->lockdep_recursion = 0;
+   raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_downgrade);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 90a74cc..4d48b1c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -124,10 +124,8 @@ EXPORT_SYMBOL(up_write);
  */
 void downgrade_write(struct rw_semaphore *sem)
 {
-   /*
-* lockdep: a downgraded write will live on as a write
-* dependency.
-*/
+   lock_downgrade(&sem->dep_map, _RET_IP_);
+
rwsem_set_reader_owned(sem);
__downgrade_write(sem);
 }


[tip:locking/core] locking/lockdep: Factor out the find_held_lock() helper function

2017-03-16 Thread tip-bot for J. R. Okajima
Commit-ID:  41c2c5b86a5e1a691ddacfc03b631b87a0b19043
Gitweb: http://git.kernel.org/tip/41c2c5b86a5e1a691ddacfc03b631b87a0b19043
Author: J. R. Okajima 
AuthorDate: Fri, 3 Feb 2017 01:38:15 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 16 Mar 2017 09:57:06 +0100

locking/lockdep: Factor out the find_held_lock() helper function

A simple consolidation to factor out repeated patterns.

The behaviour should not change.

Signed-off-by: J. R. Okajima 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Andrew Morton 
Cc: Linus Torvalds 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/1486053497-9948-1-git-send-email-hooanon...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 114 ++-
 1 file changed, 54 insertions(+), 60 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a95e5d1..0d28b82 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3437,13 +3437,49 @@ static int match_held_lock(struct held_lock *hlock, 
struct lockdep_map *lock)
return 0;
 }
 
+/* @depth must not be zero */
+static struct held_lock *find_held_lock(struct task_struct *curr,
+   struct lockdep_map *lock,
+   unsigned int depth, int *idx)
+{
+   struct held_lock *ret, *hlock, *prev_hlock;
+   int i;
+
+   i = depth - 1;
+   hlock = curr->held_locks + i;
+   ret = hlock;
+   if (match_held_lock(hlock, lock))
+   goto out;
+
+   ret = NULL;
+   for (i--, prev_hlock = hlock--;
+i >= 0;
+i--, prev_hlock = hlock--) {
+   /*
+* We must not cross into another context:
+*/
+   if (prev_hlock->irq_context != hlock->irq_context) {
+   ret = NULL;
+   break;
+   }
+   if (match_held_lock(hlock, lock)) {
+   ret = hlock;
+   break;
+   }
+   }
+
+out:
+   *idx = i;
+   return ret;
+}
+
 static int
 __lock_set_class(struct lockdep_map *lock, const char *name,
 struct lock_class_key *key, unsigned int subclass,
 unsigned long ip)
 {
struct task_struct *curr = current;
-   struct held_lock *hlock, *prev_hlock;
+   struct held_lock *hlock;
struct lock_class *class;
unsigned int depth;
int i;
@@ -3456,21 +3492,10 @@ __lock_set_class(struct lockdep_map *lock, const char 
*name,
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
 
-   prev_hlock = NULL;
-   for (i = depth-1; i >= 0; i--) {
-   hlock = curr->held_locks + i;
-   /*
-* We must not cross into another context:
-*/
-   if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
-   break;
-   if (match_held_lock(hlock, lock))
-   goto found_it;
-   prev_hlock = hlock;
-   }
-   return print_unlock_imbalance_bug(curr, lock, ip);
+   hlock = find_held_lock(curr, lock, depth, &i);
+   if (!hlock)
+   return print_unlock_imbalance_bug(curr, lock, ip);
 
-found_it:
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes + 1;
@@ -3508,7 +3533,7 @@ static int
 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
struct task_struct *curr = current;
-   struct held_lock *hlock, *prev_hlock;
+   struct held_lock *hlock;
unsigned int depth;
int i;
 
@@ -3527,21 +3552,10 @@ __lock_release(struct lockdep_map *lock, int nested, 
unsigned long ip)
 * Check whether the lock exists in the current stack
 * of held locks:
 */
-   prev_hlock = NULL;
-   for (i = depth-1; i >= 0; i--) {
-   hlock = curr->held_locks + i;
-   /*
-* We must not cross into another context:
-*/
-   if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
-   break;
-   if (match_held_lock(hlock, lock))
-   goto found_it;
-   prev_hlock = hlock;
-   }
-   return print_unlock_imbalance_bug(curr, lock, ip);
+   hlock = find_held_lock(curr, lock, depth, &i);
+   if (!hlock)
+   return print_unlock_imbalance_bug(curr, lock, ip);
 
-found_it:
if (hlock->instance == lock)
lock_release_holdtime(hlock);
 
@@ -3903,7 +3917,7 @@ static void
 __lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
struct task_struct *curr = current;
-   struct held_lock *hlock, *prev_hlock;
+   struct held_lock *hlock;
struct loc

[tip:locking/core] locking/lockdep: Factor out the validate_held_lock() helper function

2017-03-16 Thread tip-bot for J. R. Okajima
Commit-ID:  e969970be033841d4c16b2e8ec8a3608347db861
Gitweb: http://git.kernel.org/tip/e969970be033841d4c16b2e8ec8a3608347db861
Author: J. R. Okajima 
AuthorDate: Fri, 3 Feb 2017 01:38:16 +0900
Committer:  Ingo Molnar 
CommitDate: Thu, 16 Mar 2017 09:57:07 +0100

locking/lockdep: Factor out the validate_held_lock() helper function

Behaviour should not change.

Signed-off-by: J. R. Okajima 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Andrew Morton 
Cc: Linus Torvalds 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: 
http://lkml.kernel.org/r/1486053497-9948-2-git-send-email-hooanon...@gmail.com
Signed-off-by: Ingo Molnar 
---
 kernel/locking/lockdep.c | 40 ++--
 1 file changed, 22 insertions(+), 18 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0d28b82..da79548 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3473,6 +3473,24 @@ out:
return ret;
 }
 
+static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
+ int idx)
+{
+   struct held_lock *hlock;
+
+   for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
+   if (!__lock_acquire(hlock->instance,
+   hlock_class(hlock)->subclass,
+   hlock->trylock,
+   hlock->read, hlock->check,
+   hlock->hardirqs_off,
+   hlock->nest_lock, hlock->acquire_ip,
+   hlock->references, hlock->pin_count))
+   return 1;
+   }
+   return 0;
+}
+
 static int
 __lock_set_class(struct lockdep_map *lock, const char *name,
 struct lock_class_key *key, unsigned int subclass,
@@ -3503,15 +3521,8 @@ __lock_set_class(struct lockdep_map *lock, const char 
*name,
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
 
-   for (; i < depth; i++) {
-   hlock = curr->held_locks + i;
-   if (!__lock_acquire(hlock->instance,
-   hlock_class(hlock)->subclass, hlock->trylock,
-   hlock->read, hlock->check, hlock->hardirqs_off,
-   hlock->nest_lock, hlock->acquire_ip,
-   hlock->references, hlock->pin_count))
-   return 0;
-   }
+   if (reacquire_held_locks(curr, depth, i))
+   return 0;
 
/*
 * I took it apart and put it back together again, except now I have
@@ -3582,15 +3593,8 @@ __lock_release(struct lockdep_map *lock, int nested, 
unsigned long ip)
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
 
-   for (i++; i < depth; i++) {
-   hlock = curr->held_locks + i;
-   if (!__lock_acquire(hlock->instance,
-   hlock_class(hlock)->subclass, hlock->trylock,
-   hlock->read, hlock->check, hlock->hardirqs_off,
-   hlock->nest_lock, hlock->acquire_ip,
-   hlock->references, hlock->pin_count))
-   return 0;
-   }
+   if (reacquire_held_locks(curr, depth, i + 1))
+   return 0;
 
/*
 * We had N bottles of beer on the wall, we drank one, but now