Oleg Nesterov recently noticed that the lockdep annotations in lglock.c
are not sufficient to detect some obvious deadlocks, such as recursive
acquisition of the same lglock (lg_local_lock(LOCK) followed by
lg_local_lock(LOCK)), or lock order inversion against an ordinary
spinlock (spin_lock(X) + lg_local_lock(Y) vs lg_local_lock(Y) +
spin_lock(X)).
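
To make the two patterns concrete, here is a minimal sketch (X, Y and L
are hypothetical locks introduced only for illustration):

	/* Pattern 1: recursive acquisition.  The underlying per-cpu
	 * arch_spinlock_t is not recursive, so this self-deadlocks,
	 * yet the old annotation kept lockdep silent about it. */
	lg_local_lock(&L);
	lg_local_lock(&L);

	/* Pattern 2: lock order inversion between a spinlock and an
	 * lglock, taken in opposite order on two code paths. */
	spin_lock(&X);			lg_local_lock(&Y);
	lg_local_lock(&Y);		spin_lock(&X);

Because rwlock_acquire_read() annotates a recursive read lock, lockdep
considered both orderings safe and reported neither.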

Both issues are easily fixed by telling lockdep that lglock's local
locks are not recursive. We shouldn't reuse the rwlock acquire/release
annotations here, as lglock doesn't share rwlock's reader-recursion
semantics. Instead we can base our lockdep annotations on the
lock_acquire_shared (for local lglock) and lock_acquire_exclusive
(for global lglock) helpers.
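
For reference, those helpers in include/linux/lockdep.h reduce to
roughly the following (a sketch, not verbatim; the fourth argument to
lock_acquire() is the read state: 0 = exclusive, 1 = shared,
2 = shared-recursive):

	#define lock_acquire_exclusive(l, s, t, n, i) \
		lock_acquire(l, s, t, 0, 2, n, i)
	#define lock_acquire_shared(l, s, t, n, i) \
		lock_acquire(l, s, t, 1, 2, n, i)
	#define lock_acquire_shared_recursive(l, s, t, n, i) \
		lock_acquire(l, s, t, 2, 2, n, i)

	/* The second level of helpers mentioned below; this is what
	 * mapped the old annotation to a recursive read: */
	#define rwlock_acquire_read(l, s, t, i) \
		lock_acquire_shared_recursive(l, s, t, NULL, i)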

I am not proposing new lglock-specific helpers, as I don't see the point
of the existing second level of helpers :)

Signed-off-by: Michel Lespinasse <[email protected]>

---
 kernel/lglock.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/lglock.c b/kernel/lglock.c
index 6535a667a5a7..86ae2aebf004 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
        arch_spinlock_t *lock;
 
        preempt_disable();
-       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lock = this_cpu_ptr(lg->lock);
        arch_spin_lock(lock);
 }
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
 {
        arch_spinlock_t *lock;
 
-       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lock = this_cpu_ptr(lg->lock);
        arch_spin_unlock(lock);
        preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
        arch_spinlock_t *lock;
 
        preempt_disable();
-       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lock = per_cpu_ptr(lg->lock, cpu);
        arch_spin_lock(lock);
 }
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
        arch_spinlock_t *lock;
 
-       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lock = per_cpu_ptr(lg->lock, cpu);
        arch_spin_unlock(lock);
        preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
        int i;
 
        preempt_disable();
-       rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        for_each_possible_cpu(i) {
                arch_spinlock_t *lock;
                lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
 {
        int i;
 
-       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        for_each_possible_cpu(i) {
                arch_spinlock_t *lock;
                lock = per_cpu_ptr(lg->lock, i);
-- 
1.8.1.3