[tip:locking/core] locking/ww_mutex: Add kselftests for resolving ww_mutex cyclic deadlocks

2017-01-14  tip-bot for Chris Wilson
Commit-ID:  d1b42b800e5d09dcee52812b4396aca3a3696ba9
Gitweb: http://git.kernel.org/tip/d1b42b800e5d09dcee52812b4396aca3a3696ba9
Author: Chris Wilson 
AuthorDate: Thu, 1 Dec 2016 11:47:09 +
Committer:  Ingo Molnar 
CommitDate: Sat, 14 Jan 2017 11:37:16 +0100

locking/ww_mutex: Add kselftests for resolving ww_mutex cyclic deadlocks

Check that ww_mutexes can detect cyclic deadlocks (generalised ABBA
cycles) and resolve them by lock reordering.

Signed-off-by: Chris Wilson 
Signed-off-by: Peter Zijlstra (Intel) 
Cc: Andrew Morton 
Cc: Linus Torvalds 
Cc: Maarten Lankhorst 
Cc: Nicolai Hähnle 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: http://lkml.kernel.org/r/20161201114711.28697-7-ch...@chris-wilson.co.uk
Signed-off-by: Ingo Molnar 
---
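
[ Editor's note: the new test exercises the canonical wound/wait backoff
  dance that every ww_mutex user must implement. For reference, here is
  a minimal sketch of that protocol for just two locks; lock_pair() is a
  hypothetical helper used only for illustration and is not part of this
  patch:

	static int lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			     struct ww_acquire_ctx *ctx)
	{
		int err;

		/* The first lock in an acquire context cannot deadlock. */
		ww_mutex_lock(a, ctx);

		err = ww_mutex_lock(b, ctx);
		if (err == -EDEADLK) {
			/*
			 * We must back off in favour of an older context:
			 * drop everything we hold, sleep until the
			 * contended lock becomes free, then reacquire the
			 * locks in the new order.
			 */
			ww_mutex_unlock(a);
			ww_mutex_lock_slow(b, ctx);
			err = ww_mutex_lock(a, ctx);
			if (err)	/* wounded again: back off fully */
				ww_mutex_unlock(b);
		}
		return err;
	}

  A caller brackets this with ww_acquire_init()/ww_acquire_fini() and
  retries on -EDEADLK; forward progress is guaranteed because contexts
  are ordered by age. test_cycle_work() below is exactly this pattern,
  with the lock pairs spread around a ring of workers so that a genuine
  N-way cycle arises. ]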
 kernel/locking/test-ww_mutex.c | 115 +
 1 file changed, 115 insertions(+)

diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 5a643ba..84da738 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -21,9 +21,11 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include <linux/ww_mutex.h>
 
 static DEFINE_WW_CLASS(ww_class);
+struct workqueue_struct *wq;
 
 struct test_mutex {
 	struct work_struct work;
@@ -243,10 +245,118 @@ static int test_abba(bool resolve)
 	return ret;
 }
 
+struct test_cycle {
+	struct work_struct work;
+	struct ww_mutex a_mutex;
+	struct ww_mutex *b_mutex;
+	struct completion *a_signal;
+	struct completion b_signal;
+	int result;
+};
+
+static void test_cycle_work(struct work_struct *work)
+{
+	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
+	struct ww_acquire_ctx ctx;
+	int err;
+
+	ww_acquire_init(&ctx, &ww_class);
+	ww_mutex_lock(&cycle->a_mutex, &ctx);
+
+	complete(cycle->a_signal);
+	wait_for_completion(&cycle->b_signal);
+
+	err = ww_mutex_lock(cycle->b_mutex, &ctx);
+	if (err == -EDEADLK) {
+		ww_mutex_unlock(&cycle->a_mutex);
+		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
+		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+	}
+
+	if (!err)
+		ww_mutex_unlock(cycle->b_mutex);
+	ww_mutex_unlock(&cycle->a_mutex);
+	ww_acquire_fini(&ctx);
+
+	cycle->result = err;
+}
+
+static int __test_cycle(unsigned int nthreads)
+{
+	struct test_cycle *cycles;
+	unsigned int n, last = nthreads - 1;
+	int ret;
+
+	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
+	if (!cycles)
+		return -ENOMEM;
+
+	for (n = 0; n < nthreads; n++) {
+		struct test_cycle *cycle = &cycles[n];
+
+		ww_mutex_init(&cycle->a_mutex, &ww_class);
+		if (n == last)
+			cycle->b_mutex = &cycles[0].a_mutex;
+		else
+			cycle->b_mutex = &cycles[n + 1].a_mutex;
+
+		if (n == 0)
+			cycle->a_signal = &cycles[last].b_signal;
+		else
+			cycle->a_signal = &cycles[n - 1].b_signal;
+		init_completion(&cycle->b_signal);
+
+		INIT_WORK(&cycle->work, test_cycle_work);
+		cycle->result = 0;
+	}
+
+	for (n = 0; n < nthreads; n++)
+		queue_work(wq, &cycles[n].work);
+
+	flush_workqueue(wq);
+
+	ret = 0;
+	for (n = 0; n < nthreads; n++) {
+		struct test_cycle *cycle = &cycles[n];
+
+		if (!cycle->result)
+			continue;
+
+		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
+		       n, nthreads, cycle->result);
+		ret = -EINVAL;
+		break;
+	}
+
+	for (n = 0; n < nthreads; n++)
+		ww_mutex_destroy(&cycles[n].a_mutex);
+	kfree(cycles);
+	return ret;
+}
+
+static int test_cycle(unsigned int ncpus)
+{
+	unsigned int n;
+	int ret;
+
+	for (n = 2; n <= ncpus + 1; n++) {
+		ret = __test_cycle(n);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int __init test_ww_mutex_init(void)
 {
+	int ncpus = num_online_cpus();
 	int ret;
 
+	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+	if (!wq)
+		return -ENOMEM;
+
 	ret = test_mutex();
 	if (ret)
 		return ret;
@@ -263,11 +373,16 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
+	ret = test_cycle(ncpus);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
 static void __exit test_ww_mutex_exit(void)
 {
+	destroy_workqueue(wq);
 }
 
 module_init(test_ww_mutex_init);
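
[ Editor's note: ww_mutex_lock_slow(), used in the backoff path of
  test_cycle_work(), is a convenience for reacquiring the lock that
  caused the -EDEADLK after a full backoff. A minimal sketch of its
  contract follows; lock_slow_sketch() is hypothetical and this is not
  the actual implementation:

	static void lock_slow_sketch(struct ww_mutex *lock,
				     struct ww_acquire_ctx *ctx)
	{
		/*
		 * Only valid once all locks previously held under @ctx
		 * have been dropped; the context then holds nothing, so
		 * the acquisition cannot itself return -EDEADLK and
		 * simply blocks until the mutex is ours.
		 */
		int err = ww_mutex_lock(lock, ctx);

		WARN_ON(err);
	}

  Sleeping on the contended lock before retrying the others is what
  lets the oldest context win eventually, instead of the ring of
  workers livelocking on repeated -EDEADLK backoffs. ]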