On Wed, 2015-07-22 at 17:17 -0700, Paul E. McKenney wrote:
> On Wed, Jul 22, 2015 at 02:07:27PM -0700, Davidlohr Bueso wrote:
> > Real-time mutexes are one of the few general primitives
> > that we do not have in locktorture. Address this -- a few
> > considerations:
> > 
> > o To spice things up, enable competing thread(s) to become
> > RT, so that we can stress the different priority-boosting paths
> > in the rtmutex code. Introduce a ->task_boost callback, used
> > only by the rtmutex torturer. Tasks boost/deboost roughly
> > every 50k (arbitrarily chosen) lock/unlock operations.
> > 
> > o Hold times are similar to those used for the other locks:
> > only occasionally do we take longer hold times (about once per
> > ~200k ops), so we roughly get two full RT boost+deboost cycles
> > done with short hold times in between.
> > 
> > Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
> 
> I have queued this one for testing, and by default would push it into
> the 4.4 merge window (the one after next).  Please let me know if you
> want it sooner.

Thanks, although here's a v2 with some small fixes:

- Fix the comment when resetting prio: s/500k/50k.
- When resetting prio, there was a chance that a non-RT task could be
converted (which we certainly don't want). Check trsp for NULL; see
the sketch below.
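
To make the intent of that second fix concrete, here is a minimal
userspace-style sketch of the boost/deboost decision; it mirrors the
structure and constants of torture_rtmutex_boost() in the patch below,
but the boost_decision() name, the rng callback and the is_rt flag are
made up purely for illustration, and the actual
sched_setscheduler_nocheck() call is omitted:

#include <stdbool.h>
#include <stddef.h>

#define FACTOR 50000	/* ~50k ops between priority changes, as in the patch */

static bool is_rt;	/* stands in for rt_task(current) */

/*
 * Returns true when the calling task's policy would change. A NULL rng
 * means "force-reset", as done when the kthread stops; a task that is
 * not RT must then be left alone, and rng must never be dereferenced.
 */
static bool boost_decision(unsigned long (*rng)(void), int nwriters)
{
	if (!is_rt) {
		/* boost roughly once per nwriters * FACTOR lock ops */
		if (rng && !(rng() % (nwriters * FACTOR))) {
			is_rt = true;	/* SCHED_FIFO, MAX_RT_PRIO - 1 */
			return true;
		}
	} else {
		/* stay boosted about twice as long, or force-reset on stop */
		if (!rng || !(rng() % (nwriters * FACTOR * 2))) {
			is_rt = false;	/* back to SCHED_NORMAL */
			return true;
		}
	}
	return false;
}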

While I was planning for 4.3, I can certainly wait a few more weeks for
4.4. Either way is fine.
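
In the meantime, for anyone who wants to poke at it: the new type can be
exercised directly by booting with locktorture.torture_type=rtmutex_lock
(or modprobe locktorture torture_type=rtmutex_lock on a
CONFIG_RT_MUTEXES=y kernel), or through the new LOCK05 scenario with the
usual rcutorture scripting, e.g. something along the lines of
tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --configs LOCK05.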

Thanks,
Davidlohr

8<-------------------------------------------------------------------
[PATCH v2] locktorture: Support rtmutex torturing

Real-time mutexes are one of the few general primitives
that we do not have in locktorture. Address this -- a few
considerations:

o To spice things up, enable competing thread(s) to become
RT, so that we can stress the different priority-boosting paths
in the rtmutex code. Introduce a ->task_boost callback, used
only by the rtmutex torturer. Tasks boost/deboost roughly
every 50k (arbitrarily chosen) lock/unlock operations.

o Hold times are similar to those used for the other locks:
only occasionally do we take longer hold times (about once per
~200k ops), so we roughly get two full RT boost+deboost cycles
done with short hold times in between.

Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
---
 Documentation/locking/locktorture.txt              |   3 +
 kernel/locking/locktorture.c                       | 114 ++++++++++++++++++++-
 .../selftests/rcutorture/configs/lock/CFLIST       |   3 +-
 .../selftests/rcutorture/configs/lock/LOCK05       |   6 ++
 .../selftests/rcutorture/configs/lock/LOCK05.boot  |   1 +
 5 files changed, 124 insertions(+), 3 deletions(-)
 create mode 100644 tools/testing/selftests/rcutorture/configs/lock/LOCK05
 create mode 100644 tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot

diff --git a/Documentation/locking/locktorture.txt b/Documentation/locking/locktorture.txt
index 619f2bb..a2ef3a9 100644
--- a/Documentation/locking/locktorture.txt
+++ b/Documentation/locking/locktorture.txt
@@ -52,6 +52,9 @@ torture_type    Type of lock to torture. By default, only spinlocks will
 
                     o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
 
+                    o "rtmutex_lock": rtmutex_lock() and rtmutex_unlock()
+                                      pairs. Kernel must have CONFIG_RT_MUTEXES=y.
+
                     o "rwsem_lock": read/write down() and up() semaphore pairs.
 
 torture_runnable  Start locktorture at boot time in the case where the
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 3224418..26ddd63 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -17,12 +17,14 @@
  *
  * Copyright (C) IBM Corporation, 2014
  *
- * Author: Paul E. McKenney <paul...@us.ibm.com>
+ * Authors: Paul E. McKenney <paul...@us.ibm.com>
+ *          Davidlohr Bueso <d...@stgolabs.net>
  *     Based on kernel/rcu/torture.c.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
+#include <linux/sched/rt.h>
 #include <linux/spinlock.h>
 #include <linux/rwlock.h>
 #include <linux/mutex.h>
@@ -91,11 +93,13 @@ struct lock_torture_ops {
        void (*init)(void);
        int (*writelock)(void);
        void (*write_delay)(struct torture_random_state *trsp);
+       void (*task_boost)(struct torture_random_state *trsp);
        void (*writeunlock)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *trsp);
        void (*readunlock)(void);
-       unsigned long flags;
+
+       unsigned long flags; /* for irq spinlocks */
        const char *name;
 };
 
@@ -139,9 +143,15 @@ static void torture_lock_busted_write_unlock(void)
          /* BUGGY, do not use in real life!!! */
 }
 
+static void torture_boost_dummy(struct torture_random_state *trsp)
+{
+       /* Only rtmutexes care about priority */
+}
+
 static struct lock_torture_ops lock_busted_ops = {
        .writelock      = torture_lock_busted_write_lock,
        .write_delay    = torture_lock_busted_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_lock_busted_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -185,6 +195,7 @@ static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_ops = {
        .writelock      = torture_spin_lock_write_lock,
        .write_delay    = torture_spin_lock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_spin_lock_write_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -211,6 +222,7 @@ __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_irq_ops = {
        .writelock      = torture_spin_lock_write_lock_irq,
        .write_delay    = torture_spin_lock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_lock_spin_write_unlock_irq,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -275,6 +287,7 @@ static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_ops = {
        .writelock      = torture_rwlock_write_lock,
        .write_delay    = torture_rwlock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwlock_write_unlock,
        .readlock       = torture_rwlock_read_lock,
        .read_delay     = torture_rwlock_read_delay,
@@ -315,6 +328,7 @@ __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_irq_ops = {
        .writelock      = torture_rwlock_write_lock_irq,
        .write_delay    = torture_rwlock_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwlock_write_unlock_irq,
        .readlock       = torture_rwlock_read_lock_irq,
        .read_delay     = torture_rwlock_read_delay,
@@ -354,6 +368,7 @@ static void torture_mutex_unlock(void) __releases(torture_mutex)
 static struct lock_torture_ops mutex_lock_ops = {
        .writelock      = torture_mutex_lock,
        .write_delay    = torture_mutex_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_mutex_unlock,
        .readlock       = NULL,
        .read_delay     = NULL,
@@ -361,6 +376,90 @@ static struct lock_torture_ops mutex_lock_ops = {
        .name           = "mutex_lock"
 };
 
+#ifdef CONFIG_RT_MUTEXES
+static DEFINE_RT_MUTEX(torture_rtmutex);
+
+static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
+{
+       rt_mutex_lock(&torture_rtmutex);
+       return 0;
+}
+
+static void torture_rtmutex_boost(struct torture_random_state *trsp)
+{
+       int policy;
+       struct sched_param param;
+       const unsigned int factor = 50000; /* yes, quite arbitrary */
+
+       if (!rt_task(current)) {
+               /*
+                * (1) Boost priority once every ~50k operations. When the
+                * task tries to take the lock, the rtmutex code will account
+                * for the new priority and do any corresponding pi-dance.
+                */
+               if (trsp && !(torture_random(trsp) %
+                             (cxt.nrealwriters_stress * factor))) {
+                       policy = SCHED_FIFO;
+                       param.sched_priority = MAX_RT_PRIO - 1;
+               } else /* common case, do nothing */
+                       return;
+       } else {
+               /*
+                * (2) The task will remain boosted for another ~50k operations,
+                * then restored back to its original prio, and so forth.
+                *
+                * When @trsp is nil, we want to force-reset the task for
+                * stopping the kthread.
+                */
+               if (!trsp || !(torture_random(trsp) %
+                              (cxt.nrealwriters_stress * factor * 2))) {
+                       policy = SCHED_NORMAL;
+                       param.sched_priority = 0;
+               } else /* common case, do nothing */
+                       return;
+       }
+
+       sched_setscheduler_nocheck(current, policy, &param);
+}
+
+static void torture_rtmutex_delay(struct torture_random_state *trsp)
+{
+       const unsigned long shortdelay_us = 2;
+       const unsigned long longdelay_ms = 100;
+
+       /*
+        * We want a short delay mostly to emulate likely code, and
+        * we want a long delay occasionally to force massive contention.
+        */
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
+       if (!(torture_random(trsp) %
+             (cxt.nrealwriters_stress * 2 * shortdelay_us)))
+               udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+       if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+               preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
+{
+       rt_mutex_unlock(&torture_rtmutex);
+}
+
+static struct lock_torture_ops rtmutex_lock_ops = {
+       .writelock      = torture_rtmutex_lock,
+       .write_delay    = torture_rtmutex_delay,
+       .task_boost     = torture_rtmutex_boost,
+       .writeunlock    = torture_rtmutex_unlock,
+       .readlock       = NULL,
+       .read_delay     = NULL,
+       .readunlock     = NULL,
+       .name           = "rtmutex_lock"
+};
+#endif
+
 static DECLARE_RWSEM(torture_rwsem);
 static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
 {
@@ -419,6 +518,7 @@ static void torture_rwsem_up_read(void) __releases(torture_rwsem)
 static struct lock_torture_ops rwsem_lock_ops = {
        .writelock      = torture_rwsem_down_write,
        .write_delay    = torture_rwsem_write_delay,
+       .task_boost     = torture_boost_dummy,
        .writeunlock    = torture_rwsem_up_write,
        .readlock       = torture_rwsem_down_read,
        .read_delay     = torture_rwsem_read_delay,
@@ -442,6 +542,7 @@ static int lock_torture_writer(void *arg)
                if ((torture_random(&rand) & 0xfffff) == 0)
                        schedule_timeout_uninterruptible(1);
 
+               cxt.cur_ops->task_boost(&rand);
                cxt.cur_ops->writelock();
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
@@ -456,6 +557,8 @@ static int lock_torture_writer(void *arg)
 
                stutter_wait("lock_torture_writer");
        } while (!torture_must_stop());
+
+       cxt.cur_ops->task_boost(NULL); /* reset prio */
        torture_kthread_stopping("lock_torture_writer");
        return 0;
 }
@@ -642,6 +745,9 @@ static int __init lock_torture_init(void)
                &spin_lock_ops, &spin_lock_irq_ops,
                &rw_lock_ops, &rw_lock_irq_ops,
                &mutex_lock_ops,
+#ifdef CONFIG_RT_MUTEXES
+               &rtmutex_lock_ops,
+#endif
                &rwsem_lock_ops,
        };
 
@@ -676,6 +782,10 @@ static int __init lock_torture_init(void)
        if (strncmp(torture_type, "mutex", 5) == 0)
                cxt.debug_lock = true;
 #endif
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+       if (strncmp(torture_type, "rtmutex", 7) == 0)
+               cxt.debug_lock = true;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
        if ((strncmp(torture_type, "spin", 4) == 0) ||
            (strncmp(torture_type, "rw_lock", 7) == 0))
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
index 6910b73..6ed3279 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
@@ -1,4 +1,5 @@
 LOCK01
 LOCK02
 LOCK03
-LOCK04
\ No newline at end of file
+LOCK04
+LOCK05
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK05 b/tools/testing/selftests/rcutorture/configs/lock/LOCK05
new file mode 100644
index 0000000..1d1da14
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK05
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot
new file mode 100644
index 0000000..8ac3730
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK05.boot
@@ -0,0 +1 @@
+locktorture.torture_type=rtmutex_lock
-- 
2.1.4


