Re: [PATCH -v2 5/9] rtmutex: Clean up

2016-09-29 Thread Thomas Gleixner
On Mon, 26 Sep 2016, Steven Rostedt wrote:
> Peter Zijlstra  wrote:
> 
> Can this be rephrased to: "Returns true if preemption has been
> disabled and a call to rt_mutex_postunlock() is required (which will
> re-enable preemption)"

I agree with Steven that the comments should be rephrased.

Thanks,

tglx


Re: [PATCH -v2 5/9] rtmutex: Clean up

2016-09-26 Thread Steven Rostedt
On Mon, 26 Sep 2016 14:32:18 +0200
Peter Zijlstra  wrote:

> Previous patches changed the meaning of the return value of
> rt_mutex_slowunlock(); update comments and code to reflect this.
> 
> Signed-off-by: Peter Zijlstra (Intel) 
> ---
>  kernel/futex.c  |   12 ++--
>  kernel/locking/rtmutex.c|   20 +---
>  kernel/locking/rtmutex_common.h |2 +-
>  3 files changed, 16 insertions(+), 18 deletions(-)
> 
> --- a/kernel/futex.c
> +++ b/kernel/futex.c
> @@ -1261,7 +1261,7 @@ static int wake_futex_pi(u32 __user *uad
>   struct futex_pi_state *pi_state = this->pi_state;
>   u32 uninitialized_var(curval), newval;
>   WAKE_Q(wake_q);
> - bool deboost;
> + bool postunlock;
>   int ret = 0;
>  
>   if (!pi_state)
> @@ -1327,17 +1327,17 @@ static int wake_futex_pi(u32 __user *uad
>  
>   raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
>  
> - deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
> + postunlock = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
>  
>   /*
>* First unlock HB so the waiter does not spin on it once he got woken
> -  * up. Second wake up the waiter before the priority is adjusted. If we
> -  * deboost first (and lose our higher priority), then the task might get
> -  * scheduled away before the wake up can take place.
> +  * up. Then wakeup the waiter by calling rt_mutex_postunlock(). Priority
> +  * is already adjusted and preemption is disabled to avoid inversion.

Can we specify here that preemption is only disabled if
rt_mutex_futex_unlock() returns true, and will be enabled again with
rt_mutex_postunlock().
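In other words, something along these lines (a condensed sketch of the pairing, pieced together from this hunk and the rt_mutex_postunlock() change further down; not the literal code):

	/* unlock slow path: preemption stays disabled iff this returns true */
	postunlock = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	spin_unlock(&hb->lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);	/* wake_up_q() + preempt_enable() */
	/* if postunlock is false, preemption was never disabled, nothing to undo */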


>*/
>   spin_unlock(&hb->lock);
>  
> - rt_mutex_postunlock(&wake_q, deboost);
> + if (postunlock)
> + rt_mutex_postunlock(&wake_q);
>  
>   return 0;
>  }
> --- a/kernel/locking/rtmutex.c
> +++ b/kernel/locking/rtmutex.c
> @@ -1254,7 +1254,8 @@ static inline int rt_mutex_slowtrylock(s
>  
>  /*
>   * Slow path to release a rt-mutex.
> - * Return whether the current task needs to undo a potential priority boosting.
> + *
> + * Return whether the current task needs to call rt_mutex_postunlock().
>   */
>  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
>   struct wake_q_head *wake_q)
> @@ -1327,7 +1328,7 @@ static bool __sched rt_mutex_slowunlock(
>  
>   raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
>  
> - /* check PI boosting */
> + /* call rt_mutex_postunlock() */

Can we rephrase this to "A call to rt_mutex_postunlock() is required".

>   return true;
>  }
>  
> @@ -1378,15 +1379,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
>  }
>  
>  /*
> - * Undo pi boosting (if necessary) and wake top waiter.
> + * Performs the wakeup of the top-waiter and re-enables preemption.
>   */
> -void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
> +void rt_mutex_postunlock(struct wake_q_head *wake_q)
>  {
>   wake_up_q(wake_q);
>  
>   /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
> - if (deboost)
> - preempt_enable();
> + preempt_enable();
>  }
>  
>  /**
> @@ -1489,9 +1489,8 @@ void __sched rt_mutex_unlock(struct rt_m
>   rt_mutex_deadlock_account_unlock(current);
>  
>   } else {
> - bool deboost = rt_mutex_slowunlock(lock, &wake_q);
> -
> - rt_mutex_postunlock(&wake_q, deboost);
> + if (rt_mutex_slowunlock(lock, &wake_q))
> + rt_mutex_postunlock(&wake_q);
>   }
>  }
>  EXPORT_SYMBOL_GPL(rt_mutex_unlock);
> @@ -1500,8 +1499,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
>   * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
>   * @lock: the rt_mutex to be unlocked
>   *
> - * Returns: true/false indicating whether priority adjustment is
> - * required or not.
> + * Returns: true/false indicating whether we should call rt_mutex_postunlock().

Can this be rephrased to: "Returns true if preemption has been
disabled and a call to rt_mutex_postunlock() is required (which will
re-enable preemption)"

-- Steve


>   */
>  bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
>  struct wake_q_head *wqh)
> --- a/kernel/locking/rtmutex_common.h
> +++ b/kernel/locking/rtmutex_common.h
> @@ -111,7 +111,7 @@ extern int rt_mutex_finish_proxy_lock(st
>  extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
>  extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
> struct wake_q_head *wqh);
> -extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
> +extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
>  extern void rt_mutex_adjust_prio(struct task_struct *task);
>  
>  #ifdef CONFIG_DEBUG_RT_MUTEXES
> 



[PATCH -v2 5/9] rtmutex: Clean up

2016-09-26 Thread Peter Zijlstra
Previous patches changed the meaning of the return value of
rt_mutex_slowunlock(); update comments and code to reflect this.

Signed-off-by: Peter Zijlstra (Intel) 
---
 kernel/futex.c  |   12 ++--
 kernel/locking/rtmutex.c|   20 +---
 kernel/locking/rtmutex_common.h |2 +-
 3 files changed, 16 insertions(+), 18 deletions(-)

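[ For reviewers: a minimal before/after sketch of the rt_mutex_unlock() slow-path call site, condensed from the hunks below. ]

	/* before: slowunlock returned "deboost", postunlock always called */
	bool deboost = rt_mutex_slowunlock(lock, &wake_q);
	rt_mutex_postunlock(&wake_q, deboost);

	/* after: the return value now means "rt_mutex_postunlock() required" */
	if (rt_mutex_slowunlock(lock, &wake_q))
		rt_mutex_postunlock(&wake_q);
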
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1261,7 +1261,7 @@ static int wake_futex_pi(u32 __user *uad
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
WAKE_Q(wake_q);
-   bool deboost;
+   bool postunlock;
int ret = 0;
 
if (!pi_state)
@@ -1327,17 +1327,17 @@ static int wake_futex_pi(u32 __user *uad
 
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
-   deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+   postunlock = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
 
/*
 * First unlock HB so the waiter does not spin on it once he got woken
-* up. Second wake up the waiter before the priority is adjusted. If we
-* deboost first (and lose our higher priority), then the task might get
-* scheduled away before the wake up can take place.
+* up. Then wakeup the waiter by calling rt_mutex_postunlock(). Priority
+* is already adjusted and preemption is disabled to avoid inversion.
 */
spin_unlock(&hb->lock);
 
-   rt_mutex_postunlock(&wake_q, deboost);
+   if (postunlock)
+   rt_mutex_postunlock(&wake_q);
 
return 0;
 }
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1254,7 +1254,8 @@ static inline int rt_mutex_slowtrylock(s
 
 /*
  * Slow path to release a rt-mutex.
- * Return whether the current task needs to undo a potential priority boosting.
+ *
+ * Return whether the current task needs to call rt_mutex_postunlock().
  */
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
struct wake_q_head *wake_q)
@@ -1327,7 +1328,7 @@ static bool __sched rt_mutex_slowunlock(
 
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-   /* check PI boosting */
+   /* call rt_mutex_postunlock() */
return true;
 }
 
@@ -1378,15 +1379,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
 }
 
 /*
- * Undo pi boosting (if necessary) and wake top waiter.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
-void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
+void rt_mutex_postunlock(struct wake_q_head *wake_q)
 {
wake_up_q(wake_q);
 
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-   if (deboost)
-   preempt_enable();
+   preempt_enable();
 }
 
 /**
@@ -1489,9 +1489,8 @@ void __sched rt_mutex_unlock(struct rt_m
rt_mutex_deadlock_account_unlock(current);
 
} else {
-   bool deboost = rt_mutex_slowunlock(lock, &wake_q);
-
-   rt_mutex_postunlock(&wake_q, deboost);
+   if (rt_mutex_slowunlock(lock, &wake_q))
+   rt_mutex_postunlock(&wake_q);
}
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1500,8 +1499,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
  * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
  * @lock: the rt_mutex to be unlocked
  *
- * Returns: true/false indicating whether priority adjustment is
- * required or not.
+ * Returns: true/false indicating whether we should call rt_mutex_postunlock().
  */
 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
   struct wake_q_head *wqh)
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -111,7 +111,7 @@ extern int rt_mutex_finish_proxy_lock(st
 extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
  struct wake_q_head *wqh);
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 extern void rt_mutex_adjust_prio(struct task_struct *task);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES



