> On Jun 16, 2019, at 05:34, Thomas Gleixner <t...@linutronix.de> wrote:
> 
> On Sun, 16 Jun 2019, Thomas Gleixner wrote:
>> 
>> Please don't. Send me a delta patch against the documentation. I have queued
>> all the other patches already internally. I did not push it out because I
>> wanted to have proper docs.
> 
> Fixed it up already. About to push it out.
> 

Thanks. This is the diff though.

diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 22992c8377952..f667087792747 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -118,7 +118,7 @@ static __always_inline bool should_resched(int preempt_offset)
 
        /* preempt count == 0 ? */
        tmp &= ~PREEMPT_NEED_RESCHED;
-       if (tmp)
+       if (tmp != preempt_offset)
                return false;
        if (current_thread_info()->preempt_lazy_count)
                return false;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c15583162a559..25bcf2f2714ba 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -92,6 +92,34 @@ static inline void softirq_clr_runner(unsigned int sirq)
        sr->runner[sirq] = NULL;
 }
 
+static bool softirq_check_runner_tsk(struct task_struct *tsk,
+                                    unsigned int *pending)
+{
+       bool ret = false;
+
+       if (!tsk)
+               return ret;
+
+       /*
+        * The wakeup code in rtmutex.c wakes up the task
+        * _before_ it sets pi_blocked_on to NULL under
+        * tsk->pi_lock. So we need to check for both: state
+        * and pi_blocked_on.
+        * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the
+        * task does cpu_chill().
+        */
+       raw_spin_lock(&tsk->pi_lock);
+       if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
+           (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
+               /* Clear all bits pending in that task */
+               *pending &= ~(tsk->softirqs_raised);
+               ret = true;
+       }
+       raw_spin_unlock(&tsk->pi_lock);
+
+       return ret;
+}
+
 /*
  * On preempt-rt a softirq running context might be blocked on a
  * lock. There might be no other runnable task on this CPU because the
@@ -104,6 +132,7 @@ static inline void softirq_clr_runner(unsigned int sirq)
  */
 void softirq_check_pending_idle(void)
 {
+       struct task_struct *tsk;
        static int rate_limit;
        struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
        u32 warnpending;
@@ -113,24 +142,23 @@ void softirq_check_pending_idle(void)
                return;
 
        warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+       if (!warnpending)
+               return;
        for (i = 0; i < NR_SOFTIRQS; i++) {
-               struct task_struct *tsk = sr->runner[i];
+               tsk = sr->runner[i];
 
-               /*
-                * The wakeup code in rtmutex.c wakes up the task
-                * _before_ it sets pi_blocked_on to NULL under
-                * tsk->pi_lock. So we need to check for both: state
-                * and pi_blocked_on.
-                */
-               if (tsk) {
-                       raw_spin_lock(&tsk->pi_lock);
-                       if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
-                               /* Clear all bits pending in that task */
-                               warnpending &= ~(tsk->softirqs_raised);
-                               warnpending &= ~(1 << i);
-                       }
-                       raw_spin_unlock(&tsk->pi_lock);
-               }
+               if (softirq_check_runner_tsk(tsk, &warnpending))
+                       warnpending &= ~(1 << i);
+       }
+
+       if (warnpending) {
+               tsk = __this_cpu_read(ksoftirqd);
+               softirq_check_runner_tsk(tsk, &warnpending);
+       }
+
+       if (warnpending) {
+               tsk = __this_cpu_read(ktimer_softirqd);
+               softirq_check_runner_tsk(tsk, &warnpending);
        }
 
        if (warnpending) {
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 851b2134e77f4..6f2736ec4b8ef 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1902,15 +1902,18 @@ void cpu_chill(void)
 {
        ktime_t chill_time;
        unsigned int freeze_flag = current->flags & PF_NOFREEZE;
+       long saved_state;
 
+       saved_state = current->state;
        chill_time = ktime_set(0, NSEC_PER_MSEC);
-       set_current_state(TASK_UNINTERRUPTIBLE);
+       __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
        current->flags |= PF_NOFREEZE;
        sleeping_lock_inc();
        schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
        sleeping_lock_dec();
        if (!freeze_flag)
                current->flags &= ~PF_NOFREEZE;
+       __set_current_state_no_track(saved_state);
 }
 EXPORT_SYMBOL(cpu_chill);
 #endif
diff --git a/localversion-rt b/localversion-rt
index 9f7d0bdbffb18..08b3e75841adc 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt13
+-rt14
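
For the curious, below is a stand-alone sketch of what the
should_resched() hunk above buys, roughly: with the old "if (tmp)"
test, a caller that legitimately holds a non-zero preempt count and
passes a matching preempt_offset (e.g. cond_resched_lock() with
PREEMPT_LOCK_OFFSET) never gets the lazy reschedule reported, because
the remaining count is never zero for it. Comparing against
preempt_offset fixes that. This is a simplified user-space model, not
the kernel code: the *_model names are invented, and the folded
PREEMPT_NEED_RESCHED bit plus the TIF_NEED_RESCHED_LAZY test are left
out.

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_LOCK_OFFSET_MODEL  1      /* what cond_resched_lock() would pass */

static unsigned int preempt_count_model;  /* preempt nesting held by the caller */
static int preempt_lazy_count_model;      /* lazy-preempt disable depth */

/* old check: only a count of zero could ever report a reschedule */
static bool should_resched_old(unsigned int preempt_offset)
{
        unsigned int tmp = preempt_count_model;

        (void)preempt_offset;             /* the old code ignored the caller's offset */
        if (tmp)
                return false;
        if (preempt_lazy_count_model)
                return false;
        return true;
}

/* fixed check: compare against the offset the caller legitimately holds */
static bool should_resched_new(unsigned int preempt_offset)
{
        unsigned int tmp = preempt_count_model;

        if (tmp != preempt_offset)
                return false;
        if (preempt_lazy_count_model)
                return false;
        return true;
}

int main(void)
{
        /* caller holds one lock-induced level of preempt count */
        preempt_count_model = PREEMPT_LOCK_OFFSET_MODEL;

        printf("old: %d  new: %d\n",
               should_resched_old(PREEMPT_LOCK_OFFSET_MODEL),   /* 0, never true */
               should_resched_new(PREEMPT_LOCK_OFFSET_MODEL));  /* 1, reschedule reported */
        return 0;
}

Built with "gcc -Wall" it prints "old: 0  new: 1", i.e. only the fixed
variant lets such a caller see the pending reschedule.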

