Dear RT Folks,

I'm pleased to announce the 3.10.97-rt106 stable release.


You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

  branch: v3.10-rt
  Head SHA1: 141a4ed5a87365316b29491b6b982d102d79d754


Or to build 3.10.97-rt106 directly, the following patches should be applied:

  http://www.kernel.org/pub/linux/kernel/v3.x/linux-3.10.tar.xz

  http://www.kernel.org/pub/linux/kernel/v3.x/patch-3.10.97.xz

  http://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patch-3.10.97-rt106.patch.xz



You can also build from 3.10.97-rt105 by applying the incremental patch:

  http://www.kernel.org/pub/linux/kernel/projects/rt/3.10/incr/patch-3.10.97-rt105-rt106.patch.xz



Enjoy,

-- Steve


Changes from v3.10.97-rt105:

---

Grygorii Strashko (2):
      ARM: smp: Move clear_tasks_mm_cpumask() call to __cpu_die()
      net/core/cpuhotplug: Drain input_pkt_queue lockless

Josh Cartwright (1):
      net: Make synchronize_rcu_expedited() conditional on !RT_FULL

Peter Zijlstra (1):
      sched: Introduce the trace_sched_waking tracepoint

Sebastian Andrzej Siewior (1):
      dump stack: don't disable preemption during trace

Steven Rostedt (Red Hat) (2):
      rtmutex: Have slowfn of rt_mutex_timed_fastlock() use enum
      Linux 3.10.97-rt106

Thomas Gleixner (2):
      rtmutex: Handle non enqueued waiters gracefully
      irqwork: Move irq safe work to irq context

[email protected] (1):
      rtmutex: Use chainwalking control enum
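
A quick note on the tracepoint change: Peter Zijlstra's patch below splits
wakeup tracing in two. sched_waking fires from the waking context, before
p->state is set to TASK_RUNNING; sched_wakeup now fires when the task is
actually woken, and both lose the now-redundant "success" argument. As a
rough sketch only (hypothetical names, not part of this release; registering
from a module additionally requires the tracepoint to be exported), a probe
for the new tracepoint could look like:

#include <linux/module.h>
#include <trace/events/sched.h>

/* Runs in the waking context, usually with interrupts disabled,
 * so it must not sleep. */
static void my_waking_probe(void *ignore, struct task_struct *p)
{
	trace_printk("waking comm=%s pid=%d\n", p->comm, p->pid);
}

static int __init my_init(void)
{
	return register_trace_sched_waking(my_waking_probe, NULL);
}

static void __exit my_exit(void)
{
	unregister_trace_sched_waking(my_waking_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");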

----
 arch/arm/kernel/smp.c             |  5 +++--
 arch/x86/kernel/dumpstack_64.c    |  8 ++++----
 include/linux/irq_work.h          |  6 ++++++
 include/trace/events/sched.h      | 30 +++++++++++++++++++++---------
 kernel/irq_work.c                 |  9 +++++++++
 kernel/rtmutex.c                  |  6 +++---
 kernel/sched/core.c               |  8 +++++---
 kernel/timer.c                    |  6 ++----
 kernel/trace/trace_sched_switch.c |  2 +-
 kernel/trace/trace_sched_wakeup.c |  2 +-
 localversion-rt                   |  2 +-
 net/core/dev.c                    |  4 ++--
 12 files changed, 58 insertions(+), 30 deletions(-)
---------------------------
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..7ad24aa57040 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -192,8 +192,6 @@ int __cpuinit __cpu_disable(void)
        flush_cache_louis();
        local_flush_tlb_all();
 
-       clear_tasks_mm_cpumask(cpu);
-
        return 0;
 }
 
@@ -209,6 +207,9 @@ void __cpuinit __cpu_die(unsigned int cpu)
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
+
+       clear_tasks_mm_cpumask(cpu);
+
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
 
        /*
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 66e274a3d968..37aee503a7ba 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -114,7 +114,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
 {
-       const unsigned cpu = get_cpu();
+       const unsigned cpu = get_cpu_light();
        unsigned long *irq_stack_end =
                (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned used = 0;
@@ -191,7 +191,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * This handles the process stack:
         */
        bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
-       put_cpu();
+       put_cpu_light();
 }
 EXPORT_SYMBOL(dump_trace);
 
@@ -205,7 +205,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        int cpu;
        int i;
 
-       preempt_disable();
+       migrate_disable();
        cpu = smp_processor_id();
 
        irq_stack_end   = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
@@ -238,7 +238,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                pr_cont(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
-       preempt_enable();
+       migrate_enable();
 
        pr_cont("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 4a8c7a2df480..ccd736ebee9e 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -44,4 +44,10 @@ bool irq_work_needs_cpu(void);
 static inline bool irq_work_needs_cpu(void) { return false; }
 #endif
 
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void);
+#else
+static inline void irq_work_tick_soft(void) { }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67a..1ff7042e572d 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
 
-       TP_PROTO(struct task_struct *p, int success),
+       TP_PROTO(struct task_struct *p),
 
-       TP_ARGS(p, success),
+       TP_ARGS(p),
 
        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
@@ -71,28 +71,40 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
-               __entry->success        = success;
+               __entry->success        = 1; /* rudiment, kill when possible */
                __entry->target_cpu     = task_cpu(p);
        )
        TP_perf_assign(
                __perf_task(p);
        ),
 
-       TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+       TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
-                 __entry->success, __entry->target_cpu)
+                 __entry->target_cpu)
 );
 
+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+DEFINE_EVENT(sched_wakeup_template, sched_waking,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p));
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-            TP_PROTO(struct task_struct *p, int success),
-            TP_ARGS(p, success));
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p));
 
 /*
  * Tracepoint for waking up a new task:
  */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-            TP_PROTO(struct task_struct *p, int success),
-            TP_ARGS(p, success));
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p));
 
 #ifdef CREATE_TRACE_POINTS
 static inline long __trace_sched_switch_state(struct task_struct *p)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index af8ceafc94e4..883bb73698b9 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -163,8 +163,17 @@ void irq_work_tick(void)
 
        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
+
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+               irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
        irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
+#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 5388ba98acaf..a809539f443c 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1166,7 +1166,7 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
        __set_current_state(TASK_UNINTERRUPTIBLE);
        pi_unlock(&self->pi_lock);
 
-       ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+       ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
        BUG_ON(ret);
 
        for (;;) {
@@ -1636,7 +1636,7 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        enum rtmutex_chainwalk chwalk,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
-                                     int detect_deadlock))
+                                     enum rtmutex_chainwalk chwalk))
 {
        if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
            likely(rt_mutex_cmpxchg(lock, NULL, current))) {
@@ -1914,7 +1914,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                ret = 0;
        }
 
-       if (unlikely(ret))
+       if (ret && rt_mutex_has_waiters(lock))
                remove_waiter(lock, waiter);
 
        raw_spin_unlock(&lock->wait_lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 23e289ae4270..6c21a28689b9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1394,9 +1394,9 @@ static void
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
        check_preempt_curr(rq, p, wake_flags);
-       trace_sched_wakeup(p, true);
-
        p->state = TASK_RUNNING;
+       trace_sched_wakeup(p);
+
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
                p->sched_class->task_woken(rq, p);
@@ -1579,6 +1579,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        if (!(wake_flags & WF_LOCK_SLEEPER))
                p->saved_state = TASK_RUNNING;
 
+       trace_sched_waking(p);
+
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
@@ -1830,7 +1832,7 @@ void wake_up_new_task(struct task_struct *p)
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
        p->on_rq = 1;
-       trace_sched_wakeup_new(p, true);
+       trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
diff --git a/kernel/timer.c b/kernel/timer.c
index ff272e20ee0c..2fd6ea5c6519 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1450,7 +1450,7 @@ void update_process_times(int user_tick)
        scheduler_tick();
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_IRQ_WORK)
        if (in_irq())
                irq_work_run();
 #endif
@@ -1466,9 +1466,7 @@ static void run_timer_softirq(struct softirq_action *h)
 
        hrtimer_run_pending();
 
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-       irq_work_tick();
-#endif
+       irq_work_tick_soft();
 
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 4e98e3b257a3..82fe794af532 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -106,7 +106,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 }
 
 static void
-probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
+probe_sched_wakeup(void *ignore, struct task_struct *wakee)
 {
        struct trace_array_cpu *data;
        unsigned long flags;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fee77e15d815..8e967ca56006 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -458,7 +458,7 @@ static void wakeup_reset(struct trace_array *tr)
 }
 
 static void
-probe_wakeup(void *ignore, struct task_struct *p, int success)
+probe_wakeup(void *ignore, struct task_struct *p)
 {
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
diff --git a/localversion-rt b/localversion-rt
index b4e5b00e24e1..8111d0b4d38d 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt105
+-rt106
diff --git a/net/core/dev.c b/net/core/dev.c
index a478f77e4322..864f7ae42c08 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5838,7 +5838,7 @@ EXPORT_SYMBOL(free_netdev);
 void synchronize_net(void)
 {
        might_sleep();
-       if (rtnl_is_locked())
+       if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
                synchronize_rcu_expedited();
        else
                synchronize_rcu();
@@ -6088,7 +6088,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                netif_rx(skb);
                input_queue_head_incr(oldsd);
        }
-       while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx(skb);
                input_queue_head_incr(oldsd);
        }
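
A note on the irqwork change above: with this release, lazy irq_work on
PREEMPT_RT_FULL is run from the timer softirq via irq_work_tick_soft()
instead of from hard interrupt context. A minimal sketch of a lazy
irq_work user (hypothetical names, not from this patch set):

#include <linux/irq_work.h>

static void my_lazy_func(struct irq_work *work)
{
	/* On PREEMPT_RT_FULL this now runs from the timer softirq
	 * (irq_work_tick_soft()), not from hard irq context. */
}

static struct irq_work my_lazy_work = {
	.flags	= IRQ_WORK_LAZY,
	.func	= my_lazy_func,
};

/* May be called from atomic context; the work runs later. */
void kick_my_lazy_work(void)
{
	irq_work_queue(&my_lazy_work);
}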
