On Thu, Sep 03, 2020 at 08:19:38AM -0700, Guenter Roeck wrote:
> This doesn't compile for me - there is no "name" parameter in __DO_TRACE().
> 
> Error log:
> In file included from ./include/linux/rculist.h:11,
>                  from ./include/linux/pid.h:5,
>                  from ./include/linux/sched.h:14,
>                  from ./include/linux/sched/numa_balancing.h:10,
>                  from ./include/trace/events/sched.h:8,
>                  from kernel/sched/core.c:10:
> ./include/trace/events/sched.h: In function 'trace_sched_kthread_stop':
> ./include/linux/tracepoint.h:175:26: error: '__tracepoint_name' undeclared
> 
> I applied your patch on top of v5.9-rc3-75-gfc3abb53250a. Are you using
> a different branch?

Argh, I was on tip/master, where __DO_TRACE() takes the tracepoint name; mainline's __DO_TRACE() gets the tracepoint pointer instead, hence your '__tracepoint_name' undeclared error. Easy fix though, the version below uses (tp)->funcs.
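
For completeness, the new bit lands directly above the NMI bits, so the
mask comes out as the 0x01000000 in the layout comment. A quick userspace
sketch, mirroring the defines from the patch below (illustrative only,
not kernel code), confirms the arithmetic:

/* Check the new preempt_count bit layout (userspace sketch). */
#include <assert.h>
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4
#define RCUIDLE_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
#define RCUIDLE_SHIFT	(NMI_SHIFT + NMI_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)
#define RCUIDLE_MASK	(__IRQ_MASK(RCUIDLE_BITS) << RCUIDLE_SHIFT)

int main(void)
{
	/* RCUIDLE_SHIFT == 24, so the mask is 0x01000000 */
	assert(RCUIDLE_MASK == 0x01000000UL);
	printf("RCUIDLE_MASK = 0x%08lx\n", RCUIDLE_MASK);
	return 0;
}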

---
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7d9c1c0e149c..878bac893e41 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -27,17 +27,20 @@
  *         SOFTIRQ_MASK:       0x0000ff00
  *         HARDIRQ_MASK:       0x000f0000
  *             NMI_MASK:       0x00f00000
+ *         RCUIDLE_MASK:       0x01000000
  * PREEMPT_NEED_RESCHED:       0x80000000
  */
 #define PREEMPT_BITS   8
 #define SOFTIRQ_BITS   8
 #define HARDIRQ_BITS   4
 #define NMI_BITS       4
+#define RCUIDLE_BITS   1
 
 #define PREEMPT_SHIFT  0
 #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
 #define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)
+#define RCUIDLE_SHIFT  (NMI_SHIFT     + NMI_BITS)
 
 #define __IRQ_MASK(x)  ((1UL << (x))-1)
 
@@ -45,11 +48,13 @@
 #define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 #define NMI_MASK       (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
+#define RCUIDLE_MASK   (__IRQ_MASK(RCUIDLE_BITS) << RCUIDLE_SHIFT)
 
 #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET     (1UL << NMI_SHIFT)
+#define RCUIDLE_OFFSET (1UL << RCUIDLE_SHIFT)
 
 #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 598fec9f9dbf..0469bc1c24fc 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -164,12 +164,18 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                void *__data;                                           \
                int __maybe_unused __idx = 0;                           \
                                                                        \
-               if (!(cond))                                            \
+               if (!(cond) || (preempt_count() & RCUIDLE_MASK))        \
                        return;                                         \
                                                                        \
                /* srcu can't be used from NMI */                       \
                WARN_ON_ONCE(rcuidle && in_nmi());                      \
                                                                        \
+               if (IS_ENABLED(CONFIG_LOCKDEP) && !(rcuidle)) {         \
+                       rcu_read_lock_sched_notrace();                  \
+                       rcu_dereference_sched((tp)->funcs);             \
+                       rcu_read_unlock_sched_notrace();                \
+               }                                                       \
+                                                                       \
                /* keep srcu and sched-rcu usage consistent */          \
                preempt_disable_notrace();                              \
                                                                        \
@@ -235,11 +241,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args),                     \
                                TP_CONDITION(cond), 0);                 \
-               if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {             \
-                       rcu_read_lock_sched_notrace();                  \
-                       rcu_dereference_sched(__tracepoint_##name.funcs);\
-                       rcu_read_unlock_sched_notrace();                \
-               }                                                       \
        }                                                               \
        __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),          \
                PARAMS(cond), PARAMS(data_proto), PARAMS(data_args))    \
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8ce77d9ac716..ad9fb4f12c63 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -269,6 +269,8 @@ static noinstr void rcu_dynticks_eqs_enter(void)
        /* Better not have special action (TLB flush) pending! */
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_MASK));
+
+       __preempt_count_add(RCUIDLE_OFFSET);
 }
 
 /*
@@ -281,6 +283,8 @@ static noinstr void rcu_dynticks_eqs_exit(void)
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
+       __preempt_count_sub(RCUIDLE_OFFSET);
+
        /*
         * CPUs seeing atomic_add_return() must see prior idle sojourns,
         * and we also must force ordering with the next RCU read-side

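FWIW, the net effect can be modelled in a few lines of userspace C
(hypothetical stand-ins, not the kernel implementation): once
rcu_dynticks_eqs_enter() has added RCUIDLE_OFFSET, the early return in
__DO_TRACE() makes every tracepoint a NOP until the matching
rcu_dynticks_eqs_exit().

/* Toy model of the enter/exit pairing; names are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define RCUIDLE_SHIFT	24
#define RCUIDLE_OFFSET	(1UL << RCUIDLE_SHIFT)
#define RCUIDLE_MASK	(1UL << RCUIDLE_SHIFT)

static unsigned long preempt_count;	/* stand-in for the per-CPU count */

static void eqs_enter(void) { preempt_count += RCUIDLE_OFFSET; }
static void eqs_exit(void)  { preempt_count -= RCUIDLE_OFFSET; }

/* models the new "|| (preempt_count() & RCUIDLE_MASK)" early return */
static bool tracepoint_would_fire(void)
{
	return !(preempt_count & RCUIDLE_MASK);
}

int main(void)
{
	printf("before eqs_enter: %d\n", tracepoint_would_fire());	/* 1 */
	eqs_enter();
	printf("inside eqs:       %d\n", tracepoint_would_fire());	/* 0 */
	eqs_exit();
	printf("after eqs_exit:   %d\n", tracepoint_would_fire());	/* 1 */
	return 0;
}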
