Instead of providing asynchronous checks for the nohz subsystem to verify
the perf event tick dependency, migrate perf to the new tick dependency mask.

Perf needs the tick for two situations:

1) Freq events. We could set the tick dependency when those are
installed on a CPU context. But setting a global dependency on top of
the global freq events accounting is much easier. If people want that
to be optimized, we can still refine it later at the per-CPU tick
dependency level. This patch doesn't change the current behaviour
anyway (see the sketch after this list).

2) Throttled events: this is a per-CPU dependency.
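
For reference, the resulting lifecycle looks roughly like the sketch
below (the helper names are hypothetical, purely to illustrate the hunks
that follow; the real changes live in account_event(), unaccount_event(),
__perf_event_overflow() and perf_event_task_tick()):

	/* 1) Freq events: one global dependency driven by nr_freq_events.
	 * (freq_event_account()/freq_event_unaccount() are hypothetical
	 * helpers; the real code sits in account_event()/unaccount_event().)
	 */
	static void freq_event_account(void)
	{
		if (atomic_inc_return(&nr_freq_events) == 1)
			tick_nohz_set_tick_dependency(TICK_PERF_EVENTS_BIT);
	}

	static void freq_event_unaccount(void)
	{
		if (atomic_dec_and_test(&nr_freq_events))
			tick_nohz_clear_tick_dependency(TICK_PERF_EVENTS_BIT);
	}

	/* 2) Throttled events: per-CPU dependency, set when an event gets
	 * throttled in __perf_event_overflow() and cleared from the next
	 * perf_event_task_tick() once unthrottling has been handled.
	 */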

Cc: Christoph Lameter <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Preeti U Murthy <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Viresh Kumar <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
---
 include/linux/perf_event.h |  6 ------
 kernel/events/core.c       | 19 +++++--------------
 kernel/time/tick-sched.c   |  6 ------
 3 files changed, 5 insertions(+), 26 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809..ff25348 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1013,12 +1013,6 @@ static inline int __perf_event_disable(void *info)       { return -1; }
 static inline void perf_event_task_tick(void)                          { }
 #endif
 
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
-extern bool perf_event_can_stop_tick(void);
-#else
-static inline bool perf_event_can_stop_tick(void)                      { return true; }
-#endif
-
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
 extern void perf_restore_debug_store(void);
 #else
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae34..9980bd9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3064,17 +3064,6 @@ done:
        return rotate;
 }
 
-#ifdef CONFIG_NO_HZ_FULL
-bool perf_event_can_stop_tick(void)
-{
-       if (atomic_read(&nr_freq_events) ||
-           __this_cpu_read(perf_throttled_count))
-               return false;
-       else
-               return true;
-}
-#endif
-
 void perf_event_task_tick(void)
 {
        struct list_head *head = this_cpu_ptr(&active_ctx_list);
@@ -3085,6 +3074,7 @@ void perf_event_task_tick(void)
 
        __this_cpu_inc(perf_throttled_seq);
        throttled = __this_cpu_xchg(perf_throttled_count, 0);
+       tick_nohz_clear_tick_dependency_this_cpu(TICK_PERF_EVENTS_BIT);
 
        list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
                perf_adjust_freq_unthr_context(ctx, throttled);
@@ -3453,7 +3443,8 @@ static void unaccount_event(struct perf_event *event)
        if (event->attr.task)
                atomic_dec(&nr_task_events);
        if (event->attr.freq)
-               atomic_dec(&nr_freq_events);
+               if (atomic_dec_and_test(&nr_freq_events))
+                       tick_nohz_clear_tick_dependency(TICK_PERF_EVENTS_BIT);
        if (is_cgroup_event(event))
                static_key_slow_dec_deferred(&perf_sched_events);
        if (has_branch_stack(event))
@@ -6089,9 +6080,9 @@ static int __perf_event_overflow(struct perf_event *event,
                if (unlikely(throttle
                             && hwc->interrupts >= max_samples_per_tick)) {
                        __this_cpu_inc(perf_throttled_count);
+                       tick_nohz_set_tick_dependency_this_cpu(TICK_PERF_EVENTS_BIT);
                        hwc->interrupts = MAX_INTERRUPTS;
                        perf_log_throttle(event, 0);
-                       tick_nohz_full_kick();
                        ret = 1;
                }
        }
@@ -7477,7 +7468,7 @@ static void account_event(struct perf_event *event)
                atomic_inc(&nr_task_events);
        if (event->attr.freq) {
                if (atomic_inc_return(&nr_freq_events) == 1)
-                       tick_nohz_full_kick_all();
+                       tick_nohz_set_tick_dependency(TICK_PERF_EVENTS_BIT);
        }
        if (has_branch_stack(event))
                static_key_slow_inc(&perf_sched_events.key);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a64646e..fbe4736 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
-#include <linux/perf_event.h>
 #include <linux/context_tracking.h>
 
 #include <asm/irq_regs.h>
@@ -203,11 +202,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
                return false;
        }
 
-       if (!perf_event_can_stop_tick()) {
-               trace_tick_stop(0, "perf events running\n");
-               return false;
-       }
-
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        /*
         * sched_clock_tick() needs us?
-- 
2.1.4
