Introduce idle enter/exit balance callbacks that maintain
balance.idle_cpus_mask, a cpumask of the CPUs that are
currently idle in the system.

The mask is updated only when the REBALANCE_AFFINITY feature
is enabled. The code that consumes it is introduced in the
following patch.
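
For illustration, a consumer of the mask could look roughly
like the sketch below. This is only a sketch, not code from
this series: pick_idle_cpu_example() is a hypothetical helper,
assumed to live in fair.c next to the balance struct:

  /*
   * Hypothetical consumer, for illustration only: return the
   * first idle CPU the task is allowed to run on, or -1 if
   * there is none.  The scan can race with idle enter/exit,
   * so the result is only a hint.
   */
  static int pick_idle_cpu_example(struct task_struct *p)
  {
          int cpu;

          /* Fast path: no CPU is currently tracked as idle. */
          if (!atomic_read(&balance.nr_cpus))
                  return -1;

          for_each_cpu_and(cpu, balance.idle_cpus_mask, &p->cpus_allowed)
                  return cpu;

          return -1;
  }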

Signed-off-by: Jiri Olsa <jo...@kernel.org>
---
 kernel/sched/fair.c  | 37 +++++++++++++++++++++++++++++++++++++
 kernel/sched/idle.c  |  2 ++
 kernel/sched/sched.h |  8 ++++++++
 3 files changed, 47 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f19c9435c64d..78c4127f2f3a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7802,6 +7802,42 @@ static inline int on_null_domain(struct rq *rq)
        return unlikely(!rcu_dereference_sched(rq->sd));
 }
 
+/*
+ * CPUs that are currently idle, maintained from the idle loop
+ * via sched_idle_enter()/sched_idle_exit() and consumed by the
+ * REBALANCE_AFFINITY balancing code.
+ */
+static struct {
+       cpumask_var_t   idle_cpus_mask;
+       atomic_t        nr_cpus;
+} balance ____cacheline_aligned;
+
+void sched_idle_enter(int cpu)
+{
+       if (!sched_feat(REBALANCE_AFFINITY))
+               return;
+
+       if (!cpu_active(cpu))
+               return;
+
+       if (on_null_domain(cpu_rq(cpu)))
+               return;
+
+       cpumask_set_cpu(cpu, balance.idle_cpus_mask);
+       atomic_inc(&balance.nr_cpus);
+}
+
+void sched_idle_exit(int cpu)
+{
+       if (!sched_feat(REBALANCE_AFFINITY))
+               return;
+
+       if (likely(cpumask_test_cpu(cpu, balance.idle_cpus_mask))) {
+               cpumask_clear_cpu(cpu, balance.idle_cpus_mask);
+               atomic_dec(&balance.nr_cpus);
+       }
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
@@ -8731,6 +8767,7 @@ __init void init_sched_fair_class(void)
        nohz.next_balance = jiffies;
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 #endif
+       zalloc_cpumask_var(&balance.idle_cpus_mask, GFP_NOWAIT);
 #endif /* SMP */
 
 }
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index db4ff7c100b9..0cc0109eacd3 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -216,6 +216,7 @@ static void cpu_idle_loop(void)
                __current_set_polling();
                quiet_vmstat();
                tick_nohz_idle_enter();
+               sched_idle_enter(cpu);
 
                while (!need_resched()) {
                        check_pgt_cache();
@@ -256,6 +257,7 @@ static void cpu_idle_loop(void)
                 */
                preempt_set_need_resched();
                tick_nohz_idle_exit();
+               sched_idle_exit(cpu);
                __current_clr_polling();
 
                /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 72f1f3087b04..d1a6224cd140 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1692,6 +1692,14 @@ extern void init_dl_rq(struct dl_rq *dl_rq);
 extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
 
+#ifdef CONFIG_SMP
+extern void sched_idle_enter(int cpu);
+extern void sched_idle_exit(int cpu);
+#else
+static inline void sched_idle_enter(int cpu) { }
+static inline void sched_idle_exit(int cpu) { }
+#endif
+
 #ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {
        NOHZ_TICK_STOPPED,
-- 
2.4.11
