Commit-ID:  ae92882e5646d8661a3ca182ba988752fe4b773f
Gitweb:     http://git.kernel.org/tip/ae92882e5646d8661a3ca182ba988752fe4b773f
Author:     Josh Poimboeuf <jpoim...@redhat.com>
AuthorDate: Fri, 17 Jun 2016 12:43:24 -0500
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 5 Sep 2016 13:29:46 +0200

sched/debug: Clean up schedstat macros

The schedstat_*() macros are inconsistent: most of them take a pointer
and a field which the macro combines, whereas schedstat_set() takes the
already combined ptr->field.

The already combined ptr->field argument is actually more intuitive and
easier to use, and there's no reason to require the user to split the
variable up, so convert the macros to use the combined argument.
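
For example, a typical call site (as seen in the conversion below) goes
from the split form to the combined one:

    /* old: pointer and field passed separately, combined by the macro */
    schedstat_inc(rq, ttwu_count);

    /* new: the caller passes the already combined ptr->field */
    schedstat_inc(rq->ttwu_count);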

Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Matt Fleming <m...@codeblueprint.co.uk>
Cc: Mel Gorman <mgor...@techsingularity.net>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Srikar Dronamraju <sri...@linux.vnet.ibm.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/r/54953ca25bb579f3a5946432dee409b0e05222c6.1466184592.git.jpoim...@redhat.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/sched/core.c      | 22 +++++++++++-----------
 kernel/sched/debug.c     |  4 ++--
 kernel/sched/fair.c      | 42 +++++++++++++++++++++---------------------
 kernel/sched/idle_task.c |  2 +-
 kernel/sched/stats.h     | 22 +++++++++++-----------
 5 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 90b1961..8506770 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1636,16 +1636,16 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
        int this_cpu = smp_processor_id();
 
        if (cpu == this_cpu) {
-               schedstat_inc(rq, ttwu_local);
-               schedstat_inc(p, se.statistics.nr_wakeups_local);
+               schedstat_inc(rq->ttwu_local);
+               schedstat_inc(p->se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;
 
-               schedstat_inc(p, se.statistics.nr_wakeups_remote);
+               schedstat_inc(p->se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(this_cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                               schedstat_inc(sd, ttwu_wake_remote);
+                               schedstat_inc(sd->ttwu_wake_remote);
                                break;
                        }
                }
@@ -1653,15 +1653,15 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
        }
 
        if (wake_flags & WF_MIGRATED)
-               schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+               schedstat_inc(p->se.statistics.nr_wakeups_migrate);
 
 #endif /* CONFIG_SMP */
 
-       schedstat_inc(rq, ttwu_count);
-       schedstat_inc(p, se.statistics.nr_wakeups);
+       schedstat_inc(rq->ttwu_count);
+       schedstat_inc(p->se.statistics.nr_wakeups);
 
        if (wake_flags & WF_SYNC)
-               schedstat_inc(p, se.statistics.nr_wakeups_sync);
+               schedstat_inc(p->se.statistics.nr_wakeups_sync);
 
 #endif /* CONFIG_SCHEDSTATS */
 }
@@ -3237,7 +3237,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-       schedstat_inc(this_rq(), sched_count);
+       schedstat_inc(this_rq()->sched_count);
 }
 
 /*
@@ -4849,7 +4849,7 @@ SYSCALL_DEFINE0(sched_yield)
 {
        struct rq *rq = this_rq_lock();
 
-       schedstat_inc(rq, yld_count);
+       schedstat_inc(rq->yld_count);
        current->sched_class->yield_task(rq);
 
        /*
@@ -5000,7 +5000,7 @@ again:
 
        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
        if (yielded) {
-               schedstat_inc(rq, yld_count);
+               schedstat_inc(rq->yld_count);
                /*
                 * Make p's CPU reschedule; pick_next_entity takes care of
                 * fairness.
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2a0a999..92fa534 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -429,9 +429,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                p->prio);
 
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
+               SPLIT_NS(schedstat_val(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
-               SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+               SPLIT_NS(schedstat_val(p->se.statistics.sum_sleep_runtime)));
 
 #ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 479639f..157d741 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -800,7 +800,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
                      max(delta_exec, curr->statistics.exec_max));
 
        curr->sum_exec_runtime += delta_exec;
-       schedstat_add(cfs_rq, exec_clock, delta_exec);
+       schedstat_add(cfs_rq->exec_clock, delta_exec);
 
        curr->vruntime += calc_delta_fair(delta_exec, curr);
        update_min_vruntime(cfs_rq);
@@ -3275,7 +3275,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
                d = -d;
 
        if (d > 3*sysctl_sched_latency)
-               schedstat_inc(cfs_rq, nr_spread_over);
+               schedstat_inc(cfs_rq->nr_spread_over);
 #endif
 }
 
@@ -5164,13 +5164,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 
        balanced = this_eff_load <= prev_eff_load;
 
-       schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
+       schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 
        if (!balanced)
                return 0;
 
-       schedstat_inc(sd, ttwu_move_affine);
-       schedstat_inc(p, se.statistics.nr_wakeups_affine);
+       schedstat_inc(sd->ttwu_move_affine);
+       schedstat_inc(p->se.statistics.nr_wakeups_affine);
 
        return 1;
 }
@@ -6183,7 +6183,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
        if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
                int cpu;
 
-               schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+               schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
 
                env->flags |= LBF_SOME_PINNED;
 
@@ -6214,7 +6214,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
        env->flags &= ~LBF_ALL_PINNED;
 
        if (task_running(env->src_rq, p)) {
-               schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+               schedstat_inc(p->se.statistics.nr_failed_migrations_running);
                return 0;
        }
 
@@ -6231,13 +6231,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
        if (tsk_cache_hot <= 0 ||
            env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
                if (tsk_cache_hot == 1) {
-                       schedstat_inc(env->sd, lb_hot_gained[env->idle]);
-                       schedstat_inc(p, se.statistics.nr_forced_migrations);
+                       schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+                       schedstat_inc(p->se.statistics.nr_forced_migrations);
                }
                return 1;
        }
 
-       schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
+       schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
        return 0;
 }
 
@@ -6277,7 +6277,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
                 * so we can safely collect stats here rather than
                 * inside detach_tasks().
                 */
-               schedstat_inc(env->sd, lb_gained[env->idle]);
+               schedstat_inc(env->sd->lb_gained[env->idle]);
                return p;
        }
        return NULL;
@@ -6369,7 +6369,7 @@ next:
         * so we can safely collect detach_one_task() stats here rather
         * than inside detach_one_task().
         */
-       schedstat_add(env->sd, lb_gained[env->idle], detached);
+       schedstat_add(env->sd->lb_gained[env->idle], detached);
 
        return detached;
 }
@@ -7510,7 +7510,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
        cpumask_copy(cpus, cpu_active_mask);
 
-       schedstat_inc(sd, lb_count[idle]);
+       schedstat_inc(sd->lb_count[idle]);
 
 redo:
        if (!should_we_balance(&env)) {
@@ -7520,19 +7520,19 @@ redo:
 
        group = find_busiest_group(&env);
        if (!group) {
-               schedstat_inc(sd, lb_nobusyg[idle]);
+               schedstat_inc(sd->lb_nobusyg[idle]);
                goto out_balanced;
        }
 
        busiest = find_busiest_queue(&env, group);
        if (!busiest) {
-               schedstat_inc(sd, lb_nobusyq[idle]);
+               schedstat_inc(sd->lb_nobusyq[idle]);
                goto out_balanced;
        }
 
        BUG_ON(busiest == env.dst_rq);
 
-       schedstat_add(sd, lb_imbalance[idle], env.imbalance);
+       schedstat_add(sd->lb_imbalance[idle], env.imbalance);
 
        env.src_cpu = busiest->cpu;
        env.src_rq = busiest;
@@ -7639,7 +7639,7 @@ more_balance:
        }
 
        if (!ld_moved) {
-               schedstat_inc(sd, lb_failed[idle]);
+               schedstat_inc(sd->lb_failed[idle]);
                /*
                 * Increment the failure counter only on periodic balance.
                 * We do not want newidle balance, which can be very
@@ -7722,7 +7722,7 @@ out_all_pinned:
         * we can't migrate them. Let the imbalance flag set so parent level
         * can try to migrate them.
         */
-       schedstat_inc(sd, lb_balanced[idle]);
+       schedstat_inc(sd->lb_balanced[idle]);
 
        sd->nr_balance_failed = 0;
 
@@ -7915,15 +7915,15 @@ static int active_load_balance_cpu_stop(void *data)
                        .idle           = CPU_IDLE,
                };
 
-               schedstat_inc(sd, alb_count);
+               schedstat_inc(sd->alb_count);
 
                p = detach_one_task(&env);
                if (p) {
-                       schedstat_inc(sd, alb_pushed);
+                       schedstat_inc(sd->alb_pushed);
                        /* Active balancing done, reset the failure counter. */
                        sd->nr_balance_failed = 0;
                } else {
-                       schedstat_inc(sd, alb_failed);
+                       schedstat_inc(sd->alb_failed);
                }
        }
        rcu_read_unlock();
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 2ce5458..dedc81ec 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -28,7 +28,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie c
 {
        put_prev_task(rq, prev);
 
-       schedstat_inc(rq, sched_goidle);
+       schedstat_inc(rq->sched_goidle);
        return rq->idle;
 }
 
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 78955cb..fc05425 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -29,11 +29,11 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
        if (rq)
                rq->rq_sched_info.run_delay += delta;
 }
-# define schedstat_enabled()           static_branch_unlikely(&sched_schedstats)
-# define schedstat_inc(rq, field)      do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
-# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
-# define schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
-# define schedstat_val(rq, field)      ((schedstat_enabled()) ? (rq)->field : 0)
+#define schedstat_enabled()            static_branch_unlikely(&sched_schedstats)
+#define schedstat_inc(var)             do { if (schedstat_enabled()) { var++; } } while (0)
+#define schedstat_add(var, amt)                do { if (schedstat_enabled()) { var += (amt); } } while (0)
+#define schedstat_set(var, val)                do { if (schedstat_enabled()) { var = (val); } } while (0)
+#define schedstat_val(var)             ((schedstat_enabled()) ? (var) : 0)
 
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
@@ -45,12 +45,12 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
-# define schedstat_enabled()           0
-# define schedstat_inc(rq, field)      do { } while (0)
-# define schedstat_add(rq, field, amt) do { } while (0)
-# define schedstat_set(var, val)       do { } while (0)
-# define schedstat_val(rq, field)      0
-#endif
+#define schedstat_enabled()            0
+#define schedstat_inc(var)             do { } while (0)
+#define schedstat_add(var, amt)                do { } while (0)
+#define schedstat_set(var, val)                do { } while (0)
+#define schedstat_val(var)             0
+#endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
