Commit-ID:  7110744516276e906f9197e2857d026eb2343393
Gitweb:     http://git.kernel.org/tip/7110744516276e906f9197e2857d026eb2343393
Author:     Jason Low <jason.l...@hp.com>
AuthorDate: Tue, 28 Apr 2015 13:00:24 -0700
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 8 May 2015 12:17:46 +0200

sched, timer: Use the atomic task_cputime in thread_group_cputimer

Recent optimizations were made to thread_group_cputimer to improve its
scalability by keeping track of cputime stats without a lock. However,
the values were open coded directly in the structure, leaving them at a
different abstraction level from the regular task_cputime structure.
Furthermore, any subsequent similar optimizations would not be able to
share the new code, since the fields are specific to thread_group_cputimer.

This patch adds the new task_cputime_atomic data structure (introduced in
the previous patch in the series) to thread_group_cputimer for keeping
track of the cputime atomically, which also helps generalize the code.
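
For reference, the task_cputime_atomic structure and its initializer, as
introduced by the previous patch in this series, look roughly like this
(a sketch matching the fields and the INIT_CPUTIME_ATOMIC initializer
visible in the diff below):

    /* Atomic variant of task_cputime: one atomic64_t per field. */
    struct task_cputime_atomic {
            atomic64_t utime;
            atomic64_t stime;
            atomic64_t sum_exec_runtime;
    };

    #define INIT_CPUTIME_ATOMIC                            \
            (struct task_cputime_atomic) {                 \
                    .utime = ATOMIC64_INIT(0),             \
                    .stime = ATOMIC64_INIT(0),             \
                    .sum_exec_runtime = ATOMIC64_INIT(0),  \
            }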

Suggested-by: Ingo Molnar <mi...@kernel.org>
Signed-off-by: Jason Low <jason.l...@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Acked-by: Thomas Gleixner <t...@linutronix.de>
Acked-by: Rik van Riel <r...@redhat.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <as...@hp.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Davidlohr Bueso <d...@stgolabs.net>
Cc: Frederic Weisbecker <fweis...@gmail.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Mel Gorman <mgor...@suse.de>
Cc: Mike Galbraith <umgwanakikb...@gmail.com>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Preeti U Murthy <pre...@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.nor...@hp.com>
Cc: Steven Rostedt <rost...@goodmis.org>
Cc: Waiman Long <waiman.l...@hp.com>
Link: http://lkml.kernel.org/r/1430251224-5764-6-git-send-email-jason.l...@hp.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/init_task.h      |  6 ++----
 include/linux/sched.h          |  4 +---
 kernel/sched/stats.h           |  6 +++---
 kernel/time/posix-cpu-timers.c | 26 +++++++++++++-------------
 4 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7b9d8b5..bb9b075 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -50,10 +50,8 @@ extern struct fs_struct init_fs;
        .cpu_timers     = INIT_CPU_TIMERS(sig.cpu_timers),              \
        .rlim           = INIT_RLIMITS,                                 \
        .cputimer       = {                                             \
-               .utime            = ATOMIC64_INIT(0),                   \
-               .stime            = ATOMIC64_INIT(0),                   \
-               .sum_exec_runtime = ATOMIC64_INIT(0),                   \
-               .running          = 0                                   \
+               .cputime_atomic = INIT_CPUTIME_ATOMIC,                  \
+               .running        = 0,                                    \
        },                                                              \
        .cred_guard_mutex =                                             \
                 __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6eb78cd..4adc536 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -615,9 +615,7 @@ struct task_cputime_atomic {
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
-       atomic64_t utime;
-       atomic64_t stime;
-       atomic64_t sum_exec_runtime;
+       struct task_cputime_atomic cputime_atomic;
        int running;
 };
 
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index c6d1c7d..077ebbd 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -216,7 +216,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
        if (!cputimer_running(tsk))
                return;
 
-       atomic64_add(cputime, &cputimer->utime);
+       atomic64_add(cputime, &cputimer->cputime_atomic.utime);
 }
 
 /**
@@ -237,7 +237,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
        if (!cputimer_running(tsk))
                return;
 
-       atomic64_add(cputime, &cputimer->stime);
+       atomic64_add(cputime, &cputimer->cputime_atomic.stime);
 }
 
 /**
@@ -258,5 +258,5 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
        if (!cputimer_running(tsk))
                return;
 
-       atomic64_add(ns, &cputimer->sum_exec_runtime);
+       atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
 }
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index d857306..892e3da 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -211,20 +211,20 @@ retry:
        }
 }
 
-static void update_gt_cputime(struct thread_group_cputimer *cputimer, struct task_cputime *sum)
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
 {
-       __update_gt_cputime(&cputimer->utime, sum->utime);
-       __update_gt_cputime(&cputimer->stime, sum->stime);
-       __update_gt_cputime(&cputimer->sum_exec_runtime, sum->sum_exec_runtime);
+       __update_gt_cputime(&cputime_atomic->utime, sum->utime);
+       __update_gt_cputime(&cputime_atomic->stime, sum->stime);
+       __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
 }
 
-/* Sample thread_group_cputimer values in "cputimer", store results in "times". */
-static inline void sample_group_cputimer(struct task_cputime *times,
-                                         struct thread_group_cputimer *cputimer)
+/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
+static inline void sample_cputime_atomic(struct task_cputime *times,
+                                        struct task_cputime_atomic *atomic_times)
 {
-       times->utime = atomic64_read(&cputimer->utime);
-       times->stime = atomic64_read(&cputimer->stime);
-       times->sum_exec_runtime = atomic64_read(&cputimer->sum_exec_runtime);
+       times->utime = atomic64_read(&atomic_times->utime);
+       times->stime = atomic64_read(&atomic_times->stime);
+       times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
@@ -240,7 +240,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
                 * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
-               update_gt_cputime(cputimer, &sum);
+               update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
                /*
                 * We're setting cputimer->running without a lock. Ensure
@@ -251,7 +251,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
                 */
                WRITE_ONCE(cputimer->running, 1);
        }
-       sample_group_cputimer(times, cputimer);
+       sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
 
 /*
@@ -1137,7 +1137,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
        if (READ_ONCE(sig->cputimer.running)) {
                struct task_cputime group_sample;
 
-               sample_group_cputimer(&group_sample, &sig->cputimer);
+               sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
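
For context, and not part of this diff: the __update_gt_cputime() helper used
above was added earlier in this series as a lockless compare-and-exchange
retry loop, roughly along these lines (a sketch based on that earlier patch):

    static void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
    {
            u64 curr_cputime;
    retry:
            /* Only advance the atomic value if the freshly summed time is larger. */
            curr_cputime = atomic64_read(cputime);
            if (sum_cputime > curr_cputime) {
                    if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
                            goto retry;
            }
    }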
--