Use the new nsec based cputime accessors as part of the whole cputime
conversion from cputime_t to nsecs.

Also convert itimers to use nsec based internal counters. This simplifies
the code and removes the whole game with error/incr_error which served to
deal with cputime_t random granularity.

Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Heiko Carstens <heiko.carst...@de.ibm.com>
Cc: Martin Schwidefsky <schwidef...@de.ibm.com>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Stanislaw Gruszka <sgrus...@redhat.com>
Cc: Wanpeng Li <wanpeng...@hotmail.com>
Signed-off-by: Frederic Weisbecker <fweis...@gmail.com>
---
 include/linux/posix-timers.h   |  2 +-
 include/linux/sched.h          |  6 ++--
 include/trace/events/timer.h   | 26 ++++++++---------
 kernel/time/itimer.c           | 64 +++++++++++++++---------------------------
 kernel/time/posix-cpu-timers.c | 43 +++++++++++-----------------
 5 files changed, 55 insertions(+), 86 deletions(-)

diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 890de52..64aa189 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -119,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
 void posix_cpu_timers_exit(struct task_struct *task);
 void posix_cpu_timers_exit_group(struct task_struct *task);
 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
-                          cputime_t *newval, cputime_t *oldval);
+                          u64 *newval, u64 *oldval);
 
 long clock_nanosleep_restart(struct restart_block *restart_block);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 148a0a6..a5081ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -566,10 +566,8 @@ struct pacct_struct {
 };
 
 struct cpu_itimer {
-       cputime_t expires;
-       cputime_t incr;
-       u32 error;
-       u32 incr_error;
+       u64 expires;
+       u64 incr;
 };
 
 /**
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6..c636b51 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -271,17 +271,17 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
 TRACE_EVENT(itimer_state,
 
        TP_PROTO(int which, const struct itimerval *const value,
-                cputime_t expires),
+                unsigned long long expires),
 
        TP_ARGS(which, value, expires),
 
        TP_STRUCT__entry(
-               __field(        int,            which           )
-               __field(        cputime_t,      expires         )
-               __field(        long,           value_sec       )
-               __field(        long,           value_usec      )
-               __field(        long,           interval_sec    )
-               __field(        long,           interval_usec   )
+               __field(        int,                    which           )
+               __field(        unsigned long long,     expires         )
+               __field(        long,                   value_sec       )
+               __field(        long,                   value_usec      )
+               __field(        long,                   interval_sec    )
+               __field(        long,                   interval_usec   )
        ),
 
        TP_fast_assign(
@@ -294,7 +294,7 @@ TRACE_EVENT(itimer_state,
        ),
 
        TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
-                 __entry->which, (unsigned long long)__entry->expires,
+                 __entry->which, __entry->expires,
                  __entry->value_sec, __entry->value_usec,
                  __entry->interval_sec, __entry->interval_usec)
 );
@@ -307,14 +307,14 @@ TRACE_EVENT(itimer_state,
  */
 TRACE_EVENT(itimer_expire,
 
-       TP_PROTO(int which, struct pid *pid, cputime_t now),
+       TP_PROTO(int which, struct pid *pid, unsigned long long now),
 
        TP_ARGS(which, pid, now),
 
        TP_STRUCT__entry(
-               __field( int ,          which   )
-               __field( pid_t,         pid     )
-               __field( cputime_t,     now     )
+               __field( int ,                  which   )
+               __field( pid_t,                 pid     )
+               __field( unsigned long long,    now     )
        ),
 
        TP_fast_assign(
@@ -324,7 +324,7 @@ TRACE_EVENT(itimer_expire,
        ),
 
        TP_printk("which=%d pid=%d now=%llu", __entry->which,
-                 (int) __entry->pid, (unsigned long long)__entry->now)
+                 (int) __entry->pid, __entry->now)
 );
 
 #ifdef CONFIG_NO_HZ_COMMON
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 20aa205..41ae174 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -45,35 +45,35 @@ static struct timeval itimer_get_remtime(struct hrtimer 
*timer)
 static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
                           struct itimerval *const value)
 {
-       cputime_t cval, cinterval;
+       u64 val, interval;
        struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
        spin_lock_irq(&tsk->sighand->siglock);
 
-       cval = it->expires;
-       cinterval = it->incr;
-       if (cval) {
+       val = it->expires;
+       interval = it->incr;
+       if (val) {
                struct task_cputime cputime;
-               cputime_t t;
+               u64 t;
 
                thread_group_cputimer(tsk, &cputime);
                if (clock_id == CPUCLOCK_PROF)
-                       t = nsecs_to_cputime(cputime.utime + cputime.stime);
+                       t = cputime.utime + cputime.stime;
                else
                        /* CPUCLOCK_VIRT */
-                       t = nsecs_to_cputime(cputime.utime);
+                       t = cputime.utime;
 
-               if (cval < t)
+               if (val < t)
                        /* about to fire */
-                       cval = cputime_one_jiffy;
+                       val = TICK_NSEC;
                else
-                       cval = cval - t;
+                       val -= t;
        }
 
        spin_unlock_irq(&tsk->sighand->siglock);
 
-       cputime_to_timeval(cval, &value->it_value);
-       cputime_to_timeval(cinterval, &value->it_interval);
+       value->it_value = ns_to_timeval(val);
+       value->it_interval = ns_to_timeval(interval);
 }
 
 int do_getitimer(int which, struct itimerval *value)
@@ -129,55 +129,35 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
-{
-       struct timespec ts;
-       s64 cpu_ns;
-
-       cputime_to_timespec(ct, &ts);
-       cpu_ns = timespec_to_ns(&ts);
-
-       return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
-}
-
 static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
                           const struct itimerval *const value,
                           struct itimerval *const ovalue)
 {
-       cputime_t cval, nval, cinterval, ninterval;
-       s64 ns_ninterval, ns_nval;
-       u32 error, incr_error;
+       u64 oval, nval, ointerval, ninterval;
        struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
-       nval = timeval_to_cputime(&value->it_value);
-       ns_nval = timeval_to_ns(&value->it_value);
-       ninterval = timeval_to_cputime(&value->it_interval);
-       ns_ninterval = timeval_to_ns(&value->it_interval);
-
-       error = cputime_sub_ns(nval, ns_nval);
-       incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+       nval = timeval_to_ns(&value->it_value);
+       ninterval = timeval_to_ns(&value->it_interval);
 
        spin_lock_irq(&tsk->sighand->siglock);
 
-       cval = it->expires;
-       cinterval = it->incr;
-       if (cval || nval) {
+       oval = it->expires;
+       ointerval = it->incr;
+       if (oval || nval) {
                if (nval > 0)
-                       nval += cputime_one_jiffy;
-               set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+                       nval += TICK_NSEC;
+               set_process_cpu_timer(tsk, clock_id, &nval, &oval);
        }
        it->expires = nval;
        it->incr = ninterval;
-       it->error = error;
-       it->incr_error = incr_error;
        trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
                           ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
 
        spin_unlock_irq(&tsk->sighand->siglock);
 
        if (ovalue) {
-               cputime_to_timeval(cval, &ovalue->it_value);
-               cputime_to_timeval(cinterval, &ovalue->it_interval);
+               ovalue->it_value = ns_to_timeval(oval);
+               ovalue->it_interval = ns_to_timeval(ointerval);
        }
 }
 
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index a02f012..2afff0d 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -21,10 +21,10 @@
  */
 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 {
-       cputime_t cputime = secs_to_cputime(rlim_new);
+       u64 nsecs = rlim_new * NSEC_PER_SEC;
 
        spin_lock_irq(&task->sighand->siglock);
-       set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+       set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
        spin_unlock_irq(&task->sighand->siglock);
 }
 
@@ -864,17 +864,11 @@ static void check_cpu_itimer(struct task_struct *tsk, 
struct cpu_itimer *it,
        if (!it->expires)
                return;
 
-       if (cur_time >= cputime_to_nsecs(it->expires)) {
-               if (it->incr) {
+       if (cur_time >= it->expires) {
+               if (it->incr)
                        it->expires += it->incr;
-                       it->error += it->incr_error;
-                       if (it->error >= TICK_NSEC) {
-                               it->expires -= cputime_one_jiffy;
-                               it->error -= TICK_NSEC;
-                       }
-               } else {
+               else
                        it->expires = 0;
-               }
 
                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
@@ -882,9 +876,8 @@ static void check_cpu_itimer(struct task_struct *tsk, 
struct cpu_itimer *it,
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }
 
-       if (it->expires && (!*expires || cputime_to_nsecs(it->expires) < 
*expires)) {
-               *expires = cputime_to_nsecs(it->expires);
-       }
+       if (it->expires && (!*expires || it->expires < *expires))
+               *expires = it->expires;
 }
 
 /*
@@ -1178,9 +1171,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
  * The tsk->sighand->siglock must be held by the caller.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
-                          cputime_t *newval, cputime_t *oldval)
+                          u64 *newval, u64 *oldval)
 {
-       u64 now, new;
+       u64 now;
 
        WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1192,33 +1185,31 @@ void set_process_cpu_timer(struct task_struct *tsk, 
unsigned int clock_idx,
                 * it to be absolute.
                 */
                if (*oldval) {
-                       if (cputime_to_nsecs(*oldval) <= now) {
+                       if (*oldval <= now) {
                                /* Just about to fire. */
-                               *oldval = cputime_one_jiffy;
+                               *oldval = TICK_NSEC;
                        } else {
-                               *oldval -= nsecs_to_cputime(now);
+                               *oldval -= now;
                        }
                }
 
                if (!*newval)
                        return;
-               *newval += nsecs_to_cputime(now);
+               *newval += now;
        }
 
-       new = cputime_to_nsecs(*newval);
-
        /*
         * Update expiration cache if we are the earliest timer, or eventually
         * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
-               if (expires_gt(tsk->signal->cputime_expires.prof_exp, new))
-                       tsk->signal->cputime_expires.prof_exp = new;
+               if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
+                       tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
-               if (expires_gt(tsk->signal->cputime_expires.virt_exp, new))
-                       tsk->signal->cputime_expires.virt_exp = new;
+               if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
+                       tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }
 
-- 
2.7.4

Reply via email to