Now that most cputime readers use the transition API, which returns the
task cputime in the old cputime_t type, we can safely store the cputime
in nsecs. This will eventually make cputime statistics less opaque and
more fine-grained. Back-and-forth conversions between cputime_t and
nsecs, previously needed to cope with cputime_t's arbitrary granularity,
won't be required anymore.
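
For illustration, here is a minimal userspace sketch (not kernel code) of
the precision loss those round trips caused. It assumes a jiffies-based
cputime_t with HZ=100, i.e. 10ms ticks; the real granularity is
architecture- and config-dependent, so the numbers are an example only:

    #include <stdio.h>
    #include <stdint.h>

    #define HZ            100                    /* assumed tick rate */
    #define NSEC_PER_SEC  1000000000ULL
    #define NSEC_PER_TICK (NSEC_PER_SEC / HZ)    /* 10,000,000 ns */

    typedef uint64_t cputime_t;   /* ticks, as on jiffies-based archs */

    static cputime_t nsecs_to_cputime(uint64_t ns)
    {
            return ns / NSEC_PER_TICK;    /* sub-tick time is dropped */
    }

    static uint64_t cputime_to_nsecs(cputime_t ct)
    {
            return ct * NSEC_PER_TICK;
    }

    int main(void)
    {
            uint64_t ns = 123456789;      /* ~123.5 ms of CPU time */
            uint64_t rt = cputime_to_nsecs(nsecs_to_cputime(ns));

            /* Prints 123456789 vs 120000000: a single round trip
             * already dropped ~3.5 ms. Storing nsecs directly makes
             * such conversions unnecessary. */
            printf("original:  %llu ns\n", (unsigned long long)ns);
            printf("roundtrip: %llu ns\n", (unsigned long long)rt);
            return 0;
    }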

Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Heiko Carstens <heiko.carst...@de.ibm.com>
Cc: Martin Schwidefsky <schwidef...@de.ibm.com>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Stanislaw Gruszka <sgrus...@redhat.com>
Cc: Wanpeng Li <wanpeng...@hotmail.com>
Signed-off-by: Frederic Weisbecker <fweis...@gmail.com>
---
 arch/alpha/kernel/osf_sys.c |  4 ++--
 arch/powerpc/kernel/time.c  |  4 ++--
 arch/s390/kernel/vtime.c    |  6 ++---
 arch/x86/kvm/hyperv.c       |  5 +++--
 fs/binfmt_elf.c             | 11 +++++++--
 fs/binfmt_elf_fdpic.c       |  4 ++--
 fs/proc/array.c             | 10 ++++-----
 include/linux/sched.h       | 55 ++++++++++++++++++++++++++++-----------------
 kernel/exit.c               |  4 ++--
 kernel/sched/cputime.c      | 35 ++++++++++++++---------------
 kernel/signal.c             |  4 ++--
 kernel/sys.c                | 16 ++++++-------
 12 files changed, 89 insertions(+), 69 deletions(-)

diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index c26d631..0ccf1d1 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1155,8 +1155,8 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
                r.ru_majflt = current->maj_flt;
                break;
        case RUSAGE_CHILDREN:
-               utime_jiffies = cputime_to_jiffies(current->signal->cutime);
-               stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+               utime_jiffies = nsecs_to_jiffies(current->signal->cutime);
+               stime_jiffies = nsecs_to_jiffies(current->signal->cstime);
                jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
                jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
                r.ru_minflt = current->signal->cmin_flt;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be9751f..19361fb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -359,7 +359,7 @@ void vtime_account_system(struct task_struct *tsk)
 
        delta = vtime_delta(tsk, &sys_scaled, &stolen);
        account_system_time(tsk, 0, delta);
-       tsk->stimescaled += sys_scaled;
+       tsk->stimescaled += cputime_to_nsecs(sys_scaled);
        if (stolen)
                account_steal_time(stolen);
 }
@@ -393,7 +393,7 @@ void vtime_account_user(struct task_struct *tsk)
        acct->user_time_scaled = 0;
        acct->utime_sspurr = 0;
        account_user_time(tsk, utime);
-       tsk->utimescaled += utimescaled;
+       tsk->utimescaled += cputime_to_nsecs(utimescaled);
 }
 
 #ifdef CONFIG_PPC32
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1bd5dde..4905e8c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -138,9 +138,9 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
                system_scaled = (system_scaled * mult) / div;
        }
        account_user_time(tsk, user);
-       tsk->utimescaled += user_scaled;
+       tsk->utimescaled += cputime_to_nsecs(user_scaled);
        account_system_time(tsk, hardirq_offset, system);
-       tsk->stimescaled += system_scaled;
+       tsk->stimescaled += cputime_to_nsecs(system_scaled);
 
        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
@@ -205,7 +205,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
                system_scaled = (system_scaled * mult) / div;
        }
        account_system_time(tsk, 0, system);
-       tsk->stimescaled += system_scaled;
+       tsk->stimescaled += cputime_to_nsecs(system_scaled);
 
        virt_timer_forward(system);
 }
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 42b1c83..7ae7006 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -958,10 +958,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 /* Calculate cpu time spent by current task in 100ns units */
 static u64 current_task_runtime_100ns(void)
 {
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        task_cputime_adjusted(current, &utime, &stime);
-       return div_u64(cputime_to_nsecs(utime + stime), 100);
+
+       return div_u64(utime + stime, 100);
 }
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0f62ac5..720a88d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1411,6 +1411,8 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
 static void fill_prstatus(struct elf_prstatus *prstatus,
                struct task_struct *p, long signr)
 {
+       struct timeval tv;
+
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
@@ -1437,8 +1439,13 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                cputime_to_timeval(utime, &prstatus->pr_utime);
                cputime_to_timeval(stime, &prstatus->pr_stime);
        }
-       cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-       cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+       tv = ns_to_timeval(p->signal->cutime);
+       prstatus->pr_cutime.tv_sec = tv.tv_sec;
+       prstatus->pr_cutime.tv_usec = tv.tv_usec;
+
+       tv = ns_to_timeval(p->signal->cstime);
+       prstatus->pr_cstime.tv_sec = tv.tv_sec;
+       prstatus->pr_cstime.tv_usec = tv.tv_usec;
 }
 
 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 29e175d..977b731 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1358,8 +1358,8 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                cputime_to_timeval(utime, &prstatus->pr_utime);
                cputime_to_timeval(stime, &prstatus->pr_stime);
        }
-       cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-       cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+       prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
+       prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
 
        prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
        prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 2a7b0ed..697e424 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -400,7 +400,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        unsigned long long start_time;
        unsigned long cmin_flt = 0, cmaj_flt = 0;
        unsigned long  min_flt = 0,  maj_flt = 0;
-       cputime_t cutime, cstime, utime, stime;
+       u64 cutime, cstime, utime, stime;
        u64 cgtime, gtime;
        unsigned long rsslim = 0;
        char tcomm[sizeof(task->comm)];
@@ -496,10 +496,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, " ", cmin_flt);
        seq_put_decimal_ull(m, " ", maj_flt);
        seq_put_decimal_ull(m, " ", cmaj_flt);
-       seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime));
-       seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime));
-       seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime));
-       seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime));
+       seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime));
+       seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime));
+       seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime));
+       seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime));
        seq_put_decimal_ll(m, " ", priority);
        seq_put_decimal_ll(m, " ", nice);
        seq_put_decimal_ll(m, " ", num_threads);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6cf884d..c1f7f6d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -583,8 +583,8 @@ struct cpu_itimer {
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-       cputime_t utime;
-       cputime_t stime;
+       u64 utime;
+       u64 stime;
        raw_spinlock_t lock;
 #endif
 };
@@ -599,8 +599,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 
 /**
  * struct task_cputime - collected CPU time counts
- * @utime:             time spent in user mode, in &cputime_t units
- * @stime:             time spent in kernel mode, in &cputime_t units
+ * @utime:             time spent in user mode, in nanoseconds
+ * @stime:             time spent in kernel mode, in nanoseconds
  * @sum_exec_runtime:  total time spent on the CPU, in nanoseconds
  *
  * This structure groups together three kinds of CPU time that are tracked for
@@ -608,8 +608,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
  * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
-       cputime_t utime;
-       cputime_t stime;
+       u64 utime;
+       u64 stime;
        unsigned long long sum_exec_runtime;
 };
 
@@ -778,7 +778,7 @@ struct signal_struct {
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
-       cputime_t utime, stime, cutime, cstime;
+       u64 utime, stime, cutime, cstime;
        u64 gtime;
        u64 cgtime;
        struct prev_cputime prev_cputime;
@@ -1647,9 +1647,9 @@ struct task_struct {
        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
 
-       cputime_t utime, stime;
+       u64 utime, stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-       cputime_t utimescaled, stimescaled;
+       u64 utimescaled, stimescaled;
 #endif
        u64 gtime;
        struct prev_cputime prev_cputime;
@@ -2242,11 +2242,11 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
-                        cputime_t *utime, cputime_t *stime);
+                        u64 *utime, u64 *stime);
 extern u64 task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
-                               cputime_t *utime, cputime_t *stime)
+                               u64 *utime, u64 *stime)
 {
        *utime = t->utime;
        *stime = t->stime;
@@ -2260,16 +2260,16 @@ static inline u64 task_gtime(struct task_struct *t)
 
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 static inline void task_cputime_scaled(struct task_struct *t,
-                                      cputime_t *utimescaled,
-                                      cputime_t *stimescaled)
+                                      u64 *utimescaled,
+                                      u64 *stimescaled)
 {
        *utimescaled = t->utimescaled;
        *stimescaled = t->stimescaled;
 }
 #else
 static inline void task_cputime_scaled(struct task_struct *t,
-                                      cputime_t *utimescaled,
-                                      cputime_t *stimescaled)
+                                      u64 *utimescaled,
+                                      u64 *stimescaled)
 {
        task_cputime(t, utimescaled, stimescaled);
 }
@@ -2278,18 +2278,26 @@ static inline void task_cputime_scaled(struct task_struct *t,
 static inline void task_cputime_t(struct task_struct *t,
                                  cputime_t *utime, cputime_t *stime)
 {
-       task_cputime(t, utime, stime);
+       u64 ut, st;
+
+       task_cputime(t, &ut, &st);
+       *utime = nsecs_to_cputime(ut);
+       *stime = nsecs_to_cputime(st);
 }
 
 static inline void task_cputime_t_scaled(struct task_struct *t,
                                         cputime_t *utimescaled,
                                         cputime_t *stimescaled)
 {
-       task_cputime_scaled(t, utimescaled, stimescaled);
+       u64 ut, st;
+
+       task_cputime_scaled(t, &ut, &st);
+       *utimescaled = nsecs_to_cputime(ut);
+       *stimescaled = nsecs_to_cputime(st);
 }
 
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 
 /*
  * Per process flags
@@ -3493,9 +3501,14 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times);
 
 static inline void thread_group_cputime_t(struct task_struct *tsk,
-                                         struct task_cputime_t *times)
+                                         struct task_cputime_t *cputime)
 {
-       thread_group_cputime(tsk, (struct task_cputime *)times);
+       struct task_cputime times;
+
+       thread_group_cputime(tsk, &times);
+       cputime->utime = nsecs_to_cputime(times.utime);
+       cputime->stime = nsecs_to_cputime(times.stime);
+       cputime->sum_exec_runtime = times.sum_exec_runtime;
 }
 
 /*
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d68c45..ab84e72 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,7 @@ static void __exit_signal(struct task_struct *tsk)
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
-       cputime_t utime, stime;
+       u64 utime, stime;
 
        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
@@ -1079,7 +1079,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                struct signal_struct *sig = p->signal;
                struct signal_struct *psig = current->signal;
                unsigned long maxrss;
-               cputime_t tgutime, tgstime;
+               u64 tgutime, tgstime;
 
                /*
                 * The resource counters for the group leader are in its
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 647c0fc..26cd477 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -134,7 +134,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
        int index;
 
        /* Add user time to process. */
-       p->utime += cputime;
+       p->utime += cputime_to_nsecs(cputime);
        account_group_user_time(p, cputime);
 
        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
@@ -156,7 +156,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        /* Add guest time to process. */
-       p->utime += cputime;
+       p->utime += cputime_to_nsecs(cputime);
        account_group_user_time(p, cputime);
        p->gtime += cputime_to_nsecs(cputime);
 
@@ -180,7 +180,7 @@ static inline
 void __account_system_time(struct task_struct *p, cputime_t cputime, int index)
 {
        /* Add system time to process. */
-       p->stime += cputime;
+       p->stime += cputime_to_nsecs(cputime);
        account_group_system_time(p, cputime);
 
        /* Add system time to cpustat. */
@@ -315,7 +315,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
        struct signal_struct *sig = tsk->signal;
-       cputime_t utime, stime;
+       u64 utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;
@@ -467,14 +467,14 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        *ut = p->utime;
        *st = p->stime;
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        struct task_cputime cputime;
 
@@ -545,7 +545,7 @@ void account_idle_ticks(unsigned long ticks)
  * Perform (stime * rtime) / total, but avoid multiplication overflow by
  * loosing precision when the numbers are big.
  */
-static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
+static u64 scale_stime(u64 stime, u64 rtime, u64 total)
 {
        u64 scaled;
 
@@ -582,7 +582,7 @@ static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
-       return (__force cputime_t) scaled;
+       return scaled;
 }
 
 /*
@@ -607,14 +607,14 @@ static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
  */
 static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
-                          cputime_t *ut, cputime_t *st)
+                          u64 *ut, u64 *st)
 {
-       cputime_t rtime, stime, utime;
+       u64 rtime, stime, utime;
        unsigned long flags;
 
        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
-       rtime = nsecs_to_cputime(curr->sum_exec_runtime);
+       rtime = curr->sum_exec_runtime;
 
        /*
         * This is possible under two circumstances:
@@ -645,8 +645,7 @@ static void cputime_adjust(struct task_cputime *curr,
                goto update;
        }
 
-       stime = scale_stime((__force u64)stime, (__force u64)rtime,
-                           (__force u64)(stime + utime));
+       stime = scale_stime(stime, rtime, stime + utime);
 
 update:
        /*
@@ -679,7 +678,7 @@ static void cputime_adjust(struct task_cputime *curr,
        raw_spin_unlock_irqrestore(&prev->lock, flags);
 }
 
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
@@ -690,7 +689,7 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 
-void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 {
        struct task_cputime cputime;
 
@@ -851,9 +850,9 @@ u64 task_gtime(struct task_struct *t)
  * add up the pending nohz execution time since the last
  * cputime snapshot.
  */
-void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 {
-       cputime_t delta;
+       u64 delta;
        unsigned int seq;
 
        if (!vtime_accounting_enabled()) {
@@ -872,7 +871,7 @@ void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
                if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
                        continue;
 
-               delta = vtime_delta(t);
+               delta = cputime_to_nsecs(vtime_delta(t));
 
                /*
                 * Task runs either in user or kernel space, add pending nohz time to
diff --git a/kernel/signal.c b/kernel/signal.c
index db189b4..5d5f6f7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1614,8 +1614,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        rcu_read_unlock();
 
        task_cputime_t(tsk, &utime, &stime);
-       info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
-       info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
+       info.si_utime = cputime_to_clock_t(utime + nsecs_to_cputime(tsk->signal->utime));
+       info.si_stime = cputime_to_clock_t(stime + nsecs_to_cputime(tsk->signal->stime));
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
diff --git a/kernel/sys.c b/kernel/sys.c
index 89d5be4..0dd8031 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -881,15 +881,15 @@ SYSCALL_DEFINE0(getegid)
 
 void do_sys_times(struct tms *tms)
 {
-       cputime_t tgutime, tgstime, cutime, cstime;
+       u64 tgutime, tgstime, cutime, cstime;
 
        thread_group_cputime_adjusted(current, &tgutime, &tgstime);
        cutime = current->signal->cutime;
        cstime = current->signal->cstime;
-       tms->tms_utime = cputime_to_clock_t(tgutime);
-       tms->tms_stime = cputime_to_clock_t(tgstime);
-       tms->tms_cutime = cputime_to_clock_t(cutime);
-       tms->tms_cstime = cputime_to_clock_t(cstime);
+       tms->tms_utime = nsec_to_clock_t(tgutime);
+       tms->tms_stime = nsec_to_clock_t(tgstime);
+       tms->tms_cutime = nsec_to_clock_t(cutime);
+       tms->tms_cstime = nsec_to_clock_t(cstime);
 }
 
 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
@@ -1543,7 +1543,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
        struct task_struct *t;
        unsigned long flags;
-       cputime_t tgutime, tgstime, utime, stime;
+       u64 tgutime, tgstime, utime, stime;
        unsigned long maxrss = 0;
 
        memset((char *)r, 0, sizeof (*r));
@@ -1599,8 +1599,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
        unlock_task_sighand(p, &flags);
 
 out:
-       cputime_to_timeval(utime, &r->ru_utime);
-       cputime_to_timeval(stime, &r->ru_stime);
+       r->ru_utime = ns_to_timeval(utime);
+       r->ru_stime = ns_to_timeval(stime);
 
        if (who != RUSAGE_CHILDREN) {
                struct mm_struct *mm = get_task_mm(p);
-- 
2.7.4
