Re: [PATCH 3/5] sched: Rename vtime fields

2017-06-29 Thread Rik van Riel
On Thu, 2017-06-29 at 19:15 +0200, Frederic Weisbecker wrote:
> The current "snapshot" based naming of the vtime fields suggests that
> we record some past event, but that is a low-level picture of their
> actual purpose which comes out blurry. The real point of these fields
> is to run a basic state machine that tracks where cputime must be
> accounted while switching between contexts.
> 
> So let's reflect that with more meaningful names.
> 
> Cc: Wanpeng Li 
> Cc: Rik van Riel 
> Cc: Peter Zijlstra 
> Cc: Thomas Gleixner 
> Cc: Ingo Molnar 
> Cc: Luiz Capitulino 
> Signed-off-by: Frederic Weisbecker 

Acked-by: Rik van Riel 


[PATCH 3/5] sched: Rename vtime fields

2017-06-29 Thread Frederic Weisbecker
The current "snapshot" based naming of the vtime fields suggests that we
record some past event, but that is a low-level picture of their actual
purpose which comes out blurry. The real point of these fields is to run
a basic state machine that tracks where cputime must be accounted while
switching between contexts.

So let's reflect that with more meaningful names.
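
For readers skimming the archive, here is a rough standalone sketch of the
state machine the renamed fields describe. The struct and helper below are
illustrative only (they are not part of this patch) and leave out the
seqcount protection and the user/system/guest accounting split:

	/* Sketch only; the real code lives in kernel/sched/cputime.c */
	enum vtime_state { VTIME_INACTIVE = 0, VTIME_USER, VTIME_SYS };

	struct vtime_sketch {
		enum vtime_state state;		/* context currently being accounted */
		unsigned long starttime;	/* jiffies when that state was entered */
		u64 utime, stime;		/* accumulated user/system time, nsecs */
	};

	/*
	 * On a context transition, flush the time spent since starttime into
	 * the bucket named by the current state, then record the new state
	 * and restart the clock. That is roughly the role vtime_state and
	 * vtime_starttime play for the real task_struct fields.
	 */
	static void vtime_switch_state(struct vtime_sketch *vt,
				       enum vtime_state next, unsigned long now)
	{
		u64 delta = jiffies_to_nsecs(now - vt->starttime);

		if (vt->state == VTIME_USER)
			vt->utime += delta;
		else if (vt->state == VTIME_SYS)
			vt->stime += delta;
		/* VTIME_INACTIVE: nothing was being accounted */

		vt->state = next;
		vt->starttime = now;
	}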

Cc: Wanpeng Li 
Cc: Rik van Riel 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: Luiz Capitulino 
Signed-off-by: Frederic Weisbecker 
---
 include/linux/init_task.h |  4 ++--
 include/linux/sched.h     |  4 ++--
 kernel/fork.c             |  4 ++--
 kernel/sched/cputime.c    | 30 +++++++++++++++---------------
 4 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e049526..3d53733 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -171,8 +171,8 @@ extern struct cred init_cred;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk)   \
.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount),  \
-   .vtime_snap = 0,\
-   .vtime_snap_whence = VTIME_SYS,
+   .vtime_starttime = 0,   \
+   .vtime_state = VTIME_SYS,
 #else
 # define INIT_VTIME(tsk)
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1f0f427..22d2d9b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -689,7 +689,7 @@ struct task_struct {
struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqcount_t  vtime_seqcount;
-   unsigned long long  vtime_snap;
+   unsigned long long  vtime_starttime;
enum {
/* Task is sleeping or running in a CPU with VTIME inactive: */
VTIME_INACTIVE = 0,
@@ -697,7 +697,7 @@ struct task_struct {
VTIME_USER,
/* Task runs in kernelspace in a CPU with VTIME active: */
VTIME_SYS,
-   } vtime_snap_whence;
+   } vtime_state;
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/fork.c b/kernel/fork.c
index e53770d..83c4f9b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1638,8 +1638,8 @@ static __latent_entropy struct task_struct *copy_process(
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
    seqcount_init(&p->vtime_seqcount);
-   p->vtime_snap = 0;
-   p->vtime_snap_whence = VTIME_INACTIVE;
+   p->vtime_starttime = 0;
+   p->vtime_state = VTIME_INACTIVE;
 #endif
 
 #if defined(SPLIT_RSS_COUNTING)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index db7ef10..6b152c2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -683,10 +683,10 @@ static u64 vtime_delta(struct task_struct *tsk)
 {
unsigned long now = READ_ONCE(jiffies);
 
-   if (time_before(now, (unsigned long)tsk->vtime_snap))
+   if (time_before(now, (unsigned long)tsk->vtime_starttime))
return 0;
 
-   return jiffies_to_nsecs(now - tsk->vtime_snap);
+   return jiffies_to_nsecs(now - tsk->vtime_starttime);
 }
 
 static u64 get_vtime_delta(struct task_struct *tsk)
@@ -701,10 +701,10 @@ static u64 get_vtime_delta(struct task_struct *tsk)
 * elapsed time. Limit account_other_time to prevent rounding
 * errors from causing elapsed vtime to go negative.
 */
-   delta = jiffies_to_nsecs(now - tsk->vtime_snap);
+   delta = jiffies_to_nsecs(now - tsk->vtime_starttime);
other = account_other_time(delta);
-   WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
-   tsk->vtime_snap = now;
+   WARN_ON_ONCE(tsk->vtime_state == VTIME_INACTIVE);
+   tsk->vtime_starttime = now;
 
return delta - other;
 }
@@ -746,7 +746,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 {
/*
 * The flags must be updated under the lock with
-* the vtime_snap flush and update.
+* the vtime_starttime flush and update.
 * That enforces a right ordering and update sequence
 * synchronization against the reader (task_gtime())
 * that can thus safely catch up with a tickless delta.
@@ -776,12 +776,12 @@ void vtime_account_idle(struct task_struct *tsk)
 void arch_vtime_task_switch(struct task_struct *prev)
 {
    write_seqcount_begin(&prev->vtime_seqcount);
-   prev->vtime_snap_whence = VTIME_INACTIVE;
+   prev->vtime_state = VTIME_INACTIVE;
    write_seqcount_end(&prev->vtime_seqcount);
 
    write_seqcount_begin(&current->vtime_seqcount);
-   current->vtime_snap_whence = VTIME_SYS;
-   current->vtime_snap = jiffies;
+   current->vtime_state = VTIME_SYS;
+   current->vtime_starttime = jiffies;
    write_seqcount_end(&current->vtime_seqcount);
 }
 
@@ -791,8 +791,8 @@ void vtime_init_idle(struct task_struct *t, int cpu)
 
local_irq_save(flags);