vtime_account() doesn't play the same role under
CONFIG_VIRT_CPU_ACCOUNTING and CONFIG_IRQ_TIME_ACCOUNTING.

In the first case it handles time accounting in any context. In
the second case it only handles irq time accounting.

So when vtime_account() is called from outside vtime_account_irq_*(),
the call is pointless under CONFIG_IRQ_TIME_ACCOUNTING.

To fix the confusion, rename vtime_account() to irqtime_account_irq()
in the CONFIG_IRQ_TIME_ACCOUNTING case. This way we ensure that future
vtime_account() calls won't waste cycles in the irqtime APIs.
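
For reference, the resulting #ifdef layering in <linux/hardirq.h> boils
down to the condensed sketch below (taken from the hunks that follow;
vtime_account_irq_exit() mirrors the _enter() variants and the rest of
the CONFIG_VIRT_CPU_ACCOUNTING declarations are elided):

  #ifdef CONFIG_VIRT_CPU_ACCOUNTING
  /* Full vtime accounting: vtime_account() covers any context */
  extern void vtime_account(struct task_struct *tsk);
  /* ... */
  #else /* !CONFIG_VIRT_CPU_ACCOUNTING */

  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  /* Irq time only: the irq entry/exit hooks bypass vtime_account() */
  extern void irqtime_account_irq(struct task_struct *tsk);

  static inline void vtime_account_irq_enter(struct task_struct *tsk,
                                             unsigned long offset)
  {
          irqtime_account_irq(tsk);
  }
  #else /* !CONFIG_IRQ_TIME_ACCOUNTING */
  static inline void vtime_account_irq_enter(struct task_struct *tsk,
                                             unsigned long offset) { }
  #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

  #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */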

Signed-off-by: Frederic Weisbecker <fweis...@gmail.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Thomas Gleixner <t...@linutronix.de>
---
 include/linux/hardirq.h |   26 ++++++++++++--------------
 kernel/sched/cputime.c  |    8 ++++----
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index c126ffb..dc2052c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -131,17 +131,8 @@ extern void synchronize_irq(unsigned int irq);
 
 struct task_struct;
 
-#ifdef CONFIG_TICK_CPU_ACCOUNTING
-static inline void vtime_account(struct task_struct *tsk) { }
-static inline void vtime_account_irq_enter(struct task_struct *tsk,
-                                          unsigned long offset) { }
-static inline void vtime_account_irq_exit(struct task_struct *tsk,
-                                         unsigned long offset) { }
-#else /* !CONFIG_TICK_CPU_ACCOUNTING */
-extern void vtime_account(struct task_struct *tsk);
-#endif /* !CONFIG_TICK_CPU_ACCOUNTING */
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_account(struct task_struct *tsk);
 extern void vtime_task_switch(struct task_struct *prev);
 extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
@@ -174,21 +165,28 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk,
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+extern void irqtime_account_irq(struct task_struct *tsk);
+
 static inline void vtime_account_irq_enter(struct task_struct *tsk,
                                           unsigned long offset)
 {
-       vtime_account(tsk);
+       irqtime_account_irq(tsk);
 }
 
 static inline void vtime_account_irq_exit(struct task_struct *tsk,
                                          unsigned long offset)
 {
-       vtime_account(tsk);
+       irqtime_account_irq(tsk);
 }
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING */
+static inline void vtime_account_irq_enter(struct task_struct *tsk,
+                                          unsigned long offset) { }
+static inline void vtime_account_irq_exit(struct task_struct *tsk,
+                                         unsigned long offset) { }
+#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 
 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 81b763b..7ad407a 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -10,11 +10,11 @@
 
 /*
  * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in vtime_account, on corresponding CPU
+ * They are only modified in irqtime_account_irq, on corresponding CPU
  * with interrupts disabled. So, writes are safe.
  * They are read and saved off onto struct rq in update_rq_clock().
  * This may result in other CPU reading this CPU's irq time and can
- * race with irq/vtime_account on this CPU. We would either get old
+ * race with irqtime_account_irq on this CPU. We would either get old
  * or new value with a side effect of accounting a slice of irq time to wrong
  * task when irq is in progress while we read rq->clock. That is a worthy
  * compromise in place of having locks on each irq in account_system_time.
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
  * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
  */
-void vtime_account(struct task_struct *curr)
+void irqtime_account_irq(struct task_struct *curr)
 {
        unsigned long flags;
        s64 delta;
@@ -73,7 +73,7 @@ void vtime_account(struct task_struct *curr)
        irq_time_write_end();
        local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
 static int irqtime_account_hi_update(void)
 {
-- 
1.7.5.4
