The following commit has been merged into the sched/urgent branch of tip:

Commit-ID:     dac9f027b1096c5f03ca583e787aac0f852e8f78
Gitweb:        https://git.kernel.org/tip/dac9f027b1096c5f03ca583e787aac0f852e8f78
Author:        Qian Cai <[email protected]>
AuthorDate:    Mon, 16 Sep 2019 17:19:35 -04:00
Committer:     Ingo Molnar <[email protected]>
CommitterDate: Tue, 17 Sep 2019 09:55:02 +02:00

sched/fair: Remove unused cfs_rq_clock_task() function

cfs_rq_clock_task() was first introduced and used in:

  f1b17280efbd ("sched: Maintain runnable averages across throttled periods")

Over time, its use has been gradually removed by the following commits:

  d31b1a66cbe0 ("sched/fair: Factorize PELT update")
  23127296889f ("sched/fair: Update scale invariance of PELT")

Today, there are no users left, so it can be safely removed.

Found via the -Wunused-function build warning.
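
For readers unfamiliar with the helper being deleted below: it returned the
runqueue's task clock with any time the cfs_rq spent throttled subtracted,
and it stood still while the group was throttled. A minimal user-space
analogue of that bookkeeping (a sketch only; the struct and function names
here are illustrative, not kernel API):

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative stand-in for a cfs_rq's throttle bookkeeping. */
  struct demo_rq {
          uint64_t clock_task;            /* raw task clock, in ns       */
          int      throttle_count;        /* > 0 while throttled         */
          uint64_t throttled_clock_task;  /* clock when throttling began */
          uint64_t throttled_time;        /* total time spent throttled  */
  };

  /*
   * Analogue of the removed cfs_rq_clock_task(): a clock that excludes
   * throttled time and is frozen while the group is throttled.
   */
  static uint64_t demo_clock_task(const struct demo_rq *rq)
  {
          if (rq->throttle_count)
                  return rq->throttled_clock_task - rq->throttled_time;

          return rq->clock_task - rq->throttled_time;
  }

  int main(void)
  {
          struct demo_rq rq = { .clock_task = 1000 };

          printf("%llu\n", (unsigned long long)demo_clock_task(&rq)); /* 1000 */

          /* throttle at t = 1000, let 500 ns pass */
          rq.throttle_count = 1;
          rq.throttled_clock_task = 1000;
          rq.clock_task = 1500;
          printf("%llu\n", (unsigned long long)demo_clock_task(&rq)); /* still 1000 */

          /* unthrottle: account the 500 ns spent throttled */
          rq.throttle_count = 0;
          rq.throttled_time += 500;
          printf("%llu\n", (unsigned long long)demo_clock_task(&rq)); /* 1000, resumes */

          return 0;
  }

The real helper did the same thing with rq_clock_task() and the cfs_rq's
throttle fields, as the removed hunk below shows.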

Signed-off-by: Qian Cai <[email protected]>
Cc: Ben Segall <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vincent Guittot <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
[ Rewrote the changelog. ]
Signed-off-by: Ingo Molnar <[email protected]>
---
 kernel/sched/fair.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d4bbf68..3101c66 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -749,7 +749,6 @@ void init_entity_runnable_average(struct sched_entity *se)
        /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 static void attach_entity_cfs_rq(struct sched_entity *se);
 
 /*
@@ -4376,15 +4375,6 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
        return &tg->cfs_bandwidth;
 }
 
-/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
-{
-       if (unlikely(cfs_rq->throttle_count))
-               return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
-
-       return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
-}
-
 /* returns 0 on failure to allocate runtime */
 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4476,7 +4466,6 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 
        cfs_rq->throttle_count--;
        if (!cfs_rq->throttle_count) {
-               /* adjust cfs_rq_clock_task() */
                cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
                                             cfs_rq->throttled_clock_task;
 
@@ -5080,11 +5069,6 @@ static inline bool cfs_bandwidth_used(void)
        return false;
 }
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
-{
-       return rq_clock_task(rq_of(cfs_rq));
-}
-
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
