[tip: sched/urgent] sched/fair: Fix -Wunused-but-set-variable warnings

2019-09-27 Thread tip-bot2 for Qian Cai
The following commit has been merged into the sched/urgent branch of tip:

Commit-ID: 763a9ec06c409dcde2a761aac4bb83ff3938e0b3
Gitweb:
https://git.kernel.org/tip/763a9ec06c409dcde2a761aac4bb83ff3938e0b3
Author: Qian Cai 
AuthorDate: Tue, 20 Aug 2019 14:40:55 -04:00
Committer: Ingo Molnar 
CommitterDate: Wed, 25 Sep 2019 17:42:31 +02:00

sched/fair: Fix -Wunused-but-set-variable warnings

Commit:

   de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by 
removing expiration of cpu-local slices")

introduced a few compilation warnings:

  kernel/sched/fair.c: In function '__refill_cfs_bandwidth_runtime':
  kernel/sched/fair.c:4365:6: warning: variable 'now' set but not used 
[-Wunused-but-set-variable]
  kernel/sched/fair.c: In function 'start_cfs_bandwidth':
  kernel/sched/fair.c:4992:6: warning: variable 'overrun' set but not used 
[-Wunused-but-set-variable]

Also, __refill_cfs_bandwidth_runtime() no longer updates the
expiration time, so fix the comments accordingly.

Signed-off-by: Qian Cai 
Signed-off-by: Peter Zijlstra (Intel) 
Reviewed-by: Ben Segall 
Reviewed-by: Dave Chiluk 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: pa...@redhat.com
Fixes: de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by 
removing expiration of cpu-local slices")
Link: https://lkml.kernel.org/r/1566326455-8038-1-git-send-email-...@lca.pw
Signed-off-by: Ingo Molnar 
---
 kernel/sched/fair.c | 19 ++-
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5bc2399..dfdac90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4353,21 +4353,16 @@ static inline u64 sched_cfs_bandwidth_slice(void)
 }
 
 /*
- * Replenish runtime according to assigned quota and update expiration time.
- * We use sched_clock_cpu directly instead of rq->clock to avoid adding
- * additional synchronization around rq->lock.
+ * Replenish runtime according to assigned quota. We use sched_clock_cpu
+ * directly instead of rq->clock to avoid adding additional synchronization
+ * around rq->lock.
  *
  * requires cfs_b->lock
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
-   u64 now;
-
-   if (cfs_b->quota == RUNTIME_INF)
-   return;
-
-   now = sched_clock_cpu(smp_processor_id());
-   cfs_b->runtime = cfs_b->quota;
+   if (cfs_b->quota != RUNTIME_INF)
+   cfs_b->runtime = cfs_b->quota;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4983,15 +4978,13 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-   u64 overrun;
-
lockdep_assert_held(&cfs_b->lock);
 
if (cfs_b->period_active)
return;
 
cfs_b->period_active = 1;
-   overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+   hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 


[tip: sched/urgent] sched/core: Convert vcpu_is_preempted() from macro to an inline function

2019-09-18 Thread tip-bot2 for Qian Cai
The following commit has been merged into the sched/urgent branch of tip:

Commit-ID: 42fd8baab31f53bed2952485fcf0e92f244c5e55
Gitweb:
https://git.kernel.org/tip/42fd8baab31f53bed2952485fcf0e92f244c5e55
Author: Qian Cai 
AuthorDate: Tue, 17 Sep 2019 10:34:54 -04:00
Committer: Ingo Molnar 
CommitterDate: Wed, 18 Sep 2019 12:38:17 +02:00

sched/core: Convert vcpu_is_preempted() from macro to an inline function

Clang reports this warning:

  kernel/locking/osq_lock.c:25:19: warning: unused function 'node_cpu' 
[-Wunused-function]

due to osq_lock() calling vcpu_is_preempted(node_cpu(node->prev))), but
vcpu_is_preempted() is compiled away. Fix it by converting the dummy
vcpu_is_preempted() from a macro to a proper static inline function.

Signed-off-by: Qian Cai 
Acked-by: Mel Gorman 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: bseg...@google.com
Cc: dietmar.eggem...@arm.com
Cc: juri.le...@redhat.com
Cc: rost...@goodmis.org
Cc: vincent.guit...@linaro.org
Link: https://lkml.kernel.org/r/1568730894-10483-1-git-send-email-...@lca.pw
Signed-off-by: Ingo Molnar 
---
 include/linux/sched.h | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0edee9..e2e9196 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1856,7 +1856,10 @@ static inline void set_task_cpu(struct task_struct *p, 
unsigned int cpu)
  * running or not.
  */
 #ifndef vcpu_is_preempted
-# define vcpu_is_preempted(cpu)false
+static inline bool vcpu_is_preempted(int cpu)
+{
+   return false;
+}
 #endif
 
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);


[tip: sched/urgent] sched/fair: Remove unused cfs_rq_clock_task() function

2019-09-17 Thread tip-bot2 for Qian Cai
The following commit has been merged into the sched/urgent branch of tip:

Commit-ID: dac9f027b1096c5f03ca583e787aac0f852e8f78
Gitweb:
https://git.kernel.org/tip/dac9f027b1096c5f03ca583e787aac0f852e8f78
Author: Qian Cai 
AuthorDate: Mon, 16 Sep 2019 17:19:35 -04:00
Committer: Ingo Molnar 
CommitterDate: Tue, 17 Sep 2019 09:55:02 +02:00

sched/fair: Remove unused cfs_rq_clock_task() function

cfs_rq_clock_task() was first introduced and used in:

  f1b17280efbd ("sched: Maintain runnable averages across throttled periods")

Over time its use has been gradually removed by the following commits:

  d31b1a66cbe0 ("sched/fair: Factorize PELT update")
  23127296889f ("sched/fair: Update scale invariance of PELT")

Today, there is no single user left, so it can be safely removed.

Found via the -Wunused-function build warning.

Signed-off-by: Qian Cai 
Cc: Ben Segall 
Cc: Dietmar Eggemann 
Cc: Juri Lelli 
Cc: Linus Torvalds 
Cc: Mel Gorman 
Cc: Mike Galbraith 
Cc: Peter Zijlstra 
Cc: Steven Rostedt 
Cc: Thomas Gleixner 
Cc: Vincent Guittot 
Link: https://lkml.kernel.org/r/1568668775-2127-1-git-send-email-...@lca.pw
[ Rewrote the changelog. ]
Signed-off-by: Ingo Molnar 
---
 kernel/sched/fair.c | 16 
 1 file changed, 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d4bbf68..3101c66 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -749,7 +749,6 @@ void init_entity_runnable_average(struct sched_entity *se)
/* when this task enqueue'ed, it will contribute to its cfs_rq's 
load_avg */
 }
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 static void attach_entity_cfs_rq(struct sched_entity *se);
 
 /*
@@ -4376,15 +4375,6 @@ static inline struct cfs_bandwidth 
*tg_cfs_bandwidth(struct task_group *tg)
return &tg->cfs_bandwidth;
 }
 
-/* rq->task_clock normalized against any time this cfs_rq has spent throttled 
*/
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
-{
-   if (unlikely(cfs_rq->throttle_count))
-   return cfs_rq->throttled_clock_task - 
cfs_rq->throttled_clock_task_time;
-
-   return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
-}
-
 /* returns 0 on failure to allocate runtime */
 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4476,7 +4466,6 @@ static int tg_unthrottle_up(struct task_group *tg, void 
*data)
 
cfs_rq->throttle_count--;
if (!cfs_rq->throttle_count) {
-   /* adjust cfs_rq_clock_task() */
cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 cfs_rq->throttled_clock_task;
 
@@ -5080,11 +5069,6 @@ static inline bool cfs_bandwidth_used(void)
return false;
 }
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
-{
-   return rq_clock_task(rq_of(cfs_rq));
-}
-
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}