From: Jiri Kosina <[email protected]>

Commit

        799ba82de01e ("sched/deadline: Use C bitfields for the state flags")

converted the state flags into one-bit signed int bitfields. A signed 
one-bit type can hold only 0 or -1, which is going to cause a problem 
once 1 is assigned to it and the value is later tested against 1.
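As an aside (not part of the patch itself), a minimal userspace sketch 
of the hazard; the struct and field names below are made up purely for 
illustration, and the stored value of -1 is the typical behaviour on 
common ABIs where a plain int bitfield is signed:

        #include <stdio.h>

        struct flags {
                int throttled : 1;      /* one-bit bitfield; signed on common ABIs */
        };

        int main(void)
        {
                struct flags f = { 0 };

                f.throttled = 1;        /* typically stored as -1, not 1 */

                /* A (non-)zero check still behaves as expected ... */
                if (f.throttled)
                        printf("non-zero check sees the flag as set\n");

                /* ... but an explicit comparison against 1 silently fails. */
                if (f.throttled == 1)
                        printf("equality check sees the flag as set\n");
                else
                        printf("equality check fails, value is %d\n", f.throttled);

                return 0;
        }
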

The current code is okay, as all the checks are (non-)zero checks, but 
I believe we'd rather be safe than sorry here; remove the fragility by 
converting the state flags to bool.

This also silences annoying sparse complaints about this very issue when 
compiling any code that includes sched.h.

Fixes: 799ba82de01e ("sched/deadline: Use C bitfields for the state flags")
Cc: luca abeni <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Cc: Daniel Bristot de Oliveira <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mathieu Poirier <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
---
 include/linux/sched.h   |  8 ++++----
 kernel/sched/core.c     |  8 ++++----
 kernel/sched/deadline.c | 32 ++++++++++++++++----------------
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a5dc7c98b0a2..b19fa1b96726 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -473,10 +473,10 @@ struct sched_dl_entity {
         * conditions between the inactive timer handler and the wakeup
         * code.
         */
-       int                             dl_throttled      : 1;
-       int                             dl_boosted        : 1;
-       int                             dl_yielded        : 1;
-       int                             dl_non_contending : 1;
+       bool                            dl_throttled;
+       bool                            dl_boosted;
+       bool                            dl_yielded;
+       bool                            dl_non_contending;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75554f366fd3..f9bbf0f55e0e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3738,20 +3738,20 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
        if (dl_prio(prio)) {
                if (!dl_prio(p->normal_prio) ||
                    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
-                       p->dl.dl_boosted = 1;
+                       p->dl.dl_boosted = true;
                        queue_flag |= ENQUEUE_REPLENISH;
                } else
-                       p->dl.dl_boosted = 0;
+                       p->dl.dl_boosted = false;
                p->sched_class = &dl_sched_class;
        } else if (rt_prio(prio)) {
                if (dl_prio(oldprio))
-                       p->dl.dl_boosted = 0;
+                       p->dl.dl_boosted = false;
                if (oldprio < prio)
                        queue_flag |= ENQUEUE_HEAD;
                p->sched_class = &rt_sched_class;
        } else {
                if (dl_prio(oldprio))
-                       p->dl.dl_boosted = 0;
+                       p->dl.dl_boosted = false;
                if (rt_prio(oldprio))
                        p->rt.timeout = 0;
                p->sched_class = &fair_sched_class;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2473736c7616..78cb00ae8b2f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -133,7 +133,7 @@ void dl_change_utilization(struct task_struct *p, u64 new_bw)
        rq = task_rq(p);
        if (p->dl.dl_non_contending) {
                sub_running_bw(p->dl.dl_bw, &rq->dl);
-               p->dl.dl_non_contending = 0;
+               p->dl.dl_non_contending = false;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
@@ -251,7 +251,7 @@ static void task_non_contending(struct task_struct *p)
                return;
        }
 
-       dl_se->dl_non_contending = 1;
+       dl_se->dl_non_contending = true;
        get_task_struct(p);
        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
 }
@@ -271,7 +271,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
                add_rq_bw(dl_se->dl_bw, dl_rq);
 
        if (dl_se->dl_non_contending) {
-               dl_se->dl_non_contending = 0;
+               dl_se->dl_non_contending = false;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
@@ -676,9 +676,9 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
        }
 
        if (dl_se->dl_yielded)
-               dl_se->dl_yielded = 0;
+               dl_se->dl_yielded = false;
        if (dl_se->dl_throttled)
-               dl_se->dl_throttled = 0;
+               dl_se->dl_throttled = false;
 }
 
 /*
@@ -1051,7 +1051,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
                        return;
-               dl_se->dl_throttled = 1;
+               dl_se->dl_throttled = true;
                if (dl_se->runtime > 0)
                        dl_se->runtime = 0;
        }
@@ -1154,7 +1154,7 @@ static void update_curr_dl(struct rq *rq)
 
 throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
-               dl_se->dl_throttled = 1;
+               dl_se->dl_throttled = true;
                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
@@ -1206,7 +1206,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
                if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
                        sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
                        sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
-                       dl_se->dl_non_contending = 0;
+                       dl_se->dl_non_contending = false;
                }
 
                raw_spin_lock(&dl_b->lock);
@@ -1216,14 +1216,14 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 
                goto unlock;
        }
-       if (dl_se->dl_non_contending == 0)
+       if (!dl_se->dl_non_contending)
                goto unlock;
 
        sched_clock_tick();
        update_rq_clock(rq);
 
        sub_running_bw(dl_se->dl_bw, &rq->dl);
-       dl_se->dl_non_contending = 0;
+       dl_se->dl_non_contending = false;
 unlock:
        task_rq_unlock(rq, p, &rf);
        put_task_struct(p);
@@ -1492,7 +1492,7 @@ static void yield_task_dl(struct rq *rq)
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_yielded=1).
         */
-       rq->curr->dl.dl_yielded = 1;
+       rq->curr->dl.dl_yielded = true;
 
        update_rq_clock(rq);
        update_curr_dl(rq);
@@ -1565,7 +1565,7 @@ static void migrate_task_rq_dl(struct task_struct *p)
        raw_spin_lock(&rq->lock);
        if (p->dl.dl_non_contending) {
                sub_running_bw(p->dl.dl_bw, &rq->dl);
-               p->dl.dl_non_contending = 0;
+               p->dl.dl_non_contending = false;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
@@ -2232,7 +2232,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
         * while SCHED_OTHER in the meanwhile.
         */
        if (p->dl.dl_non_contending)
-               p->dl.dl_non_contending = 0;
+               p->dl.dl_non_contending = false;
 
        /*
         * Since this might be the only -deadline task on the rq,
@@ -2563,9 +2563,9 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_bw = 0;
        dl_se->dl_density = 0;
 
-       dl_se->dl_throttled = 0;
-       dl_se->dl_yielded = 0;
-       dl_se->dl_non_contending = 0;
+       dl_se->dl_throttled = false;
+       dl_se->dl_yielded = false;
+       dl_se->dl_non_contending = false;
 }
 
 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)

-- 
Jiri Kosina
SUSE Labs
