Peter,

This is a simple cleanup patch that makes sense to me. Want to take it?
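
(For context: as far as I recall, task_current() is a small static
inline helper in kernel/sched/sched.h that just wraps this exact
comparison -- sketch from memory, not copied from the tree:

	static inline int task_current(struct rq *rq, struct task_struct *p)
	{
		/* Is @p the task currently running on this runqueue? */
		return rq->curr == p;
	}

so the conversion below really is a pure readability change.)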

Reviewed-by: Steven Rostedt (VMware) <rost...@goodmis.org>

-- Steve


On Sat, 31 Oct 2020 01:32:23 +0800
Hui Su <sh_...@163.com> wrote:

> We already provide the helper 'task_current()', so use it instead
> of the open-coded 'rq->curr == p' check, which is more readable.
> 
> No functional change.
> 
> Signed-off-by: Hui Su <sh_...@163.com>
> ---
>  kernel/sched/deadline.c | 2 +-
>  kernel/sched/debug.c    | 2 +-
>  kernel/sched/fair.c     | 6 +++---
>  kernel/sched/rt.c       | 2 +-
>  4 files changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index f232305dcefe..3b335be97952 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -2474,7 +2474,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
>  static void prio_changed_dl(struct rq *rq, struct task_struct *p,
>                           int oldprio)
>  {
> -     if (task_on_rq_queued(p) || rq->curr == p) {
> +     if (task_on_rq_queued(p) || task_current(rq, p)) {
>  #ifdef CONFIG_SMP
>               /*
>                * This might be too much, but unfortunately
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index 0655524700d2..1ca554f10901 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -486,7 +486,7 @@ static char *task_group_path(struct task_group *tg)
>  static void
>  print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
>  {
> -     if (rq->curr == p)
> +     if (task_current(rq, p))
>               SEQ_printf(m, ">R");
>       else
>               SEQ_printf(m, " %c", task_state_to_char(p));
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 290f9e38378c..c3e3ae76302e 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5399,7 +5399,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
>               s64 delta = slice - ran;
>  
>               if (delta < 0) {
> -                     if (rq->curr == p)
> +                     if (task_current(rq, p))
>                               resched_curr(rq);
>                       return;
>               }
> @@ -10740,7 +10740,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
>        * our priority decreased, or if we are not currently running on
>        * this runqueue and our priority is higher than the current's
>        */
> -     if (rq->curr == p) {
> +     if (task_current(rq, p)) {
>               if (p->prio > oldprio)
>                       resched_curr(rq);
>       } else
> @@ -10873,7 +10873,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
>                * kick off the schedule if running, otherwise just see
>                * if we can still preempt the current task.
>                */
> -             if (rq->curr == p)
> +             if (task_current(rq, p))
>                       resched_curr(rq);
>               else
>                       check_preempt_curr(rq, p, 0);
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 49ec096a8aa1..cd615aace14c 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -2317,7 +2317,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
>       if (!task_on_rq_queued(p))
>               return;
>  
> -     if (rq->curr == p) {
> +     if (task_current(rq, p)) {
>  #ifdef CONFIG_SMP
>               /*
>                * If our priority decreases while running, we
