On 11/24/20 15:50, Will Deacon wrote:

[...]

> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d2003a7d5ab5..818c8f7bdf2a 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1860,24 +1860,18 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
>  }
>  
>  /*
> - * Change a given task's CPU affinity. Migrate the thread to a
> - * proper CPU and schedule it away if the CPU it's executing on
> - * is removed from the allowed bitmask.
> - *
> - * NOTE: the caller must have a valid reference to the task, the
> - * task must not exit() & deallocate itself prematurely. The
> - * call is not atomic; no spinlocks may be held.
> + * Called with both p->pi_lock and rq->lock held; drops both before returning.

nit: wouldn't it be better for the caller to acquire and release the locks?
Not a big deal, but it's always confusing when half of the work is done outside
the function and the other half inside.
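For illustration, a minimal sketch of the symmetric shape being suggested
(hypothetical; it only works if the _locked helper is reworked so it no longer
drops rq->lock itself on the stop_one_cpu() path shown in the hunk below):

	struct rq_flags rf;
	struct rq *rq;
	int ret;

	/*
	 * Hypothetical caller-owns-locks pattern: acquire and release
	 * happen at the same level. Valid only if the _locked helper
	 * keeps both p->pi_lock and rq->lock held across its body.
	 */
	rq = task_rq_lock(p, &rf);
	ret = __set_cpus_allowed_ptr_locked(p, new_mask, check, rq, &rf);
	task_rq_unlock(rq, p, &rf);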

Thanks

--
Qais Yousef

>   */
> -static int __set_cpus_allowed_ptr(struct task_struct *p,
> -                               const struct cpumask *new_mask, bool check)
> +static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
> +                                      const struct cpumask *new_mask,
> +                                      bool check,
> +                                      struct rq *rq,
> +                                      struct rq_flags *rf)
>  {
>       const struct cpumask *cpu_valid_mask = cpu_active_mask;
>       unsigned int dest_cpu;
> -     struct rq_flags rf;
> -     struct rq *rq;
>       int ret = 0;
>  
> -     rq = task_rq_lock(p, &rf);
>       update_rq_clock(rq);
>  
>       if (p->flags & PF_KTHREAD) {
> @@ -1929,7 +1923,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
>       if (task_running(rq, p) || p->state == TASK_WAKING) {
>               struct migration_arg arg = { p, dest_cpu };
>               /* Need help from migration thread: drop lock and wait. */
> -             task_rq_unlock(rq, p, &rf);
> +             task_rq_unlock(rq, p, rf);
>               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
>               return 0;
>       } else if (task_on_rq_queued(p)) {
> @@ -1937,20 +1931,69 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
>                * OK, since we're going to drop the lock immediately
>                * afterwards anyway.
>                */
> -             rq = move_queued_task(rq, &rf, p, dest_cpu);
> +             rq = move_queued_task(rq, rf, p, dest_cpu);
>       }
>  out:
> -     task_rq_unlock(rq, p, &rf);
> +     task_rq_unlock(rq, p, rf);
>  
>       return ret;
>  }
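
For context, the non-_locked wrapper presumably ends up as a thin shell that
takes both locks and lets the helper drop them, roughly like this (a sketch
reconstructed from the hunks above, not quoted from the patch):

	static int __set_cpus_allowed_ptr(struct task_struct *p,
					  const struct cpumask *new_mask, bool check)
	{
		struct rq_flags rf;
		struct rq *rq;

		/* task_rq_lock() takes p->pi_lock and rq->lock ... */
		rq = task_rq_lock(p, &rf);
		/* ... and __set_cpus_allowed_ptr_locked() releases both. */
		return __set_cpus_allowed_ptr_locked(p, new_mask, check, rq, &rf);
	}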
