On Wed, Jun 26, 2019 at 03:47:16PM -0700, subhra mazumdar wrote:
> @@ -1082,6 +1088,37 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
>               put_prev_task(rq, p);
>  
>       p->sched_class->set_cpus_allowed(p, new_mask);
> +     set_cpus_preferred_common(p, new_mask);
> +
> +     if (queued)
> +             enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
> +     if (running)
> +             set_curr_task(rq, p);
> +}
> +
> +void do_set_cpus_preferred(struct task_struct *p,
> +                        const struct cpumask *new_mask)
> +{
> +     struct rq *rq = task_rq(p);
> +     bool queued, running;
> +
> +     lockdep_assert_held(&p->pi_lock);
> +
> +     queued = task_on_rq_queued(p);
> +     running = task_current(rq, p);
> +
> +     if (queued) {
> +             /*
> +              * Because __kthread_bind() calls this on blocked tasks without
> +              * holding rq->lock.
> +              */
> +             lockdep_assert_held(&rq->lock);
> +             dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
> +     }
> +     if (running)
> +             put_prev_task(rq, p);
> +
> +     set_cpus_preferred_common(p, new_mask);
>  
>       if (queued)
>               enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
> @@ -1170,6 +1207,41 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
>       return ret;
>  }
>  
> +static int
> +__set_cpus_preferred_ptr(struct task_struct *p, const struct cpumask *new_mask)
> +{
> +     const struct cpumask *cpu_valid_mask = cpu_active_mask;
> +     unsigned int dest_cpu;
> +     struct rq_flags rf;
> +     struct rq *rq;
> +     int ret = 0;
> +
> +     rq = task_rq_lock(p, &rf);
> +     update_rq_clock(rq);
> +
> +     if (p->flags & PF_KTHREAD) {
> +             /*
> +              * Kernel threads are allowed on online && !active CPUs
> +              */
> +             cpu_valid_mask = cpu_online_mask;
> +     }
> +
> +     if (cpumask_equal(&p->cpus_preferred, new_mask))
> +             goto out;
> +
> +     if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
> +             ret = -EINVAL;
> +             goto out;
> +     }
> +
> +     do_set_cpus_preferred(p, new_mask);
> +
> +out:
> +     task_rq_unlock(rq, p, &rf);
> +
> +     return ret;
> +}
> +
>  int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
>  {
>       return __set_cpus_allowed_ptr(p, new_mask, false);
> @@ -4724,7 +4796,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
>       return retval;
>  }
>  
> -long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
> +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask, int flags)
>  {
>       cpumask_var_t cpus_allowed, new_mask;
>       struct task_struct *p;
> @@ -4742,6 +4814,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
>       get_task_struct(p);
>       rcu_read_unlock();
>  
> +     if (flags == SCHED_SOFT_AFFINITY &&
> +         p->sched_class != &fair_sched_class) {
> +             retval = -EINVAL;
> +             goto out_put_task;
> +     }
>       if (p->flags & PF_NO_SETAFFINITY) {
>               retval = -EINVAL;
>               goto out_put_task;
> @@ -4790,18 +4867,37 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
>       }
>  #endif
>  again:
> -     retval = __set_cpus_allowed_ptr(p, new_mask, true);
> -
> -     if (!retval) {
> -             cpuset_cpus_allowed(p, cpus_allowed);
> -             if (!cpumask_subset(new_mask, cpus_allowed)) {
> -                     /*
> -                      * We must have raced with a concurrent cpuset
> -                      * update. Just reset the cpus_allowed to the
> -                      * cpuset's cpus_allowed
> -                      */
> -                     cpumask_copy(new_mask, cpus_allowed);
> -                     goto again;
> +     if (flags == SCHED_HARD_AFFINITY) {
> +             retval = __set_cpus_allowed_ptr(p, new_mask, true);
> +
> +             if (!retval) {
> +                     cpuset_cpus_allowed(p, cpus_allowed);
> +                     if (!cpumask_subset(new_mask, cpus_allowed)) {
> +                             /*
> +                              * We must have raced with a concurrent cpuset
> +                              * update. Just reset the cpus_allowed to the
> +                              * cpuset's cpus_allowed
> +                              */
> +                             cpumask_copy(new_mask, cpus_allowed);
> +                             goto again;
> +                     }
> +                     p->affinity_unequal = false;
> +             }
> +     } else if (flags == SCHED_SOFT_AFFINITY) {
> +             retval = __set_cpus_preferred_ptr(p, new_mask);
> +             if (!retval) {
> +                     cpuset_cpus_allowed(p, cpus_allowed);
> +                     if (!cpumask_subset(new_mask, cpus_allowed)) {
> +                             /*
> +                              * We must have raced with a concurrent cpuset
> +                              * update.
> +                              */
> +                             cpumask_and(new_mask, new_mask, cpus_allowed);
> +                             goto again;
> +                     }
> +                     if (!cpumask_equal(&p->cpus_allowed,
> +                                        &p->cpus_preferred))
> +                             p->affinity_unequal = true;
>               }
>       }
>  out_free_new_mask:

This seems like a terrible lot of pointless duplication; don't you get a
much smaller diff by passing the hard/soft thing into
__set_cpus_allowed_ptr() and only branching where it matters?
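
Roughly something like the below, i.e. one function with a single extra
argument and a branch only where the soft and hard cases actually differ.
This is a completely untested sketch; the "soft" parameter name is made up
and the hard-affinity migration tail is elided, it's only meant to show the
shape:

static int
__set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask,
		       bool check, bool soft)
{
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	if (p->flags & PF_KTHREAD) {
		/*
		 * Kernel threads are allowed on online && !active CPUs
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	/* compare against whichever mask we are about to replace */
	if (cpumask_equal(soft ? &p->cpus_preferred : &p->cpus_allowed,
			  new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
		ret = -EINVAL;
		goto out;
	}

	if (soft) {
		/* soft affinity only updates the preferred mask, no migration */
		do_set_cpus_preferred(p, new_mask);
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* ... existing migration to a CPU in the new hard mask, unchanged ... */

out:
	task_rq_unlock(rq, p, &rf);
	return ret;
}

Then sched_setaffinity() has a single call site, something like
__set_cpus_allowed_ptr(p, new_mask, true, flags == SCHED_SOFT_AFFINITY),
and the cpuset re-check loop doesn't need duplicating either.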
