On Fri, Jul 26, 2024 at 11:56:48PM +0200, Frederic Weisbecker wrote:
> +int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
> +{
> +     struct kthread *kthread = to_kthread(p);
> +     cpumask_var_t affinity;
> +     unsigned long flags;
> +     int ret;
> +
> +     if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
> +             WARN_ON(1);
> +             return -EINVAL;
> +     }
> +
> +     if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
> +             return -ENOMEM;
> +
> +     kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL);
> +     if (!kthread->preferred_affinity) {
> +             ret = -ENOMEM;
> +             goto out;
> +     }
> +
> +     mutex_lock(&kthreads_hotplug_lock);
> +     cpumask_copy(kthread->preferred_affinity, mask);
> +     list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
> +     kthread_fetch_affinity(kthread, affinity);
> +
> +     /* It's safe because the task is inactive. */
> +     raw_spin_lock_irqsave(&p->pi_lock, flags);
> +     do_set_cpus_allowed(p, mask);

s/mask/affinity/

Otherwise the effective affinity that kthread_fetch_affinity() just computed
into 'affinity' is discarded, and the raw preferred mask is applied instead.
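
That is, a sketch of the corrected hunk (assuming the rest of the function
stays as quoted above, and including the unlock that balances the lock):

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, affinity);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);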
