On Thu, Jan 17, 2019 at 09:47:37AM +0100, Juri Lelli wrote:
> @@ -3233,11 +3233,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
>  {
>       unsigned long flags;
>  
> -     spin_lock_irqsave(&callback_lock, flags);
> +     raw_spin_lock_irqsave(&callback_lock, flags);
>       rcu_read_lock();
>       guarantee_online_cpus(task_cs(tsk), pmask);
>       rcu_read_unlock();
> -     spin_unlock_irqrestore(&callback_lock, flags);
> +     raw_spin_unlock_irqrestore(&callback_lock, flags);
>  }
>  
>  void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
> @@ -3285,11 +3285,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
>       nodemask_t mask;
>       unsigned long flags;
>  
> -     spin_lock_irqsave(&callback_lock, flags);
> +     raw_spin_lock_irqsave(&callback_lock, flags);
>       rcu_read_lock();
>       guarantee_online_mems(task_cs(tsk), &mask);
>       rcu_read_unlock();
> -     spin_unlock_irqrestore(&callback_lock, flags);
> +     raw_spin_unlock_irqrestore(&callback_lock, flags);
>  
>       return mask;
>  }
> @@ -3381,14 +3381,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
>               return true;
>  
>       /* Not hardwall and node outside mems_allowed: scan up cpusets */
> -     spin_lock_irqsave(&callback_lock, flags);
> +     raw_spin_lock_irqsave(&callback_lock, flags);
>  
>       rcu_read_lock();
>       cs = nearest_hardwall_ancestor(task_cs(current));
>       allowed = node_isset(node, cs->mems_allowed);
>       rcu_read_unlock();
>  
> -     spin_unlock_irqrestore(&callback_lock, flags);
> +     raw_spin_unlock_irqrestore(&callback_lock, flags);
>       return allowed;
>  }

These three hold the lock across a user-controlled O(n) walk (n being
the depth of the cgroup tree), which is basically bad for a
raw_spinlock_t; unlike spinlock_t it stays a spinning, non-preemptible
lock on PREEMPT_RT, so the length of the critical section directly
adds to worst-case latency.
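
(For reference, the O(n) is the ancestor walk done while holding the
lock. Roughly, from kernel/cgroup/cpuset.c -- simplified here, not
necessarily the exact upstream code:

	static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
	{
		/* O(depth): walk towards the root until we hit a
		 * mem_exclusive or mem_hardwall cpuset. */
		while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) &&
		       parent_cs(cs))
			cs = parent_cs(cs);
		return cs;
	}

guarantee_online_cpus() and guarantee_online_mems() do a similar
parent_cs() walk, so all three critical sections scale with the depth
of the hierarchy a user can create.)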

The Changelog should really have mentioned this, and ideally we'd
somehow avoid it.
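
One direction -- only a sketch, and only valid if the flags inspected
during the walk are stable under RCU alone -- would be to do the O(n)
walk with just rcu_read_lock() held and take callback_lock only around
the final O(1) mask access, e.g. for __cpuset_node_allowed():

	rcu_read_lock();
	cs = nearest_hardwall_ancestor(task_cs(current));
	raw_spin_lock_irqsave(&callback_lock, flags);
	allowed = node_isset(node, cs->mems_allowed);
	raw_spin_unlock_irqrestore(&callback_lock, flags);
	rcu_read_unlock();

That would bound the raw_spinlock_t hold time to O(1); whether the
walk can actually be done without callback_lock needs auditing against
whatever invariants that lock is supposed to provide.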
