  migrate_disable();
  set_cpus_allowed_ptr(current, {something excluding task_cpu(current)});
    affine_move_task(); <-- never returns
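
For illustration, a minimal, untested sketch of a caller that would hit the
above. The function name is made up for the example and it assumes more than
one online CPU; migrate_disable()/migrate_enable(), set_cpus_allowed_ptr()
and the cpumask helpers are the real interfaces involved:

  #include <linux/cpumask.h>
  #include <linux/gfp.h>
  #include <linux/preempt.h>
  #include <linux/sched.h>

  /* Untested example; not part of this patch. */
  static void self_affine_away(void)
  {
          cpumask_var_t mask;

          if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                  return;

          migrate_disable();

          /* New affinity deliberately excludes the CPU we are running on. */
          cpumask_copy(mask, cpu_online_mask);
          cpumask_clear_cpu(task_cpu(current), mask);

          /*
           * affine_move_task() waits for us to move off this CPU, but
           * migration is disabled, so without the check below this call
           * never returns; with it, it fails fast with -EBUSY.
           */
          set_cpus_allowed_ptr(current, mask);

          migrate_enable();
          free_cpumask_var(mask);
  }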

Signed-off-by: Valentin Schneider <valentin.schnei...@arm.com>
---
 kernel/sched/core.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4ccd1099adaa..7f4e38819de1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2189,6 +2189,13 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (!(flags & SCA_MIGRATE_ENABLE) && cpumask_equal(&p->cpus_mask, new_mask))
                goto out;
 
+       if (p == current &&
+           is_migration_disabled(p) &&
+           !cpumask_test_cpu(task_cpu(p), new_mask)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        /*
         * Picking a ~random cpu helps in cases where we are changing affinity
         * for groups of tasks (ie. cpuset), so that load balancing is not
-- 
2.27.0
