We only need to set the callback_head worker function once, so do it
during sched_fork().
While at it, move the comment regarding double task_work addition to
init_numa_balancing(), since the double add sentinel is first set there.

Suggested-by: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Valentin Schneider <valentin.schnei...@arm.com>
---
 kernel/sched/fair.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 476b0201a8fb..74faa55bc52a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2441,7 +2441,7 @@ void task_numa_work(struct callback_head *work)
 
 	SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
 
-	work->next = work; /* protect against double add */
+	work->next = work;
 	/*
 	 * Who cares about NUMA placement when they're dying.
 	 *
@@ -2585,12 +2585,15 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 	p->node_stamp			= 0;
 	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
 	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
+	/* Protect against double add, see task_tick_numa and task_numa_work */
 	p->numa_work.next		= &p->numa_work;
 	p->numa_faults			= NULL;
 	p->numa_group			= NULL;
 	p->last_task_numa_placement	= 0;
 	p->last_sum_exec_runtime	= 0;
 
+	init_task_work(&p->numa_work, task_numa_work);
+
 	/* New address space, reset the preferred nid */
 	if (!(clone_flags & CLONE_VM)) {
 		p->numa_preferred_nid = NUMA_NO_NODE;
@@ -2639,10 +2642,8 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 		curr->numa_scan_period = task_scan_start(curr);
 		curr->node_stamp += period;
 
-		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
-			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
+		if (!time_before(jiffies, curr->mm->numa_next_scan))
 			task_work_add(curr, work, true);
-		}
 	}
 }
-- 
2.22.0
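
For readers new to task_work: the double-add protection the patch moves
around hinges on a self-pointing sentinel, where work->next == work means
"not queued". Below is a minimal, single-threaded userspace sketch of that
pattern; the helper names (init_work(), queue_once(), run_all()) are
invented for illustration only, and the real task_work_add() additionally
uses cmpxchg() for lockless queueing and can refuse work for exiting tasks.

	#include <stdio.h>

	struct callback_head {
		struct callback_head *next;
		void (*func)(struct callback_head *head);
	};

	/* Like init_numa_balancing(): arm the sentinel, set the worker once. */
	static void init_work(struct callback_head *work,
			      void (*func)(struct callback_head *))
	{
		work->next = work;	/* next == self means "not queued" */
		work->func = func;
	}

	/* Like the guard in task_tick_numa(): refuse an already-pending work. */
	static int queue_once(struct callback_head **head, struct callback_head *work)
	{
		if (work->next != work)
			return -1;	/* pending or running: double add avoided */
		work->next = *head;	/* queueing clobbers the sentinel */
		*head = work;
		return 0;
	}

	/* Like task_numa_work(): re-arm the sentinel before doing the work. */
	static void run_all(struct callback_head **head)
	{
		while (*head) {
			struct callback_head *work = *head;

			*head = work->next;
			work->next = work;	/* protect against double add */
			work->func(work);
		}
	}

	static void hello(struct callback_head *head)
	{
		printf("work %p ran\n", (void *)head);
	}

	int main(void)
	{
		struct callback_head *head = NULL;
		struct callback_head work;

		init_work(&work, hello);
		queue_once(&head, &work);		/* queued */
		if (queue_once(&head, &work) < 0)	/* sentinel gone: rejected */
			printf("double add rejected\n");
		run_all(&head);				/* runs once, re-arms */
		return 0;
	}

The point the patch exploits is that only the sentinel, armed in
init_numa_balancing() and re-armed at the top of task_numa_work(), guards
against double adds; work->func never changes after fork, so setting it
once there is enough and the per-tick init_task_work() call was redundant.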