- Resolve the ancient TODO by setting the numa_work callback function in
  init_numa_balancing(), which is called on fork(). (A short note on why a
  one-time setup suffices follows the diff.)
- Make task_numa_work() static as it's not used outside of the fair
  scheduler and lacks a prototype as well.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9cfeaac8bd..f47869c0cdad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1103,6 +1103,7 @@ static struct numa_group *deref_curr_numa_group(struct task_struct *p)
 static inline unsigned long group_faults_priv(struct numa_group *ng);
 static inline unsigned long group_faults_shared(struct numa_group *ng);
 
+static void task_numa_work(struct callback_head *work);
 
 static unsigned int task_nr_scan_windows(struct task_struct *p)
 {
@@ -1203,6 +1204,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 	p->node_stamp			= 0;
 	p->numa_scan_seq		= mm ? mm->numa_scan_seq : 0;
 	p->numa_scan_period		= sysctl_numa_balancing_scan_delay;
+	p->numa_work.func		= task_numa_work;
 	p->numa_work.next		= &p->numa_work;
 	p->numa_faults			= NULL;
 	RCU_INIT_POINTER(p->numa_group, NULL);
@@ -2523,7 +2525,7 @@ static void reset_ptenuma_scan(struct task_struct *p)
  * The expensive part of numa migration is done from task_work context.
  * Triggered from task_tick_numa().
  */
-void task_numa_work(struct callback_head *work)
+static void task_numa_work(struct callback_head *work)
 {
 	unsigned long migrate, next_scan, now = jiffies;
 	struct task_struct *p = current;
@@ -2693,10 +2695,8 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 			curr->numa_scan_period = task_scan_start(curr);
 		curr->node_stamp += period;
 
-		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
-			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
+		if (!time_before(jiffies, curr->mm->numa_next_scan))
 			task_work_add(curr, work, true);
-		}
 	}
 }
 
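
For reference (not part of the patch): init_task_work() is a trivial helper
that only stores the callback pointer in the callback_head, which is why a
one-time assignment of p->numa_work.func at fork time can replace the
per-tick init_task_work() call removed above, leaving task_tick_numa() with
nothing to do but queue the already-initialized work. A rough sketch of the
pieces involved, simplified from <linux/types.h> and <linux/task_work.h>
rather than quoted verbatim:

	/* The generic deferred-work descriptor embedded in task_struct as
	 * numa_work. ->next doubles as the "queued or not" marker; the
	 * self-pointing ->next set up in init_numa_balancing() means
	 * "currently not queued". */
	struct callback_head {
		struct callback_head *next;
		void (*func)(struct callback_head *head);
	};

	typedef void (*task_work_func_t)(struct callback_head *);

	/* init_task_work() merely records the callback, nothing more, so
	 * doing the assignment once at fork is equivalent to redoing it
	 * on every tick. */
	static inline void init_task_work(struct callback_head *twork,
					  task_work_func_t func)
	{
		twork->func = func;
	}

task_work_add(curr, work, true) then simply links the pre-initialized work
into the task's pending task_work list from the tick path.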