On Tue, Jan 21, 2014 at 05:20:06PM -0500, [email protected] wrote:
> From: Rik van Riel <[email protected]>
> 
> The numa_faults_cpu statistics are used to maintain an active_nodes nodemask
> per numa_group. This allows us to be smarter about when to do numa migrations.
> 
> Cc: Peter Zijlstra <[email protected]>
> Cc: Mel Gorman <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Chegu Vinod <[email protected]>
> Signed-off-by: Rik van Riel <[email protected]>
> ---
>  kernel/sched/fair.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 51 insertions(+)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index b98ed61..d4f6df5 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -885,6 +885,7 @@ struct numa_group {
>       struct list_head task_list;
>  
>       struct rcu_head rcu;
> +     nodemask_t active_nodes;
>       unsigned long total_faults;
>       unsigned long *faults_cpu;
>       unsigned long faults[0];
> @@ -918,6 +919,12 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
>               p->numa_group->faults[task_faults_idx(nid, 1)];
>  }
>  
> +static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
> +{
> +     return group->faults_cpu[task_faults_idx(nid, 0)] +
> +             group->faults_cpu[task_faults_idx(nid, 1)];
> +}
> +
>  /*
>   * These return the fraction of accesses done by a particular task, or
>   * task group, on a particular numa node.  The group weight is given a
> @@ -1275,6 +1282,40 @@ static void numa_migrate_preferred(struct task_struct *p)
>  }
>  
>  /*
> + * Find the nodes on which the workload is actively running. We do this by
> + * tracking the nodes from which NUMA hinting faults are triggered. This can
> + * be different from the set of nodes where the workload's memory is currently
> + * located.
> + *
> + * The bitmask is used to make smarter decisions on when to do NUMA page
> + * migrations. To prevent flip-flopping and excessive page migrations, nodes
> + * are added when they cause over 6/16 of the maximum number of faults, but
> + * only removed when they drop below 3/16.
> + */
> +static void update_numa_active_node_mask(struct numa_group *numa_group)
> +{
> +     unsigned long faults, max_faults = 0;
> +     int nid;
> +
> +     for_each_online_node(nid) {
> +             faults = numa_group->faults_cpu[task_faults_idx(nid, 0)] +
> +                      numa_group->faults_cpu[task_faults_idx(nid, 1)];

faults = group_faults_cpu(numa_group, nid)

?

> +             if (faults > max_faults)
> +                     max_faults = faults;
> +     }
> +
> +     for_each_online_node(nid) {
> +             faults = numa_group->faults_cpu[task_faults_idx(nid, 0)] +
> +                      numa_group->faults_cpu[task_faults_idx(nid, 1)];

Same?
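
Something like this (untested, just to show what I mean) is what I had
in mind for both loops, with the rest of the second loop unchanged:

	for_each_online_node(nid) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults > max_faults)
			max_faults = faults;
	}

	for_each_online_node(nid) {
		faults = group_faults_cpu(numa_group, nid);
		...
	}

That way the helper added earlier in this patch actually gets a user.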

> +             if (!node_isset(nid, numa_group->active_nodes)) {
> +                     if (faults > max_faults * 6 / 16)
> +                             node_set(nid, numa_group->active_nodes);
> +             } else if (faults < max_faults * 3 / 16)
> +                     node_clear(nid, numa_group->active_nodes);
> +     }
> +}
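
The 6/16 / 3/16 hysteresis looks reasonable to me. To make it concrete:
with max_faults == 1600, a node is added to the mask once it generates
more than 600 faults, but is only dropped again when it falls below 300,
so a node hovering at, say, 450 faults keeps whatever state it already
has instead of flip-flopping in and out of the mask.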
> +
> +/*
>   * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
>   * increments. The more local the fault statistics are, the higher the scan
>   * period will be for the next scan window. If local/remote ratio is below
> @@ -1416,6 +1457,7 @@ static void task_numa_placement(struct task_struct *p)
>       update_task_scan_period(p, fault_types[0], fault_types[1]);
>  
>       if (p->numa_group) {
> +             update_numa_active_node_mask(p->numa_group);
>               /*
>                * If the preferred task and group nids are different,
>                * iterate over the nodes again to find the best place.
> @@ -1478,6 +1520,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
>               /* Second half of the array tracks nids where faults happen */
>               grp->faults_cpu = grp->faults + 2 * nr_node_ids;
>  
> +             node_set(task_node(current), grp->active_nodes);
> +
>               for (i = 0; i < 4*nr_node_ids; i++)
>                       grp->faults[i] = p->numa_faults_memory[i];
>  
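
For what it's worth, the array layout here took me a second to untangle.
The group allocation holds 4 * nr_node_ids counters, and (assuming I am
reading task_faults_idx() correctly as 2 * nid + priv) it ends up as:

	grp->faults[0 .. 2*nr_node_ids - 1]		memory faults
	grp->faults[2*nr_node_ids .. 4*nr_node_ids - 1]	cpu faults (grp->faults_cpu)

each half holding a private/shared pair per node, which matches the
4*nr_node_ids copy loop above.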
> @@ -1547,6 +1591,13 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
>       my_grp->nr_tasks--;
>       grp->nr_tasks++;
>  
> +     /*
> +      * We just joined a new group, the set of active nodes may have
> +      * changed. Do not update the nodemask of the old group, since
> +      * the tasks in that group will probably join the new group soon.
> +      */
> +     update_numa_active_node_mask(grp);
> +
>       spin_unlock(&my_grp->lock);
>       spin_unlock(&grp->lock);
>  

Ok, I guess this stops the old group from making very different migration
decisions just because one task left the group. That would have
difficult-to-predict consequences. So, assuming the new group_faults_cpu
helper gets used in the loops above:

Acked-by: Mel Gorman <[email protected]>

-- 
Mel Gorman
SUSE Labs