In order to allow overriding the low-level cpumask of unbound workqueues, we need to be able to call apply_workqueue_attrs() on all workqueues in the pool list.
Since traversing the pool list requires holding its lock, we can't currently call apply_workqueue_attrs() from within the traversal. So let's provide a version of apply_workqueue_attrs() that can be called while wq_pool_mutex is already held.

Suggested-by: Tejun Heo <t...@kernel.org>
Cc: Christoph Lameter <c...@linux.com>
Cc: Kevin Hilman <khil...@linaro.org>
Cc: Lai Jiangshan <la...@cn.fujitsu.com>
Cc: Mike Galbraith <bitbuc...@online.de>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Tejun Heo <t...@kernel.org>
Cc: Viresh Kumar <viresh.ku...@linaro.org>
Signed-off-by: Frederic Weisbecker <fweis...@gmail.com>
---
 kernel/workqueue.c | 77 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 44 insertions(+), 33 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1252a8c..2aa296d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3637,24 +3637,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
 	return old_pwq;
 }
 
-/**
- * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
- * @wq: the target workqueue
- * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
- *
- * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
- * machines, this function maps a separate pwq to each NUMA node with
- * possibles CPUs in @attrs->cpumask so that work items are affine to the
- * NUMA node it was issued on.  Older pwqs are released as in-flight work
- * items finish.  Note that a work item which repeatedly requeues itself
- * back-to-back will stay on its current pwq.
- *
- * Performs GFP_KERNEL allocations.
- *
- * Return: 0 on success and -errno on failure.
- */
-int apply_workqueue_attrs(struct workqueue_struct *wq,
-			  const struct workqueue_attrs *attrs)
+static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+					const struct workqueue_attrs *attrs,
+					cpumask_var_t unbounds_cpumask)
 {
 	struct workqueue_attrs *new_attrs, *tmp_attrs;
 	struct pool_workqueue **pwq_tbl, *dfl_pwq;
@@ -3676,7 +3661,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 
 	/* make a copy of @attrs and sanitize it */
 	copy_workqueue_attrs(new_attrs, attrs);
-	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
+	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, unbounds_cpumask);
 
 	/*
 	 * We may create multiple pwqs with differing cpumasks.  Make a
@@ -3686,15 +3671,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	copy_workqueue_attrs(tmp_attrs, new_attrs);
 
 	/*
-	 * CPUs should stay stable across pwq creations and installations.
-	 * Pin CPUs, determine the target cpumask for each node and create
-	 * pwqs accordingly.
-	 */
-	get_online_cpus();
-
-	mutex_lock(&wq_pool_mutex);
-
-	/*
 	 * If something goes wrong during CPU up/down, we'll fall back to
 	 * the default pwq covering whole @attrs->cpumask.  Always create
 	 * it even if we don't use it immediately.
@@ -3714,8 +3690,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 		}
 	}
 
-	mutex_unlock(&wq_pool_mutex);
-
 	/* all pwqs have been created successfully, let's install'em */
 	mutex_lock(&wq->mutex);
 
@@ -3736,7 +3710,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 		put_pwq_unlocked(pwq_tbl[node]);
 	put_pwq_unlocked(dfl_pwq);
 
-	put_online_cpus();
 	ret = 0;
 	/* fall through */
 out_free:
@@ -3750,14 +3723,52 @@ enomem_pwq:
 	for_each_node(node)
 		if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
 			free_unbound_pwq(pwq_tbl[node]);
-	mutex_unlock(&wq_pool_mutex);
-	put_online_cpus();
 enomem:
 	ret = -ENOMEM;
 	goto out_free;
 }
 
 /**
+ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+ * @wq: the target workqueue
+ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+ *
+ * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
+ * machines, this function maps a separate pwq to each NUMA node with
+ * possibles CPUs in @attrs->cpumask so that work items are affine to the
+ * NUMA node it was issued on.  Older pwqs are released as in-flight work
+ * items finish.  Note that a work item which repeatedly requeues itself
+ * back-to-back will stay on its current pwq.
+ *
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
+ */
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+			  const struct workqueue_attrs *attrs)
+{
+	int ret;
+
+	/*
+	 * CPUs should stay stable across pwq creations and installations.
+	 * Pin CPUs, determine the target cpumask for each node and create
+	 * pwqs accordingly.
+	 */
+
+	get_online_cpus();
+	/*
+	 * Lock for alloc_unbound_pwq()
+	 */
+	mutex_lock(&wq_pool_mutex);
+	ret = apply_workqueue_attrs_locked(wq, attrs, wq_unbound_cpumask);
+	mutex_unlock(&wq_pool_mutex);
+	put_online_cpus();
+
+	return ret;
+}
+
+
+/**
  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
  * @wq: the target workqueue
  * @cpu: the CPU coming up or going down
-- 
1.8.3.1
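
For context, here is a minimal sketch of the kind of caller this split is meant to enable: walking the workqueue list and reapplying attrs under a single wq_pool_mutex critical section, which the unlocked apply_workqueue_attrs() could not do. The function name wq_apply_unbound_cpumask and the exact filtering are hypothetical illustrations, not part of this series:

/*
 * Illustrative sketch only -- not part of this patch.  Override the
 * low-level unbound cpumask for every unbound workqueue while holding
 * wq_pool_mutex across the whole list traversal, which the locked
 * variant makes possible.
 */
static int wq_apply_unbound_cpumask(cpumask_var_t cpumask)
{
	struct workqueue_struct *wq;
	int ret = 0;

	/* pin CPUs across pwq creations, as apply_workqueue_attrs() does */
	get_online_cpus();
	mutex_lock(&wq_pool_mutex);
	list_for_each_entry(wq, &workqueues, list) {
		/* only unbound workqueues carry unbound_attrs */
		if (!(wq->flags & WQ_UNBOUND))
			continue;
		/* reapply the wq's own attrs, now masked by @cpumask */
		ret = apply_workqueue_attrs_locked(wq, wq->unbound_attrs,
						   cpumask);
		if (ret)
			break;
	}
	mutex_unlock(&wq_pool_mutex);
	put_online_cpus();

	return ret;
}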