Re: [PATCH 6/7] workqueue: node-aware allocation for unbound pool

2013-04-04 Thread Tejun Heo
Hello,

On Thu, Apr 04, 2013 at 10:05:37AM +0800, Lai Jiangshan wrote:
> Determine the pool's node earlier, so that the pool itself can be
> allocated from that node.
> 
> Signed-off-by: Lai Jiangshan <la...@cn.fujitsu.com>
> ---
>  kernel/workqueue.c |   29 +++++++++++++++--------------
>  1 files changed, 15 insertions(+), 14 deletions(-)
> 
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 737646d..3f33077 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -539,7 +539,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
>   * @wq: the target workqueue
>   * @node: the node ID
>   *
> - * This must be called either with pwq_lock held or sched RCU read locked.
> + * This must be called either with wq->mutex held or sched RCU read locked.

It'd be nice to mention it in the patch description.  Just add
something like "while at it, fix the wrong locking comment in XXX".

> @@ -3563,29 +3563,30 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
>  	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
>  		if (wqattrs_equal(pool->attrs, attrs)) {
>  			pool->refcnt++;
> -			goto out_unlock;
> +			goto out_pool;

return pool; ?
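
(Presumably the suggestion is to return directly from the lookup loop and
drop the label; a sketch of that reading, not code from the thread:

	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
		if (wqattrs_equal(pool->attrs, attrs)) {
			pool->refcnt++;
			return pool;	/* no out_pool label needed */
		}
	}

The out_pool label and its goto would then be unnecessary.)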

Thanks.

-- 
tejun


[PATCH 6/7] workqueue: node-aware allocation for unbound pool

2013-04-03 Thread Lai Jiangshan
Determine the pool's node earlier, so that the pool itself can be
allocated from that node.
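
(In outline, the node is derived from the requested cpumask first, so that
kzalloc_node() can then place the pool structure on that node; a condensed
sketch of the pattern, using the identifiers from the diff below and
omitting the wq_numa_enabled guard:

	int pool_node = NUMA_NO_NODE, node;

	/* pick the node whose possible CPUs cover the requested cpumask */
	for_each_node(node) {
		if (cpumask_subset(attrs->cpumask, wq_numa_possible_cpumask[node])) {
			pool_node = node;
			break;
		}
	}

	/* allocate the pool itself on that node */
	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, pool_node);

If no single node covers the cpumask, pool_node stays NUMA_NO_NODE and
kzalloc_node() falls back to an ordinary kzalloc().)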

Signed-off-by: Lai Jiangshan <la...@cn.fujitsu.com>
---
 kernel/workqueue.c |   29 +++++++++++++++--------------
 1 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 737646d..3f33077 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -539,7 +539,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * @wq: the target workqueue
  * @node: the node ID
  *
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  */
@@ -3555,7 +3555,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
-	int node;
+	int pool_node = NUMA_NO_NODE, node;
 
 	lockdep_assert_held(&wq_pool_mutex);
 
@@ -3563,29 +3563,30 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
 		if (wqattrs_equal(pool->attrs, attrs)) {
 			pool->refcnt++;
-			goto out_unlock;
+			goto out_pool;
 		}
 	}
 
-	/* nope, create a new one */
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-	if (!pool || init_worker_pool(pool) < 0)
-		goto fail;
-
-	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
-	copy_workqueue_attrs(pool->attrs, attrs);
-
 	/* if cpumask is contained inside a NUMA node, we belong to that node */
 	if (wq_numa_enabled) {
 		for_each_node(node) {
-			if (cpumask_subset(pool->attrs->cpumask,
+			if (cpumask_subset(attrs->cpumask,
 					   wq_numa_possible_cpumask[node])) {
-				pool->node = node;
+				pool_node = node;
 				break;
 			}
 		}
 	}
 
+	/* create a new one */
+	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, pool_node);
+	if (!pool || init_worker_pool(pool) < 0)
+		goto fail;
+
+	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
+	copy_workqueue_attrs(pool->attrs, attrs);
+	pool->node = pool_node;
+
 	if (worker_pool_assign_id(pool) < 0)
 		goto fail;
 
@@ -3595,7 +3596,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
-out_unlock:
+out_pool:
 	return pool;
 fail:
 	if (pool)
-- 
1.7.7.6
