Re: [PATCH 17/31] workqueue: implement attribute-based unbound worker_pool management

2013-03-10 Thread Tejun Heo
On Sun, Mar 10, 2013 at 05:58:21AM -0700, Tejun Heo wrote:
> > > + if (WARN_ON(pool->nr_workers != pool->nr_idle))
> > > + return;
> > 
> > This can trigger a false positive; we should remove this WARN_ON().
> 
> How would the test fail spuriously?  Can you please elaborate?

I got it.  nr_idle will be one short of nr_workers while a worker is acting
as manager, so the check can trip even when nothing is wrong.  Removed.

Thanks.

-- 
tejun


Re: [PATCH 17/31] workqueue: implement attribute-based unbound worker_pool management

2013-03-10 Thread Tejun Heo
On Sun, Mar 10, 2013 at 06:08:57PM +0800, Lai Jiangshan wrote:
> > @@ -3185,12 +3250,133 @@ static int init_worker_pool(struct worker_pool *pool)
> > mutex_init(&pool->assoc_mutex);
> > ida_init(&pool->worker_ida);
> >  
> > +   INIT_HLIST_NODE(&pool->hash_node);
> > +   atomic_set(&pool->refcnt, 1);
> 
> We should document that the code before "atomic_set(&pool->refcnt, 1);" must
> not fail (in case we add failable code before it in the future and forget
> this requirement).
> Reason: when get_unbound_pool() fails, we expect ->refcnt == 1.

Yeap, comments added.
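
For reference, a minimal sketch of how such a note could read in
init_worker_pool() (the wording is mine, not the actual follow-up patch):

	INIT_HLIST_NODE(&pool->hash_node);

	/*
	 * refcnt starts at 1.  Nothing that can fail may be added above
	 * this point: if get_unbound_pool() bails out after
	 * init_worker_pool() has succeeded, put_unbound_pool() relies on
	 * ->refcnt being exactly 1 to tear the pool back down.
	 */
	atomic_set(&pool->refcnt, 1);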

> > +/**
> > + * put_unbound_pool - put a worker_pool
> > + * @pool: worker_pool to put
> > + *
> > + * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
> > + * safe manner.
> > + */
> > +static void put_unbound_pool(struct worker_pool *pool)
> > +{
> > +   struct worker *worker;
> > +
> > +   if (!atomic_dec_and_test(&pool->refcnt))
> > +   return;
> 
> If get_unbound_pool() happens here, it will get a destroyed pool,
> so we need to move "spin_lock_irq(&workqueue_lock);" before the above statement.
> (And ->refcnt doesn't need to be atomic after the move.)

Hmmm... right.  Nice catch.  Updating...
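
Something along these lines, presumably (a sketch of the reordering only, not
the actual update): take workqueue_lock before the decrement so a concurrent
get_unbound_pool() can never see a pool whose refcnt has already hit zero, and
make ->refcnt a plain int since every change to it then happens under the
lock.  Worker teardown is elided and rcu_free_pool() stands in for whatever
sched-RCU callback the patch uses to free the pool.

	static void put_unbound_pool(struct worker_pool *pool)
	{
		spin_lock_irq(&workqueue_lock);
		if (--pool->refcnt) {
			spin_unlock_irq(&workqueue_lock);
			return;
		}

		/*
		 * Unhash while still holding workqueue_lock so a racing
		 * get_unbound_pool() can no longer find this pool.
		 */
		hash_del(&pool->hash_node);
		spin_unlock_irq(&workqueue_lock);

		/* tear down workers here, then free in a sched-RCU safe way */
		call_rcu_sched(&pool->rcu, rcu_free_pool);
	}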

> > +   if (WARN_ON(pool->nr_workers != pool->nr_idle))
> > +   return;
> 
> This can trigger a false positive; we should remove this WARN_ON().

How would the test fail spuriously?  Can you please elaborate?

Thanks.

-- 
tejun


Re: [PATCH 17/31] workqueue: implement attribute-based unbound worker_pool management

2013-03-10 Thread Lai Jiangshan
On 02/03/13 11:24, Tejun Heo wrote:
> This patch makes unbound worker_pools reference counted and
> dynamically created and destroyed as workqueues needing them come and
> go.  All unbound worker_pools are hashed on unbound_pool_hash which is
> keyed by the content of worker_pool->attrs.
> 
> When an unbound workqueue is allocated, get_unbound_pool() is called
> with the attributes of the workqueue.  If there already is a matching
> worker_pool, the reference count is bumped and the pool is returned.
> If not, a new worker_pool with matching attributes is created and
> returned.
> 
> When an unbound workqueue is destroyed, put_unbound_pool() is called
> which decrements the reference count of the associated worker_pool.
> If the refcnt reaches zero, the worker_pool is destroyed in a sched-RCU
> safe way.
> 
> Note that the standard unbound worker_pools - normal and highpri ones
> with no specific cpumask affinity - are no longer created explicitly
> during init_workqueues().  init_workqueues() only initializes
> workqueue_attrs to be used for standard unbound pools -
> unbound_std_wq_attrs[].  The pools are spawned on demand as workqueues
> are created.
> 
> Signed-off-by: Tejun Heo <t...@kernel.org>
> ---
>  kernel/workqueue.c | 230 ++---
>  1 file changed, 218 insertions(+), 12 deletions(-)
> 
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 7eba824..fb91b67 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -41,6 +41,7 @@
>  #include <linux/debug_locks.h>
>  #include <linux/lockdep.h>
>  #include <linux/idr.h>
> +#include <linux/jhash.h>
>  #include <linux/hashtable.h>
>  #include <linux/rculist.h>
>  
> @@ -80,6 +81,7 @@ enum {
>  
>   NR_STD_WORKER_POOLS = 2,/* # standard pools per cpu */
>  
> + UNBOUND_POOL_HASH_ORDER = 6,/* hashed by pool->attrs */
>   BUSY_WORKER_HASH_ORDER  = 6,/* 64 pointers */
>  
>   MAX_IDLE_WORKERS_RATIO  = 4,/* 1/4 of busy can be idle */
> @@ -149,6 +151,8 @@ struct worker_pool {
>   struct ida  worker_ida; /* L: for worker IDs */
>  
>   struct workqueue_attrs  *attrs; /* I: worker attributes */
> + struct hlist_node   hash_node;  /* R: unbound_pool_hash node */
> + atomic_t refcnt; /* refcnt for unbound pools */
>  
>   /*
>* The current concurrency level.  As it's likely to be accessed
> @@ -156,6 +160,12 @@ struct worker_pool {
>* cacheline.
>*/
>   atomic_t nr_running ____cacheline_aligned_in_smp;
> +
> + /*
> +  * Destruction of pool is sched-RCU protected to allow dereferences
> +  * from get_work_pool().
> +  */
> + struct rcu_head rcu;
>  } ____cacheline_aligned_in_smp;
>  
>  /*
> @@ -218,6 +228,11 @@ struct workqueue_struct {
>  
>  static struct kmem_cache *pwq_cache;
>  
> +/* hash of all unbound pools keyed by pool->attrs */
> +static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
> +
> +static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
> +
>  struct workqueue_struct *system_wq __read_mostly;
>  EXPORT_SYMBOL_GPL(system_wq);
>  struct workqueue_struct *system_highpri_wq __read_mostly;
> @@ -1740,7 +1755,7 @@ static struct worker *create_worker(struct worker_pool *pool)
>   worker->pool = pool;
>   worker->id = id;
>  
> - if (pool->cpu != WORK_CPU_UNBOUND)
> + if (pool->cpu >= 0)
>   worker->task = kthread_create_on_node(worker_thread,
>   worker, cpu_to_node(pool->cpu),
>   "kworker/%d:%d%s", pool->cpu, id, pri);
> @@ -3159,6 +3174,54 @@ fail:
>   return NULL;
>  }
>  
> +static void copy_workqueue_attrs(struct workqueue_attrs *to,
> +  const struct workqueue_attrs *from)
> +{
> + to->nice = from->nice;
> + cpumask_copy(to->cpumask, from->cpumask);
> +}
> +
> +/*
> + * Hacky implementation of jhash of bitmaps which only considers the
> + * specified number of bits.  We probably want a proper implementation in
> + * include/linux/jhash.h.
> + */
> +static u32 jhash_bitmap(const unsigned long *bitmap, int bits, u32 hash)
> +{
> + int nr_longs = bits / BITS_PER_LONG;
> + int nr_leftover = bits % BITS_PER_LONG;
> + unsigned long leftover = 0;
> +
> + if (nr_longs)
> + hash = jhash(bitmap, nr_longs * sizeof(long), hash);
> + if (nr_leftover) {
> + bitmap_copy(&leftover, bitmap + nr_longs, nr_leftover);
> + hash = jhash(&leftover, sizeof(long), hash);
> + }
> + return hash;
> +}
> +
> +/* hash value of the content of @attr */
> +static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
> +{
> + u32 hash = 0;
> +
> + hash = jhash_1word(attrs->nice, hash);
> + hash = jhash_bitmap(cpumask_bits(attrs->cpumask), nr_cpu_ids, hash);
> + return hash;
> +}
> +
> +/* content equality test */
> +static bool wqattrs_equal(const struct workqueue_attrs *a,
> +   


[PATCH 17/31] workqueue: implement attribute-based unbound worker_pool management

2013-03-01 Thread Tejun Heo
This patch makes unbound worker_pools reference counted and
dynamically created and destroyed as workqueues needing them come and
go.  All unbound worker_pools are hashed on unbound_pool_hash which is
keyed by the content of worker_pool->attrs.

When an unbound workqueue is allocated, get_unbound_pool() is called
with the attributes of the workqueue.  If there already is a matching
worker_pool, the reference count is bumped and the pool is returned.
If not, a new worker_pool with matching attributes is created and
returned.

When an unbound workqueue is destroyed, put_unbound_pool() is called
which decrements the reference count of the associated worker_pool.
If the refcnt reaches zero, the worker_pool is destroyed in a sched-RCU
safe way.

Note that the standard unbound worker_pools - normal and highpri ones
with no specific cpumask affinity - are no longer created explicitly
during init_workqueues().  init_workqueues() only initializes
workqueue_attrs to be used for standard unbound pools -
unbound_std_wq_attrs[].  The pools are spawned on demand as workqueues
are created.
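
As an illustration of that last paragraph, the init path can be sketched
roughly as below.  alloc_workqueue_attrs() comes from an earlier patch in this
series; std_nice[], BUG_ON() and the cpumask_setall() call are assumptions
based on the description ("normal and highpri, no specific cpumask affinity"),
not the literal hunk, which is not shown here.

	/* in init_workqueues(): set up attrs for the standard unbound pools */
	static const int std_nice[] = { 0, HIGHPRI_NICE_LEVEL };
	int i;

	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
		struct workqueue_attrs *attrs;

		attrs = alloc_workqueue_attrs(GFP_KERNEL);
		BUG_ON(!attrs);

		attrs->nice = std_nice[i];
		cpumask_setall(attrs->cpumask);	/* no specific affinity */

		unbound_std_wq_attrs[i] = attrs;
	}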

Signed-off-by: Tejun Heo <t...@kernel.org>
---
 kernel/workqueue.c | 230 ++---
 1 file changed, 218 insertions(+), 12 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7eba824..fb91b67 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/jhash.h>
 #include <linux/hashtable.h>
 #include <linux/rculist.h>
 
@@ -80,6 +81,7 @@ enum {
 
NR_STD_WORKER_POOLS = 2,/* # standard pools per cpu */
 
+   UNBOUND_POOL_HASH_ORDER = 6,/* hashed by pool->attrs */
BUSY_WORKER_HASH_ORDER  = 6,/* 64 pointers */
 
MAX_IDLE_WORKERS_RATIO  = 4,/* 1/4 of busy can be idle */
@@ -149,6 +151,8 @@ struct worker_pool {
struct ida  worker_ida; /* L: for worker IDs */
 
struct workqueue_attrs  *attrs; /* I: worker attributes */
+   struct hlist_node   hash_node;  /* R: unbound_pool_hash node */
+   atomic_t refcnt; /* refcnt for unbound pools */
 
/*
 * The current concurrency level.  As it's likely to be accessed
@@ -156,6 +160,12 @@ struct worker_pool {
 * cacheline.
 */
	atomic_t nr_running ____cacheline_aligned_in_smp;
+
+   /*
+* Destruction of pool is sched-RCU protected to allow dereferences
+* from get_work_pool().
+*/
+   struct rcu_head rcu;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -218,6 +228,11 @@ struct workqueue_struct {
 
 static struct kmem_cache *pwq_cache;
 
+/* hash of all unbound pools keyed by pool->attrs */
+static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
+
+static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+
 struct workqueue_struct *system_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -1740,7 +1755,7 @@ static struct worker *create_worker(struct worker_pool *pool)
worker->pool = pool;
worker->id = id;
 
-   if (pool->cpu != WORK_CPU_UNBOUND)
+   if (pool->cpu >= 0)
worker->task = kthread_create_on_node(worker_thread,
worker, cpu_to_node(pool->cpu),
"kworker/%d:%d%s", pool->cpu, id, pri);
@@ -3159,6 +3174,54 @@ fail:
return NULL;
 }
 
+static void copy_workqueue_attrs(struct workqueue_attrs *to,
+const struct workqueue_attrs *from)
+{
+   to->nice = from->nice;
+   cpumask_copy(to->cpumask, from->cpumask);
+}
+
+/*
+ * Hacky implementation of jhash of bitmaps which only considers the
+ * specified number of bits.  We probably want a proper implementation in
+ * include/linux/jhash.h.
+ */
+static u32 jhash_bitmap(const unsigned long *bitmap, int bits, u32 hash)
+{
+   int nr_longs = bits / BITS_PER_LONG;
+   int nr_leftover = bits % BITS_PER_LONG;
+   unsigned long leftover = 0;
+
+   if (nr_longs)
+   hash = jhash(bitmap, nr_longs * sizeof(long), hash);
+   if (nr_leftover) {
+   bitmap_copy(&leftover, bitmap + nr_longs, nr_leftover);
+   hash = jhash(&leftover, sizeof(long), hash);
+   }
+   return hash;
+}
+
+/* hash value of the content of @attr */
+static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
+{
+   u32 hash = 0;
+
+   hash = jhash_1word(attrs->nice, hash);
+   hash = jhash_bitmap(cpumask_bits(attrs->cpumask), nr_cpu_ids, hash);
+   return hash;
+}
+
+/* content equality test */
+static bool wqattrs_equal(const struct workqueue_attrs *a,
+ const struct workqueue_attrs *b)
+{
+   if (a->nice != b->nice)
+   return false;
+   if (!cpumask_equal(a->cpumask, b->cpumask))
+   return false;
+   return true;
+}
+
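
The diff above does not reach get_unbound_pool() itself.  For orientation,
here is a rough sketch of how the lookup side ties wqattrs_hash() and
wqattrs_equal() together; anything beyond the hunks shown (the creation path
in particular) follows the changelog, not the literal patch text.

	static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
	{
		u32 hash = wqattrs_hash(attrs);
		struct worker_pool *pool;

		spin_lock_irq(&workqueue_lock);

		/* reuse an existing pool with identical attributes, if any */
		hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
			if (wqattrs_equal(pool->attrs, attrs)) {
				atomic_inc(&pool->refcnt);
				spin_unlock_irq(&workqueue_lock);
				return pool;
			}
		}
		spin_unlock_irq(&workqueue_lock);

		/*
		 * No match: the real function allocates a new pool, copies
		 * @attrs into pool->attrs, starts its first worker and hashes
		 * the pool on unbound_pool_hash before returning it.  That
		 * path is elided from this sketch.
		 */
		return NULL;
	}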
 
