busy_worker_rebind_fn() can't return until all idle workers are rebound. This ordering is currently ensured by rebind_workers().
We use synchronize_all_idles_rebound() to wait for all idle workers to be rebound. This is an explicit way and it eases the pain of rebind_workers(). The sleeping synchronize_all_idles_rebound() must be called before WORKER_REBIND has been cleared. It adds a small overhead to the unlikely path. Signed-off-by: Lai Jiangshan <la...@cn.fujitsu.com> --- kernel/workqueue.c | 16 +++++++++++++++- 1 files changed, 15 insertions(+), 1 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c875951..16bcd84 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1387,9 +1387,23 @@ static void busy_worker_rebind_fn(struct work_struct *work) { struct worker *worker = container_of(work, struct worker, rebind_work); struct global_cwq *gcwq = worker->pool->gcwq; + struct idle_rebind *idle_rebind; + + if (worker_maybe_bind_and_lock(worker)) { + /* Is idle-rebinding still in progress? */ + if ((idle_rebind = gcwq->idle_rebind) != NULL) { + /* get reference */ + BUG_ON(idle_rebind->ref_cnt <= 0); + idle_rebind->ref_cnt++; + spin_unlock_irq(&gcwq->lock); + + synchronize_all_idles_rebound(gcwq, idle_rebind); + + spin_lock_irq(&gcwq->lock); + } - if (worker_maybe_bind_and_lock(worker)) worker_clr_flags(worker, WORKER_REBIND); + } spin_unlock_irq(&gcwq->lock); } -- 1.7.4.4 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/