Now that we have ->current_work we can avoid inserting a barrier and waiting for its completion when cwq's queue is empty and no work item is running.
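For context, this builds on the wq_barrier mechanism already present in kernel/workqueue.c at this point in the series: a dummy work item whose handler fires a completion once everything queued ahead of it has run. Roughly (a sketch; field and function names as used in the diff below):

struct wq_barrier {
	struct work_struct	work;	/* dummy work item inserted into cwq->worklist */
	struct completion	done;	/* signalled once the barrier itself runs */
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	/* All work queued ahead of the barrier has been processed. */
	complete(&barr->done);
}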
Note: this change is also useful if we change flush_workqueue() to also check
the dead CPUs.

Signed-off-by: Oleg Nesterov <[EMAIL PROTECTED]>

--- mm-6.20-rc3/kernel/workqueue.c~1_opt	2007-01-07 23:15:50.000000000 +0300
+++ mm-6.20-rc3/kernel/workqueue.c	2007-01-07 23:26:45.000000000 +0300
@@ -405,12 +405,15 @@ static void wq_barrier_func(struct work_
 	complete(&barr->done);
 }
 
-static inline void init_wq_barrier(struct wq_barrier *barr)
+static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+				struct wq_barrier *barr, int tail)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
+
+	insert_work(cwq, &barr->work, tail);
 }
 
 static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -429,13 +432,20 @@ static void flush_cpu_workqueue(struct c
 		preempt_disable();
 	} else {
 		struct wq_barrier barr;
+		int active = 0;
 
-		init_wq_barrier(&barr);
-		__queue_work(cwq, &barr.work);
+		spin_lock_irq(&cwq->lock);
+		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+			insert_wq_barrier(cwq, &barr, 1);
+			active = 1;
+		}
+		spin_unlock_irq(&cwq->lock);
 
-		preempt_enable();	/* Can no longer touch *cwq */
-		wait_for_completion(&barr.done);
-		preempt_disable();
+		if (active) {
+			preempt_enable();
+			wait_for_completion(&barr.done);
+			preempt_disable();
+		}
 	}
 }
 
@@ -482,8 +492,7 @@ static void wait_on_work(struct cpu_work
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		init_wq_barrier(&barr);
-		insert_work(cwq, &barr.work, 0);
+		insert_wq_barrier(cwq, &barr, 0);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
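To make the new logic easier to read outside of diff form, here is approximately what the barrier branch of flush_cpu_workqueue() looks like once the patch is applied, reconstructed from the hunks above:

		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			/* Pending or running work: insert a barrier at the tail. */
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active) {
			preempt_enable();	/* can no longer touch *cwq */
			wait_for_completion(&barr.done);
			preempt_disable();
		}

If the worklist is empty and nothing is running, both checks fail, active stays 0, and we skip both the barrier and the wait entirely, which is the point of the change.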