This is a very slow operation. There is no reason to do it again if somebody
else already drained all per-cpu vectors while we waited for the lock.

Signed-off-by: Konstantin Khlebnikov <khlebni...@yandex-team.ru>
---
 mm/swap.c |   13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/mm/swap.c b/mm/swap.c
index 38c3fa4308e2..6203918e1316 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -708,9 +708,10 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
  */
 void lru_add_drain_all(void)
 {
+       static seqcount_t seqcount = SEQCNT_ZERO(seqcount);
        static DEFINE_MUTEX(lock);
        static struct cpumask has_work;
-       int cpu;
+       int cpu, seq;
 
        /*
         * Make sure nobody triggers this path before mm_percpu_wq is fully
@@ -719,7 +720,16 @@ void lru_add_drain_all(void)
        if (WARN_ON(!mm_percpu_wq))
                return;
 
+       seq = raw_read_seqcount_latch(&seqcount);
+
        mutex_lock(&lock);
+
+       /* Piggyback on drain done by somebody else. */
+       if (__read_seqcount_retry(&seqcount, seq))
+               goto done;
+
+       raw_write_seqcount_latch(&seqcount);
+
        cpumask_clear(&has_work);
 
        for_each_online_cpu(cpu) {
@@ -740,6 +750,7 @@ void lru_add_drain_all(void)
        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));
 
+done:
        mutex_unlock(&lock);
 }
 #else
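
For readers unfamiliar with the pattern, below is a minimal user-space
sketch of the same piggyback idea, using a plain generation counter under
a pthread mutex in place of the kernel's seqcount latch. All names here
(drain_all(), do_expensive_drain(), generation) are hypothetical
stand-ins, not kernel API. The idea: snapshot the counter before waiting
for the lock; if it moved while we slept on the lock, a full drain that
began after our snapshot has already done the work, so we can skip it.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long generation;	/* bumped once per drain that starts */

	static void do_expensive_drain(void)
	{
		/* Stand-in for flushing every per-cpu vector. */
		puts("draining...");
	}

	void drain_all(void)
	{
		/* Snapshot the generation before waiting for the lock. */
		unsigned long seen = __atomic_load_n(&generation, __ATOMIC_ACQUIRE);

		pthread_mutex_lock(&lock);

		/*
		 * Piggyback on a drain done by somebody else: if the counter
		 * moved while we slept on the lock, a drain that started after
		 * our snapshot has finished (its owner held the lock until it
		 * was done), so our work is already covered.
		 */
		if (__atomic_load_n(&generation, __ATOMIC_RELAXED) != seen)
			goto done;

		/*
		 * Bump before draining, mirroring raw_write_seqcount_latch()
		 * in the patch: only a drain that starts after a caller's
		 * snapshot may satisfy that caller, otherwise work queued
		 * between drain start and snapshot could be missed.
		 */
		__atomic_fetch_add(&generation, 1, __ATOMIC_RELEASE);

		do_expensive_drain();
	done:
		pthread_mutex_unlock(&lock);
	}

	static void *worker(void *arg)
	{
		(void)arg;
		drain_all();
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];
		int i;

		for (i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, NULL);
		for (i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		return 0;
	}

Built with "cc -pthread", several concurrent callers will typically
produce a single "draining..." line: the first thread in does the work
and the rest piggyback. The bump-before-drain ordering is the key design
choice carried over from the patch.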
