On Tue, May 19, 2020 at 11:45:24PM +0200, Ahmed S. Darwish wrote:
> @@ -713,10 +713,20 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
>   */
>  void lru_add_drain_all(void)
>  {

> +     static unsigned int lru_drain_gen;
>       static struct cpumask has_work;
> +     static DEFINE_MUTEX(lock);
> +     int cpu, this_gen;
>  
>       /*
>        * Make sure nobody triggers this path before mm_percpu_wq is fully
> @@ -725,21 +735,48 @@ void lru_add_drain_all(void)
>       if (WARN_ON(!mm_percpu_wq))
>               return;
>  

> +     this_gen = READ_ONCE(lru_drain_gen);
> +     smp_rmb();

The READ_ONCE() + smp_rmb() pair above is an open-coded load-acquire;
write it as one:

	this_gen = smp_load_acquire(&lru_drain_gen);
>  
>       mutex_lock(&lock);
>  
>       /*
> +      * (C) Exit the draining operation if a newer generation, from another
> +      * lru_add_drain_all(), was already scheduled for draining. Check (A).
>        */
> +     if (unlikely(this_gen != lru_drain_gen))
>               goto done;
>  

> +     WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
> +     smp_wmb();

You can leave this smp_wmb() out and rely on the smp_mb() implied by
queue_work_on()'s test_and_set_bit().
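That is, roughly (a sketch, not the patch itself; assuming the rule from
Documentation/atomic_t.txt that value-returning atomic RMW ops such as
test_and_set_bit() are fully ordered):

	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	/* no smp_wmb() needed here */

	for_each_online_cpu(cpu) {
		...
		/*
		 * queue_work_on() begins with
		 * test_and_set_bit(WORK_STRUCT_PENDING_BIT, ...); that RMW
		 * implies a full smp_mb(), which already orders the
		 * lru_drain_gen increment before the queued work becomes
		 * visible.
		 */
		queue_work_on(cpu, mm_percpu_wq, work);
		...
	}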

>       cpumask_clear(&has_work);
> -
>       for_each_online_cpu(cpu) {
>               struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
>  

While you're here, do:

	s/cpumask_set_cpu/__&/

i.e. use the non-atomic __cpumask_set_cpu(): has_work is only ever
modified with 'lock' held, so the atomic RMW buys nothing.
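A sketch of the resulting loop body; cpu_needs_drain is a placeholder
for the patch's actual per-CPU checks, which aren't quoted above:

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (cpu_needs_drain) {	/* placeholder condition */
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			/* non-atomic: &has_work only changes under 'lock' */
			__cpumask_set_cpu(cpu, &has_work);
		}
	}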

> @@ -766,7 +803,7 @@ void lru_add_drain_all(void)
>  {
>       lru_add_drain();
>  }
> -#endif
> +#endif /* CONFIG_SMP */
>  
>  /**
>   * release_pages - batched put_page()
