On Tue, May 28, 2013 at 02:26:18PM -0700, Kent Overstreet wrote:
> Just making ioctx shutdown asynchronous so as not to block io_destroy()
> - and percpu refcounts for the ioctx are going to need an RCU barrier in
> the same place anyway.
> 
> Signed-off-by: Kent Overstreet <koverstr...@google.com>

Signed-off-by: Benjamin LaHaise <b...@kvack.org>
Tested-by: Benjamin LaHaise <b...@kvack.org>

I have reviewed and tested this, and it fixes the io_setup() -EAGAIN error
seen with the first version of this patch.  Thanks Kent!
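
For anyone who wants to poke at the failure mode, below is a rough userspace
reproducer sketch.  It is not from the original report; the nr_events value,
the iteration count and the assumption that fs.aio-max-nr sits at its usual
65536 default are all illustrative.  The idea is simply that a tight
io_setup()/io_destroy() loop lets not-yet-released aio_nr reservations pile
up when the release is deferred to free_ioctx(), so a later io_setup() can
come back -EAGAIN even though userspace has no contexts outstanding.

/* hypothetical reproducer sketch - nr_events and loop count are arbitrary */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

static long sys_io_setup(unsigned nr_events, aio_context_t *ctx)
{
	return syscall(__NR_io_setup, nr_events, ctx);
}

static long sys_io_destroy(aio_context_t ctx)
{
	return syscall(__NR_io_destroy, ctx);
}

int main(void)
{
	unsigned nr_events = 10000;	/* a sizeable slice of fs.aio-max-nr */
	int i;

	for (i = 0; i < 10000; i++) {
		aio_context_t ctx = 0;

		if (sys_io_setup(nr_events, &ctx) < 0) {
			/* with the v1 patch this could be EAGAIN even with
			 * no contexts alive from userspace's point of view */
			fprintf(stderr, "iteration %d: io_setup: %s\n",
				i, strerror(errno));
			return 1;
		}
		sys_io_destroy(ctx);
	}

	printf("no spurious EAGAIN seen\n");
	return 0;
}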

                -ben

> Cc: Zach Brown <z...@redhat.com>
> Cc: Felipe Balbi <ba...@ti.com>
> Cc: Greg Kroah-Hartman <gre...@linuxfoundation.org>
> Cc: Mark Fasheh <mfas...@suse.com>
> Cc: Joel Becker <jl...@evilplan.org>
> Cc: Rusty Russell <ru...@rustcorp.com.au>
> Cc: Jens Axboe <ax...@kernel.dk>
> Cc: Asai Thambi S P <asamymuth...@micron.com>
> Cc: Selvan Mani <sm...@micron.com>
> Cc: Sam Bradshaw <sbrads...@micron.com>
> Cc: Jeff Moyer <jmo...@redhat.com>
> Cc: Al Viro <v...@zeniv.linux.org.uk>
> Cc: Benjamin LaHaise <b...@kvack.org>
> ---
>  fs/aio.c | 36 ++++++++++++++++--------------------
>  1 file changed, 16 insertions(+), 20 deletions(-)
> 
> diff --git a/fs/aio.c b/fs/aio.c
> index 7fe5bde..2bbcacf 100644
> --- a/fs/aio.c
> +++ b/fs/aio.c
> @@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
>       for (i = 0; i < ctx->nr_pages; i++)
>               put_page(ctx->ring_pages[i]);
>  
> -     if (ctx->mmap_size)
> -             vm_munmap(ctx->mmap_base, ctx->mmap_size);
> -
>       if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
>               kfree(ctx->ring_pages);
>  }
> @@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
>  
>       aio_free_ring(ctx);
>  
> -     spin_lock(&aio_nr_lock);
> -     BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
> -     aio_nr -= ctx->max_reqs;
> -     spin_unlock(&aio_nr_lock);
> -
>       pr_debug("freeing %p\n", ctx);
>  
>       /*
> @@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
>  {
>       if (!atomic_xchg(&ctx->dead, 1)) {
>               hlist_del_rcu(&ctx->list);
> -             /* Between hlist_del_rcu() and dropping the initial ref */
> -             synchronize_rcu();
>  
>               /*
> -              * We can't punt to workqueue here because put_ioctx() ->
> -              * free_ioctx() will unmap the ringbuffer, and that has to be
> -              * done in the original process's context. kill_ioctx_rcu/work()
> -              * exist for exit_aio(), as in that path free_ioctx() won't do
> -              * the unmap.
> +              * It'd be more correct to do this in free_ioctx(), after all
> +              * the outstanding kiocbs have finished - but by then io_destroy
> +              * has already returned, so io_setup() could potentially return
> +              * -EAGAIN with no ioctxs actually in use (as far as userspace
> +              *  could tell).
>                */
> -             kill_ioctx_work(&ctx->rcu_work);
> +             spin_lock(&aio_nr_lock);
> +             BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
> +             aio_nr -= ctx->max_reqs;
> +             spin_unlock(&aio_nr_lock);
> +
> +             if (ctx->mmap_size)
> +                     vm_munmap(ctx->mmap_base, ctx->mmap_size);
> +
> +             /* Between hlist_del_rcu() and dropping the initial ref */
> +             call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
>       }
>  }
>  
> @@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
>                */
>               ctx->mmap_size = 0;
>  
> -             if (!atomic_xchg(&ctx->dead, 1)) {
> -                     hlist_del_rcu(&ctx->list);
> -                     call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
> -             }
> +             kill_ioctx(ctx);
>       }
>  }
>  
> -- 
> 1.8.2.1
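
One more note for readers less familiar with the idiom relied on here:
lookup_ioctx() walks mm->ioctx_list under rcu_read_lock(), so after
hlist_del_rcu() a concurrent reader may still be holding a pointer to the
ctx.  Deferring kill_ioctx_rcu() - which drops the initial ref - through
call_rcu() guarantees those readers have left their read-side sections
before the ctx can go away.  The sketch below is a kernel-style paraphrase
of that interaction, not code from this patch and not a standalone buildable
example:

	/* reader side, roughly what lookup_ioctx() does */
	rcu_read_lock();
	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id) {
			/* take a reference; safe because freeing is
			 * deferred past this read-side section */
			atomic_inc(&ctx->users);
			break;
		}
	}
	rcu_read_unlock();

	/* teardown side, what kill_ioctx() does after this patch */
	hlist_del_rcu(&ctx->list);		   /* new lookups miss it...  */
	call_rcu(&ctx->rcu_head, kill_ioctx_rcu); /* ...old readers drain
						      before the initial ref
						      is dropped */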

-- 
"Thought is the essence of where you are now."