On Mon, Jul 30, 2018 at 09:15:43AM +0200, Christoph Hellwig wrote:

> +static void aio_poll_complete_work(struct work_struct *work)
> +{
> +     struct poll_iocb *req = container_of(work, struct poll_iocb, work);
> +     struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
> +     struct poll_table_struct pt = { ._key = req->events };
> +     struct kioctx *ctx = iocb->ki_ctx;
> +     __poll_t mask;
> +
> +     if (READ_ONCE(req->cancelled)) {
....
> +     }
> +
> +     mask = vfs_poll(req->file, &pt) & req->events;
> +     if (!mask) {
> +             add_wait_queue(req->head, &req->wait);
> +             return;
> +     }
....
> +}

> +/* assumes we are called with irqs disabled */
> +static int aio_poll_cancel(struct kiocb *iocb)
> +{
> +     struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
> +     struct poll_iocb *req = &aiocb->poll;
> +
> +     spin_lock(&req->head->lock);
> +     if (!list_empty(&req->wait.entry)) {
> +             WRITE_ONCE(req->cancelled, true);
> +             list_del_init(&req->wait.entry);
> +             schedule_work(&aiocb->poll.work);
> +     }
> +     spin_unlock(&req->head->lock);
> +
> +     return 0;
> +}

> +static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
> +             void *key)
> +{
> +     struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
> +     __poll_t mask = key_to_poll(key);
> +
> +     /* for instances that support it check for an event match first: */
> +     if (mask && !(mask & req->events))
> +             return 0;
> +
> +     list_del_init(&req->wait.entry);
> +     schedule_work(&req->work);
> +     return 1;
> +}

> +static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
> +{
> +     struct kioctx *ctx = aiocb->ki_ctx;
> +     struct poll_iocb *req = &aiocb->poll;
> +     struct aio_poll_table apt;
> +     __poll_t mask;

> +     mask = vfs_poll(req->file, &apt.pt) & req->events;
> +     if (mask || apt.error) {

> +     } else {
> +             spin_lock_irq(&ctx->ctx_lock);
> +             if (!req->done) {
> +                     list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
> +                     aiocb->ki_cancel = aio_poll_cancel;
> +             }
> +             spin_unlock_irq(&ctx->ctx_lock);
> +     }

So what happens if
        * we call aio_poll(), add the sucker to the queue and see that we
need to wait
        * add to ->active_reqs just as the wakeup comes
        * wakeup removes us from the queue and hits schedule_work()
        * io_cancel() is called, triggering aio_poll_cancel(), which sees that
we are not on the queue and buggers off.  We are gone from ->active_reqs.
        * aio_poll_complete_work() is called, sees that ->cancelled is not set
        * aio_poll_complete_work() calls vfs_poll(), sees nothing interesting
and puts us back on the queue.

Unless I'm misreading it, cancel will end up with the iocb still around and
now impossible to cancel...  What am I missing?
