nvme wakes the request coroutine via qemu_coroutine_enter() from a BH
scheduled in the BDS AioContext.  This may not be the same context as
the one in which the request originally ran, which would be wrong:
- It could mean we enter the coroutine before it yields,
- We would move the coroutine into a different context.
(Can be reproduced with multiqueue by adding a usleep(100000) before the
`while (data.ret == -EINPROGRESS)` loop.)

To fix that, use aio_co_wake() to run the coroutine in its home context.
Just like in the preceding iscsi and nfs patches, we can drop the
trivial nvme_rw_cb_bh() and use aio_co_wake() directly.  With this, we
can remove NVMeCoData.ctx.

Note the check of data->co == NULL to bypass the BH/yield combination in
case nvme_rw_cb() is called from nvme_submit_command(): We probably want
to keep this fast path for performance reasons, but we have to be quite
careful about it:
- We cannot overload .ret for this, but have to use a dedicated
  .skip_yield field.  Otherwise, if nvme_rw_cb() runs in a different
  thread than the coroutine, it may see .ret set and skip the yield,
  while nvme_rw_cb() will still schedule a BH for waking.  Therefore,
  the signal to skip the yield can only be set in nvme_rw_cb() if waking
  too is skipped, which is independent from communicating the return
  value.
- We can only skip the yield if nvme_rw_cb() actually runs in the
  request coroutine.  Otherwise (specifically if they run in different
  AioContexts), the order between this function's execution and the
  coroutine yielding (or not yielding) is not reliable.
- There is no point to yielding in a loop; there are no spurious wakes,
  so once we yield, we will only be re-entered once the command is done.
  Replace `while` by `if`.

Cc: [email protected]
Signed-off-by: Hanna Czenczek <[email protected]>
---
 block/nvme.c | 56 +++++++++++++++++++++++++++++---------------------------
 1 file changed, 29 insertions(+), 27 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index 7ed5f570bc..b8262ebfd9 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -1176,25 +1176,35 @@ fail:
 
 typedef struct {
     Coroutine *co;
+    bool skip_yield;
     int ret;
-    AioContext *ctx;
 } NVMeCoData;
 
-static void nvme_rw_cb_bh(void *opaque)
-{
-    NVMeCoData *data = opaque;
-    qemu_coroutine_enter(data->co);
-}
-
 static void nvme_rw_cb(void *opaque, int ret)
 {
     NVMeCoData *data = opaque;
+
     data->ret = ret;
-    if (!data->co) {
-        /* The rw coroutine hasn't yielded, don't try to enter. */
-        return;
+
+    if (data->co == qemu_coroutine_self()) {
+        /*
+         * Fast path: We are inside of the request coroutine (through
+         * nvme_submit_command, nvme_deferred_fn, nvme_process_completion).
+         * We can set data->skip_yield here to keep the coroutine from
+         * yielding, and then we don't need to schedule a BH to wake it.
+         */
+        data->skip_yield = true;
+    } else {
+        /*
+         * Safe to call: The case where we run in the request coroutine is
+         * handled above, so we must be independent of it; and without
+         * skip_yield set, the coroutine will yield.
+         * No need to release NVMeQueuePair.lock (we are called without it
+         * held).  (Note: If we enter the coroutine here, @data will
+         * probably be dangling once aio_co_wake() returns.)
+         */
+        aio_co_wake(data->co);
     }
-    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
 }
 
 static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
@@ -1218,7 +1228,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
         .cdw12 = cpu_to_le32(cdw12),
     };
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1235,9 +1245,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
         return r;
     }
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    while (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
@@ -1333,7 +1341,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
         .nsid = cpu_to_le32(s->nsid),
     };
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1341,9 +1349,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
     req = nvme_get_free_req(ioq);
     assert(req);
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    if (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
@@ -1384,7 +1390,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     };
 
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1404,9 +1410,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     assert(req);
 
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    while (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
@@ -1434,7 +1438,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     };
 
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1479,9 +1483,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     trace_nvme_dsm(s, offset, bytes);
 
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    while (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
-- 
2.51.1
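(Editor's note, not part of the patch: below is a minimal, self-contained sketch
of the wake/skip-yield pattern the diff above adopts, for review context only.
The names DemoCoData, demo_cb, demo_submit and demo_request are made up for
illustration; only the QEMU coroutine/AioContext calls, i.e.
qemu_coroutine_self(), qemu_coroutine_yield() and aio_co_wake(), are real API.
In this sketch demo_submit() completes synchronously, so the callback runs in
the request coroutine and the fast path is taken.)

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

typedef struct {
    Coroutine *co;      /* request coroutine, set before submission */
    bool skip_yield;    /* set by the callback on the fast path */
    int ret;            /* command result, -EINPROGRESS while pending */
} DemoCoData;

/* Completion callback; may run in the request coroutine or elsewhere. */
static void demo_cb(void *opaque, int ret)
{
    DemoCoData *data = opaque;

    data->ret = ret;

    if (data->co == qemu_coroutine_self()) {
        /* Fast path: same coroutine, so just tell it not to yield. */
        data->skip_yield = true;
    } else {
        /* Wake the request coroutine in its home AioContext. */
        aio_co_wake(data->co);
    }
}

/*
 * Stand-in for nvme_submit_command(); here the command completes
 * immediately, so demo_cb() runs in the caller's coroutine.
 */
static void demo_submit(void (*cb)(void *opaque, int ret), void *opaque)
{
    cb(opaque, 0);
}

/* Request side: submit, then yield once unless the fast path was hit. */
static coroutine_fn int demo_request(void)
{
    DemoCoData data = {
        .co = qemu_coroutine_self(),
        .ret = -EINPROGRESS,
    };

    demo_submit(demo_cb, &data);

    if (!data.skip_yield) {
        qemu_coroutine_yield();
    }

    return data.ret;
}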
