Drop the assumption that we're using the main AioContext. Convert qemu_bh_new() to aio_bh_new() and qemu_aio_wait() to aio_poll() so we're using the BlockDriverState's AioContext.
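In practice the conversion looks like this (an illustrative before/after
sketch assembled from the hunks below, not additional code; s and acb are
the QED state and request variables at the real call sites):

    /* before: bottom half and blocking wait implicitly use the main loop */
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    while (ret == -EINPROGRESS) {
        qemu_aio_wait();
    }

    /* after: both are explicitly tied to this BlockDriverState's AioContext */
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }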
Implement the .bdrv_detach/attach_aio_context() interfaces to move the
QED_F_NEED_CHECK timer from the old AioContext to the new one.  (A sketch
of how the block layer drives these two callbacks follows after the patch.)

Signed-off-by: Stefan Hajnoczi <stefa...@redhat.com>
---
 block/qed-table.c |  8 ++++----
 block/qed.c       | 35 +++++++++++++++++++++++++++++------
 2 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/block/qed-table.c b/block/qed-table.c
index 76d2dcc..f61107a 100644
--- a/block/qed-table.c
+++ b/block/qed-table.c
@@ -173,7 +173,7 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
     qed_read_table(s, s->header.l1_table_offset,
                    s->l1_table, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
+        aio_poll(bdrv_get_aio_context(s->bs), true);
     }
 
     return ret;
@@ -194,7 +194,7 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
 
     qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
+        aio_poll(bdrv_get_aio_context(s->bs), true);
     }
 
     return ret;
@@ -267,7 +267,7 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
 
     qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
+        aio_poll(bdrv_get_aio_context(s->bs), true);
     }
 
     return ret;
@@ -289,7 +289,7 @@ int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
 
     qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
+        aio_poll(bdrv_get_aio_context(s->bs), true);
     }
 
     return ret;
diff --git a/block/qed.c b/block/qed.c
index c130e42..79f5bd3 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -21,12 +21,13 @@
 static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
 {
     QEDAIOCB *acb = (QEDAIOCB *)blockacb;
+    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
     bool finished = false;
 
     /* Wait for the request to finish */
     acb->finished = &finished;
     while (!finished) {
-        qemu_aio_wait();
+        aio_poll(aio_context, true);
     }
 }
 
@@ -373,6 +374,27 @@ static void bdrv_qed_rebind(BlockDriverState *bs)
     s->bs = bs;
 }
 
+static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
+{
+    BDRVQEDState *s = bs->opaque;
+
+    qed_cancel_need_check_timer(s);
+    timer_free(s->need_check_timer);
+}
+
+static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
+                                        AioContext *new_context)
+{
+    BDRVQEDState *s = bs->opaque;
+
+    s->need_check_timer = aio_timer_new(new_context,
+                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
+                                        qed_need_check_timer_cb, s);
+    if (s->header.features & QED_F_NEED_CHECK) {
+        qed_start_need_check_timer(s);
+    }
+}
+
 static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
 {
@@ -496,8 +518,7 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
         }
     }
 
-    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                       qed_need_check_timer_cb, s);
+    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));
 
 out:
     if (ret) {
@@ -528,8 +549,7 @@ static void bdrv_qed_close(BlockDriverState *bs)
 {
     BDRVQEDState *s = bs->opaque;
 
-    qed_cancel_need_check_timer(s);
-    timer_free(s->need_check_timer);
+    bdrv_qed_detach_aio_context(bs);
 
     /* Ensure writes reach stable storage */
     bdrv_flush(bs->file);
@@ -919,7 +939,8 @@ static void qed_aio_complete(QEDAIOCB *acb, int ret)
 
     /* Arrange for a bh to invoke the completion function */
     acb->bh_ret = ret;
-    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
+    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
+                         qed_aio_complete_bh, acb);
     qemu_bh_schedule(acb->bh);
 
     /* Start next allocating write request waiting behind this one.  Note that
@@ -1644,6 +1665,8 @@ static BlockDriver bdrv_qed = {
     .bdrv_change_backing_file = bdrv_qed_change_backing_file,
     .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
     .bdrv_check               = bdrv_qed_check,
+    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
+    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
 };
 
 static void bdrv_qed_init(void)
-- 
1.9.0
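For anyone reviewing the new hooks: the block layer invokes them when a
BlockDriverState is moved between AioContexts. Roughly, and simplified
(the real bdrv_set_aio_context() in block.c also drains outstanding
requests and recurses into bs->file and bs->backing_hd):

    void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
    {
        bdrv_drain_all();                         /* quiesce in-flight I/O */

        if (bs->drv->bdrv_detach_aio_context) {
            bs->drv->bdrv_detach_aio_context(bs); /* QED frees its timer */
        }

        /* ... re-home bs onto new_context ... */

        if (bs->drv->bdrv_attach_aio_context) {
            bs->drv->bdrv_attach_aio_context(bs, new_context); /* QED re-arms it */
        }
    }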