All read/write functions already have a single coroutine-based function at the BlockBackend level through which all requests go (no matter which API style the external caller used) and which passes the requests down to the block node level.
This patch exports a bdrv_co_ioctl() function and uses it to extend this
mode of operation to ioctls.

Signed-off-by: Kevin Wolf <kw...@redhat.com>
---
 block/block-backend.c          | 39 +++++++++++++++++++++++++++++++++------
 block/io.c                     |  8 ++++----
 include/block/block.h          |  1 +
 include/sysemu/block-backend.h |  1 +
 4 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/block/block-backend.c b/block/block-backend.c
index 39336de..c53ca30 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1141,23 +1141,50 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
     bdrv_aio_cancel_async(acb);
 }
 
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
     if (!blk_is_available(blk)) {
         return -ENOMEDIUM;
     }
 
-    return bdrv_ioctl(blk_bs(blk), req, buf);
+    return bdrv_co_ioctl(blk_bs(blk), req, buf);
+}
+
+static void blk_ioctl_entry(void *opaque)
+{
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+                             rwco->qiov->iov[0].iov_base);
+}
+
+int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+{
+    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
+}
+
+static void blk_aio_ioctl_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
+
+    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+                             rwco->qiov->iov[0].iov_base);
+    blk_aio_complete(acb);
 }
 
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
 {
-    if (!blk_is_available(blk)) {
-        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
-    }
+    QEMUIOVector qiov;
+    struct iovec iov;
+
+    iov = (struct iovec) {
+        .iov_base = buf,
+        .iov_len = 0,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
 
-    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
+    return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
 }
 
 int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
diff --git a/block/io.c b/block/io.c
index ff93ba1..7c119d5 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2492,7 +2492,7 @@ int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
     return rwco.ret;
 }
 
-static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
+int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
 {
     BlockDriver *drv = bs->drv;
     BdrvTrackedRequest tracked_req;
@@ -2528,7 +2528,7 @@ typedef struct {
 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
 {
     BdrvIoctlCoData *data = opaque;
-    data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
+    data->ret = bdrv_co_ioctl(data->bs, data->req, data->buf);
 }
 
 /* needed for generic scsi interface */
@@ -2558,8 +2558,8 @@ int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
-    acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
-                                      acb->req.req, acb->req.buf);
+    acb->req.error = bdrv_co_ioctl(acb->common.bs,
+                                   acb->req.req, acb->req.buf);
     bdrv_co_complete(acb);
 }
 
diff --git a/include/block/block.h b/include/block/block.h
index 99a15a6..e06db62 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -318,6 +318,7 @@ void bdrv_aio_cancel(BlockAIOCB *acb);
 void bdrv_aio_cancel_async(BlockAIOCB *acb);
 
 /* sg packet commands */
+int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
         unsigned long int req, void *buf,
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index b07159b..6444e41 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -146,6 +146,7 @@ BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque);
 void blk_aio_cancel(BlockAIOCB *acb);
 void blk_aio_cancel_async(BlockAIOCB *acb);
+int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque);
-- 
1.8.3.1
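
For readers less familiar with the BlockBackend API, here is a minimal caller-side sketch (not part of the patch) of the two entry points after this change. The example() function, the ioctl_done() callback and the use of SG_GET_VERSION_NUM are illustrative assumptions; only blk_ioctl(), blk_aio_ioctl() and the BlockCompletionFunc signature come from the headers touched above. Both paths now funnel into blk_co_ioctl() as described in the commit message.

/* Illustrative sketch only: example() and ioctl_done() are hypothetical,
 * and SG_GET_VERSION_NUM assumes a Linux host with <scsi/sg.h> available. */
#include <scsi/sg.h>
#include "sysemu/block-backend.h"

/* Completion callback for the AIO path; matches BlockCompletionFunc. */
static void ioctl_done(void *opaque, int ret)
{
    int *result = opaque;
    *result = ret;
}

static void example(BlockBackend *blk)
{
    int version = 0;
    int aio_result = 0;

    /* Synchronous path: blk_ioctl() now runs blk_ioctl_entry() in a
     * coroutine (via blk_prw()) instead of calling bdrv_ioctl(). */
    int ret = blk_ioctl(blk, SG_GET_VERSION_NUM, &version);

    /* Asynchronous path: blk_aio_ioctl() wraps the same coroutine code
     * through blk_aio_prwv() and signals completion via the callback. */
    blk_aio_ioctl(blk, SG_GET_VERSION_NUM, &version, ioctl_done, &aio_result);

    (void)ret;
}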