Now that all bottom halves and callbacks are responsible for taking the AioContext lock themselves, we can migrate some users away from it and over to a dedicated QemuMutex or CoMutex.
Protect libnfs calls with a QemuMutex. Callbacks are invoked using
bottom halves, so we don't even have to drop it around callback
invocations.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 block/nfs.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/block/nfs.c b/block/nfs.c
index 08b43dd..3ee3275 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -54,6 +54,7 @@ typedef struct NFSClient {
     int events;
     bool has_zero_init;
     AioContext *aio_context;
+    QemuMutex mutex;
     blkcnt_t st_blocks;
     bool cache_used;
     NFSServer *server;
@@ -191,6 +192,7 @@ static void nfs_parse_filename(const char *filename, QDict *options,
 static void nfs_process_read(void *arg);
 static void nfs_process_write(void *arg);
 
+/* Called with QemuMutex held. */
 static void nfs_set_events(NFSClient *client)
 {
     int ev = nfs_which_events(client->context);
@@ -209,20 +211,20 @@ static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;
 
-    aio_context_acquire(client->aio_context);
+    qemu_mutex_lock(&client->mutex);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
-    aio_context_release(client->aio_context);
+    qemu_mutex_unlock(&client->mutex);
 }
 
 static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
@@ -242,6 +244,7 @@ static void nfs_co_generic_bh_cb(void *opaque)
     aio_co_wake(task->co);
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
                   void *private_data)
@@ -273,14 +276,17 @@ static int coroutine_fn nfs_co_readv(BlockDriverState *bs,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pread_async(client->context, client->fh,
                         sector_num * BDRV_SECTOR_SIZE,
                         nb_sectors * BDRV_SECTOR_SIZE,
                         nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -314,15 +320,18 @@ static int coroutine_fn nfs_co_writev(BlockDriverState *bs,
 
     qemu_iovec_to_buf(iov, 0, buf, nb_sectors * BDRV_SECTOR_SIZE);
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_pwrite_async(client->context, client->fh,
                          sector_num * BDRV_SECTOR_SIZE,
                          nb_sectors * BDRV_SECTOR_SIZE,
                          buf, nfs_co_generic_cb, &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         g_free(buf);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -343,12 +352,15 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
+    qemu_mutex_lock(&client->mutex);
     if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
                         &task) != 0) {
+        qemu_mutex_unlock(&client->mutex);
         return -ENOMEM;
     }
 
     nfs_set_events(client);
+    qemu_mutex_unlock(&client->mutex);
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -434,6 +446,7 @@ static void nfs_file_close(BlockDriverState *bs)
 {
     NFSClient *client = bs->opaque;
     nfs_client_close(client);
+    qemu_mutex_destroy(&client->mutex);
 }
 
 static NFSServer *nfs_config(QDict *options, Error **errp)
@@ -641,6 +654,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
     if (ret < 0) {
         return ret;
     }
+    qemu_mutex_init(&client->mutex);
     bs->total_sectors = ret;
     ret = 0;
     return ret;
@@ -696,6 +710,7 @@ static int nfs_has_zero_init(BlockDriverState *bs)
     return client->has_zero_init;
 }
 
+/* Called (via nfs_service) with QemuMutex held. */
 static void
 nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
                                void *private_data)
-- 
2.9.3
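For readers following the series, the locking discipline the patch introduces is: every path that touches the libnfs context (request submission in the coroutine functions, nfs_service() in the fd handlers) holds client->mutex, while the libnfs completion callback only schedules a bottom half, so the lock never has to be dropped around the callback. Below is a minimal standalone sketch of that pattern, using plain pthreads and a toy deferred-completion queue instead of QEMU's QemuMutex and bottom-half APIs; all names in it (Client, submit, run_pending_completion, ...) are hypothetical and not part of QEMU or libnfs.

/* Standalone sketch of the locking pattern from this patch: pthreads
 * stand in for QemuMutex, a toy deferred queue stands in for a QEMU
 * bottom half.  All identifiers here are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct Client {
    pthread_mutex_t mutex;    /* plays the role of NFSClient.mutex */
    int pending;              /* driver state, protected by mutex */
} Client;

typedef struct Completion {
    void (*fn)(void *opaque); /* runs later, outside the mutex */
    void *opaque;
    bool armed;
} Completion;

static Completion bh;         /* toy stand-in for a QEMU bottom half */

/* "Callback" invoked while the mutex is held (like nfs_co_generic_cb,
 * which runs under nfs_service): it only arms the deferred completion,
 * so the caller never needs to drop the lock around it. */
static void request_done_locked(Client *c, void (*fn)(void *), void *opaque)
{
    c->pending--;
    bh.fn = fn;
    bh.opaque = opaque;
    bh.armed = true;
}

/* Submission path (like nfs_co_readv/writev/flush): take the mutex
 * around the state update that models the libnfs *_async() call, then
 * release it before waiting for completion. */
static void submit(Client *c, void (*fn)(void *), void *opaque)
{
    pthread_mutex_lock(&c->mutex);
    c->pending++;
    request_done_locked(c, fn, opaque); /* pretend it completed at once */
    pthread_mutex_unlock(&c->mutex);
}

/* The deferred completion runs with no lock held, mirroring
 * nfs_co_generic_bh_cb(), which wakes the coroutine from a bottom half. */
static void run_pending_completion(void)
{
    if (bh.armed) {
        bh.armed = false;
        bh.fn(bh.opaque);
    }
}

static void on_complete(void *opaque)
{
    printf("request finished: %s\n", (const char *)opaque);
}

int main(void)
{
    Client c = { .mutex = PTHREAD_MUTEX_INITIALIZER };

    submit(&c, on_complete, "read");
    run_pending_completion(); /* the "bottom half" side */
    return 0;
}

The point of the structure is the same as in the patch: because completions are funneled through the bottom-half side, the only code that ever runs under the mutex is the driver's own bookkeeping, which is why the QemuMutex can simply replace aio_context_acquire/release without any unlock/relock around callbacks.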