We'll need a separate function that only "marks" a request serialising
with the specified alignment, but does not wait for conflicting
requests. In other words, it will behave like the old
bdrv_mark_request_serialising() did before
bdrv_wait_serialising_requests_locked() was merged into it.
To reduce the potential confusion, let's do the following: the public
function that does both marking and waiting will be called
bdrv_make_request_serialising(), and the private function that only
"marks" will be called tracked_request_set_serialising().

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com>
Reviewed-by: Max Reitz <mre...@redhat.com>
---
 include/block/block_int.h |  3 ++-
 block/file-posix.c        |  2 +-
 block/io.c                | 35 +++++++++++++++++++++++------------
 3 files changed, 26 insertions(+), 14 deletions(-)

diff --git a/include/block/block_int.h b/include/block/block_int.h
index 38cad9d15c..887b0668d8 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -1052,7 +1052,8 @@ extern unsigned int bdrv_drain_all_count;
 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
 void bdrv_unapply_subtree_drain(BdrvChild *child,
                                 BlockDriverState *old_parent);
-bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+                                                uint64_t align);
 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
 
 int get_tmp_filename(char *filename, int size);
diff --git a/block/file-posix.c b/block/file-posix.c
index c63926d592..37d9266f6a 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -2953,7 +2953,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
         req->bytes = end - req->offset;
         req->overlap_bytes = req->bytes;
 
-        bdrv_mark_request_serialising(req, bs->bl.request_alignment);
+        bdrv_make_request_serialising(req, bs->bl.request_alignment);
     }
 #endif
 
diff --git a/block/io.c b/block/io.c
index 5c1a1b388b..24ff8d804b 100644
--- a/block/io.c
+++ b/block/io.c
@@ -779,15 +779,14 @@ bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
     return waited;
 }
 
-bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
+/* Called with req->bs->reqs_lock held */
+static void tracked_request_set_serialising(BdrvTrackedRequest *req,
+                                            uint64_t align)
 {
-    BlockDriverState *bs = req->bs;
     int64_t overlap_offset = req->offset & ~(align - 1);
     uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                              - overlap_offset;
-    bool waited;
 
-    qemu_co_mutex_lock(&bs->reqs_lock);
     if (!req->serialising) {
         qatomic_inc(&req->bs->serialising_in_flight);
         req->serialising = true;
@@ -795,9 +794,6 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
 
     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
-    waited = bdrv_wait_serialising_requests_locked(req);
-    qemu_co_mutex_unlock(&bs->reqs_lock);
-    return waited;
 }
 
 /**
@@ -883,6 +879,21 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
     return waited;
 }
 
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+                                                uint64_t align)
+{
+    bool waited;
+
+    qemu_co_mutex_lock(&req->bs->reqs_lock);
+
+    tracked_request_set_serialising(req, align);
+    waited = bdrv_wait_serialising_requests_locked(req);
+
+    qemu_co_mutex_unlock(&req->bs->reqs_lock);
+
+    return waited;
+}
+
 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                    size_t size)
 {
@@ -1395,7 +1406,7 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
          * with each other for the same cluster.  For example, in copy-on-read
          * it ensures that the CoR read and write operations are atomic and
          * guest writes cannot interleave between them. */
-        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
+        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
     } else {
         bdrv_wait_serialising_requests(req);
     }
@@ -1806,7 +1817,7 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
     assert(!(flags & ~BDRV_REQ_MASK));
 
     if (flags & BDRV_REQ_SERIALISING) {
-        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
+        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
     } else {
         bdrv_wait_serialising_requests(req);
     }
@@ -1972,7 +1983,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
 
     padding = bdrv_init_padding(bs, offset, bytes, &pad);
     if (padding) {
-        bdrv_mark_request_serialising(req, align);
+        bdrv_make_request_serialising(req, align);
 
         bdrv_padding_rmw_read(child, req, &pad, true);
 
@@ -2086,7 +2097,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
     }
 
     if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
-        bdrv_mark_request_serialising(&req, align);
+        bdrv_make_request_serialising(&req, align);
         bdrv_padding_rmw_read(child, &req, &pad, false);
     }
 
@@ -3139,7 +3150,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
      * new area, we need to make sure that no write requests are made to it
      * concurrently or they might be overwritten by preallocation. */
     if (new_bytes) {
-        bdrv_mark_request_serialising(&req, 1);
+        bdrv_make_request_serialising(&req, 1);
    }
     if (bs->read_only) {
         error_setg(errp, "Image is read-only");
-- 
2.21.3
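
As a minimal sketch of the calling pattern this split enables (the two
example callers below are hypothetical and not part of the patch),
ordinary code keeps using the public wrapper, which locks, marks and
waits, while code that already holds req->bs->reqs_lock can mark a
request serialising without waiting:

/* Hypothetical callers, for illustration only -- not part of this patch. */

/* Typical caller: the public wrapper locks reqs_lock, marks, then waits. */
static int coroutine_fn example_write_prepare(BdrvTrackedRequest *req,
                                              uint64_t align)
{
    /* Return value (whether we had to wait) may be ignored, as here. */
    bdrv_make_request_serialising(req, align);
    return 0;
}

/*
 * Caller inside block/io.c that already holds req->bs->reqs_lock: mark
 * only; waiting, if needed, becomes a separate decision, e.g. via
 * bdrv_wait_serialising_requests_locked().
 */
static void example_set_serialising_locked(BdrvTrackedRequest *req,
                                           uint64_t align)
{
    /* Must be called with req->bs->reqs_lock held. */
    tracked_request_set_serialising(req, align);
}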