From: Liu Ping Fan <pingf...@linux.vnet.ibm.com>
virtio-blk will unref RAM's memoryRegion when the io-req has been
done. So we can avoid to call bdrv_drain_all() when RAM hot unplug.
Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com>
---
 hw/block/dataplane/virtio-blk.c | 51 +++++++++++++++++++++++++++++++++------------
1 files changed, 39 insertions(+), 12 deletions(-)
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 3bb57d1..047e1df 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -35,6 +35,8 @@ enum {
 typedef struct {
     struct iocb iocb;               /* Linux AIO control block */
+    MemoryRegion *mrs[VRING_MAX];
+    int mrs_cnt;
     QEMUIOVector *inhdr;            /* iovecs for virtio_blk_inhdr */
     unsigned int head;              /* vring descriptor index */
     struct iovec *bounce_iov;       /* used if guest buffers are unaligned */
@@ -121,6 +123,10 @@ static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
* transferred plus the status bytes.
*/
     vring_push(&s->vring, req->head, len + sizeof(hdr));
+    while (--req->mrs_cnt >= 0) {
+        memory_region_unref(req->mrs[req->mrs_cnt]);
+    }
+
     s->num_reqs--;
 }
@@ -156,7 +162,8 @@ static void do_get_id_cmd(VirtIOBlockDataPlane *s,
 static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
                        struct iovec *iov, unsigned int iov_cnt,
                        long long offset, unsigned int head,
-                       QEMUIOVector *inhdr)
+                       QEMUIOVector *inhdr,
+                       MemoryRegion **mrs, int cnt)
 {
     struct iocb *iocb;
     QEMUIOVector qiov;
@@ -188,6 +195,8 @@ static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
     /* Fill in virtio block metadata needed for completion */
     VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
+    memcpy(req->mrs, mrs, cnt * sizeof(MemoryRegion *));
+    req->mrs_cnt = cnt;
     req->head = head;
     req->inhdr = inhdr;
     req->bounce_iov = bounce_iov;
@@ -197,19 +206,22 @@ static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
 static int process_request(IOQueue *ioq, struct iovec iov[],
                            unsigned int out_num, unsigned int in_num,
-                           unsigned int head)
+                           unsigned int head, MemoryRegion **mrs)
 {
     VirtIOBlockDataPlane *s = container_of(ioq, VirtIOBlockDataPlane, ioqueue);
     struct iovec *in_iov = &iov[out_num];
     struct virtio_blk_outhdr outhdr;
     QEMUIOVector *inhdr;
     size_t in_size;
+    unsigned int i, cnt = out_num + in_num;
+    int ret;
     /* Copy in outhdr */
     if (unlikely(iov_to_buf(iov, out_num, 0, &outhdr,
                             sizeof(outhdr)) != sizeof(outhdr))) {
         error_report("virtio-blk request outhdr too short");
-        return -EFAULT;
+        ret = -EFAULT;
+        goto free_mrs;
     }
     iov_discard_front(&iov, &out_num, sizeof(outhdr));
@@ -217,7 +229,8 @@ static int process_request(IOQueue *ioq, struct iovec iov[],
     in_size = iov_size(in_iov, in_num);
     if (in_size < sizeof(struct virtio_blk_inhdr)) {
         error_report("virtio_blk request inhdr too short");
-        return -EFAULT;
+        ret = -EFAULT;
+        goto free_mrs;
     }
     inhdr = g_slice_new(QEMUIOVector);
     qemu_iovec_init(inhdr, 1);
@@ -231,17 +244,20 @@ static int process_request(IOQueue *ioq, struct iovec iov[],
     switch (outhdr.type) {
     case VIRTIO_BLK_T_IN:
-        do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, head, inhdr);
+        do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, head, inhdr,
+                    mrs, cnt);
         return 0;

     case VIRTIO_BLK_T_OUT:
-        do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, head, inhdr);
+        do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, head, inhdr,
+                    mrs, cnt);
         return 0;

     case VIRTIO_BLK_T_SCSI_CMD:
         /* TODO support SCSI commands */
         complete_request_early(s, head, inhdr, VIRTIO_BLK_S_UNSUPP);
-        return 0;
+        ret = 0;
+        goto free_mrs;

     case VIRTIO_BLK_T_FLUSH:
         /* TODO fdsync not supported by Linux AIO, do it synchronously here! */
@@ -250,18 +266,27 @@ static int process_request(IOQueue *ioq, struct iovec iov[],
         } else {
             complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
         }
-        return 0;
+        ret = 0;
+        goto free_mrs;

     case VIRTIO_BLK_T_GET_ID:
         do_get_id_cmd(s, in_iov, in_num, head, inhdr);
-        return 0;
+        ret = 0;
+        goto free_mrs;

     default:
         error_report("virtio-blk unsupported request type %#x", outhdr.type);
         qemu_iovec_destroy(inhdr);
         g_slice_free(QEMUIOVector, inhdr);
-        return -EFAULT;
+        ret = -EFAULT;
+        goto free_mrs;
+    }
+
+free_mrs:
+for (i = 0;