This patch lets vhost support multiqueue. The idea is simple: launch multiple vhost threads and let each thread process a subset of the device's virtqueues.

The only change needed is to pass a virtqueue index when starting a vhost device; this index records the first virtqueue that the vhost thread serves.
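To illustrate the index mapping (a sketch under assumptions, not code from this patch; struct vhost_dev_view and vhost_local_index() are hypothetical names):

#include <assert.h>

/* Illustration only. Assumes, as vhost-net does below (nvqs = 2,
 * vq_index = 2 * queue pair), that vq_index is a multiple of nvqs,
 * so the modulo equals idx - vq_index. */
struct vhost_dev_view {          /* the two fields this patch relies on */
    int vq_index;                /* first device-global vq of this thread */
    int nvqs;                    /* number of vqs served by this thread */
};

/* Map a device-global virtqueue index to the 0..nvqs-1 index the
 * vhost kernel backend expects, as vhost_virtqueue_init() does. */
static int vhost_local_index(const struct vhost_dev_view *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    return idx % dev->nvqs;
}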
Signed-off-by: Jason Wang <jasow...@redhat.com>
---
 hw/vhost.c      |   52 +++++++++++++++++++++++++++++++++-------------------
 hw/vhost.h      |    2 ++
 hw/vhost_net.c  |    7 +++++--
 hw/vhost_net.h  |    2 +-
 hw/virtio-net.c |    3 ++-
 5 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/hw/vhost.c b/hw/vhost.c
index 16322a1..63c76d6 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -619,11 +619,12 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
 {
     hwaddr s, l, a;
     int r;
+    int vhost_vq_index = idx % dev->nvqs;
     struct vhost_vring_file file = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = vhost_vq_index
     };
     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

@@ -669,11 +670,12 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
         goto fail_alloc_ring;
     }

-    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
+    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
     if (r < 0) {
         r = -errno;
         goto fail_alloc;
     }
+
     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
     r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
     if (r) {
@@ -714,7 +716,7 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                     unsigned idx)
 {
     struct vhost_vring_state state = {
-        .index = idx,
+        .index = idx % dev->nvqs,
     };
     int r;
     r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
@@ -829,7 +831,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     }

     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, true);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             true);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
             goto fail_vq;
@@ -839,7 +843,9 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     return 0;
 fail_vq:
     while (--i >= 0) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
             fflush(stderr);
@@ -860,7 +866,9 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     int i, r;

     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vdev->binding->set_host_notifier(vdev->binding_opaque, i, false);
+        r = vdev->binding->set_host_notifier(vdev->binding_opaque,
+                                             hdev->vq_index + i,
+                                             false);
         if (r < 0) {
             fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
             fflush(stderr);
@@ -879,10 +887,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail;
     }

-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
-    if (r < 0) {
-        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
-        goto fail_notifiers;
+    if (hdev->vq_index == 0) {
+        r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
+        if (r < 0) {
+            fprintf(stderr, "Error binding guest notifier: %d\n", -r);
+            goto fail_notifiers;
+        }
     }

     r = vhost_dev_set_features(hdev, hdev->log_enabled);
@@ -898,7 +908,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         r = vhost_virtqueue_init(hdev,
                                  vdev,
                                  hdev->vqs + i,
-                                 i);
+                                 hdev->vq_index + i);
         if (r < 0) {
             goto fail_vq;
         }
@@ -925,8 +935,9 @@ fail_vq:
         vhost_virtqueue_cleanup(hdev,
                                 vdev,
                                 hdev->vqs + i,
-                                i);
+                                hdev->vq_index + i);
     }
+    i = hdev->nvqs;
 fail_mem:
 fail_features:
     vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
@@ -944,21 +955,24 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
         vhost_virtqueue_cleanup(hdev,
                                 vdev,
                                 hdev->vqs + i,
-                                i);
+                                hdev->vq_index + i);
     }
     for (i = 0; i < hdev->n_mem_sections; ++i) {
         vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
                                 0, (hwaddr)~0x0ull);
     }
-    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
-    if (r < 0) {
-        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
-        fflush(stderr);
+    if (hdev->vq_index == 0) {
+        r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+        if (r < 0) {
+            fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+            fflush(stderr);
+        }
+        assert (r >= 0);
     }
-    assert (r >= 0);

     hdev->started = false;
     g_free(hdev->log);
     hdev->log = NULL;
     hdev->log_size = 0;
 }
+
diff --git a/hw/vhost.h b/hw/vhost.h
index 0c47229..e94a9f7 100644
--- a/hw/vhost.h
+++ b/hw/vhost.h
@@ -34,6 +34,8 @@ struct vhost_dev {
     MemoryRegionSection *mem_sections;
     struct vhost_virtqueue *vqs;
     int nvqs;
+    /* the first virtqueue which would be used by this vhost dev */
+    int vq_index;
     unsigned long long features;
     unsigned long long acked_features;
     unsigned long long backend_features;
diff --git a/hw/vhost_net.c b/hw/vhost_net.c
index 8241601..cdb294c 100644
--- a/hw/vhost_net.c
+++ b/hw/vhost_net.c
@@ -138,13 +138,15 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
 }

 int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+                    VirtIODevice *dev,
+                    int vq_index)
 {
     struct vhost_vring_file file = { };
     int r;

     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
+    net->dev.vq_index = vq_index;

     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
@@ -214,7 +216,8 @@ bool vhost_net_query(VHostNetState *net, VirtIODevice *dev)
 }

 int vhost_net_start(struct vhost_net *net,
-                    VirtIODevice *dev)
+                    VirtIODevice *dev,
+                    int vq_index)
 {
     return -ENOSYS;
 }
diff --git a/hw/vhost_net.h b/hw/vhost_net.h
index a9db234..c9a8429 100644
--- a/hw/vhost_net.h
+++ b/hw/vhost_net.h
@@ -9,7 +9,7 @@ typedef struct vhost_net VHostNetState;
 VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);

 bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
-int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
+int vhost_net_start(VHostNetState *net, VirtIODevice *dev, int vq_index);
 void vhost_net_stop(VHostNetState *net, VirtIODevice *dev);

 void vhost_net_cleanup(VHostNetState *net);
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index d57a5a5..70bc0e6 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -126,7 +126,8 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         if (!vhost_net_query(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev)) {
             return;
         }
-        r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev);
+        r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer),
+                            &n->vdev, 0);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
--
1.7.1
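For context, a caller-side sketch (hypothetical, not part of this series; vhost_net_start_all() is an illustrative name) of how a multiqueue front end might use the new vq_index argument, starting one vhost instance per rx/tx queue pair:

/* Hypothetical multiqueue caller: instance i serves device-global
 * vqs 2*i (rx) and 2*i+1 (tx), so it is started at vq_index = 2*i. */
static int vhost_net_start_all(VHostNetState **nets,
                               VirtIODevice *vdev, int queues)
{
    int i, r;

    for (i = 0; i < queues; i++) {
        r = vhost_net_start(nets[i], vdev, i * 2);
        if (r < 0) {
            goto err;
        }
    }
    return 0;

err:
    /* roll back the instances already started */
    while (--i >= 0) {
        vhost_net_stop(nets[i], vdev);
    }
    return r;
}

Note that only the instance started with vq_index == 0 sets up and tears down the guest notifiers in vhost_dev_start()/vhost_dev_stop() above, so it must bracket the other instances.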