This allows external handlers to be aware of new buffers that the guest places in the virtqueue.
When this callback is defined, the ownership of the guest's virtqueue element is transferred to the callback. This means that if the user wants to forward the descriptor, it needs to inject it manually. The callback is also free to process the command by itself and use the element with svq_push.

Signed-off-by: Eugenio Pérez <epere...@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h | 31 +++++++++++++++++++++++++++++-
 hw/virtio/vhost-shadow-virtqueue.c | 14 ++++++++++++--
 hw/virtio/vhost-vdpa.c             |  3 ++-
 3 files changed, 44 insertions(+), 4 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index cf442f7dea..d04c34a589 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -25,6 +25,27 @@ typedef struct SVQDescState {
     unsigned int ndescs;
 } SVQDescState;
 
+typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
+
+/**
+ * Callback to handle an avail buffer.
+ *
+ * @svq: Shadow virtqueue
+ * @elem: Element placed in the queue by the guest
+ * @vq_callback_opaque: Opaque
+ *
+ * Returns 0 if the vq is running as expected.
+ *
+ * Note that ownership of elem is transferred to the callback.
+ */
+typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq,
+                                      VirtQueueElement *elem,
+                                      void *vq_callback_opaque);
+
+typedef struct VhostShadowVirtqueueOps {
+    VirtQueueAvailCallback avail_handler;
+} VhostShadowVirtqueueOps;
+
 /* Shadow virtqueue to relay notifications */
 typedef struct VhostShadowVirtqueue {
     /* Shadow vring */
@@ -69,6 +90,12 @@ typedef struct VhostShadowVirtqueue {
      */
     uint16_t *desc_next;
 
+    /* Caller callbacks */
+    const VhostShadowVirtqueueOps *ops;
+
+    /* Caller callbacks opaque */
+    void *ops_opaque;
+
     /* Next head to expose to the device */
     uint16_t shadow_avail_idx;
 
@@ -102,7 +129,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                      VirtQueue *vq);
 void vhost_svq_stop(VhostShadowVirtqueue *svq);
 
-VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree);
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
+                                    const VhostShadowVirtqueueOps *ops,
+                                    void *ops_opaque);
 
 void vhost_svq_free(gpointer vq);
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 1c54a03e17..a21b0b1bf6 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -306,7 +306,11 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
                 break;
             }
 
-            r = vhost_svq_add_element(svq, elem);
+            if (svq->ops) {
+                r = svq->ops->avail_handler(svq, elem, svq->ops_opaque);
+            } else {
+                r = vhost_svq_add_element(svq, elem);
+            }
             if (unlikely(r != 0)) {
                 if (r == -ENOSPC) {
                     /*
@@ -679,12 +683,16 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
  * shadow methods and file descriptors.
  *
  * @iova_tree: Tree to perform descriptors translations
+ * @ops: SVQ owner callbacks
+ * @ops_opaque: ops opaque pointer
  *
  * Returns the new virtqueue or NULL.
  *
  * In case of error, reason is reported through error_report.
  */
-VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
+                                    const VhostShadowVirtqueueOps *ops,
+                                    void *ops_opaque)
 {
     g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
     int r;
@@ -706,6 +714,8 @@ VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
     event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
     event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
     svq->iova_tree = iova_tree;
+    svq->ops = ops;
+    svq->ops_opaque = ops_opaque;
     return g_steal_pointer(&svq);
 
 err_init_hdev_call:
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 66f054a12c..0b13e98471 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -418,8 +418,9 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
 
     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
     for (unsigned n = 0; n < hdev->nvqs; ++n) {
-        g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree);
+        g_autoptr(VhostShadowVirtqueue) svq;
 
+        svq = vhost_svq_new(v->iova_tree, NULL, NULL);
         if (unlikely(!svq)) {
             error_setg(errp, "Cannot create svq %u", n);
             return -1;
-- 
2.31.1
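
For reference, below is a minimal sketch (not part of this patch) of what an SVQ
owner using the new callback could look like. The handler and struct names are
hypothetical, and the completion helper is assumed to be vhost_svq_push_elem(),
the "svq_push" the commit message refers to; this patch itself only wires NULL
ops in vhost-vdpa.c, keeping the default forwarding behavior.

/* Hypothetical SVQ owner: takes ownership of the guest element and
 * completes it itself instead of letting SVQ forward it to the device.
 */
#include "qemu/osdep.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"

static int example_avail_handler(VhostShadowVirtqueue *svq,
                                 VirtQueueElement *elem,
                                 void *opaque)
{
    uint32_t written = 0;

    /* Ownership of elem was transferred to us: SVQ will not inject it,
     * so either forward it manually or consume it here.
     */

    /* ... process the command carried by elem, fill its in buffers,
     * and set `written` to the number of bytes written back ...
     */

    /* Return the used element to the guest and release it. */
    vhost_svq_push_elem(svq, elem, written);
    g_free(elem);
    return 0;
}

static const VhostShadowVirtqueueOps example_svq_ops = {
    .avail_handler = example_avail_handler,
};

/*
 * Registration happens at creation time, e.g.:
 *     svq = vhost_svq_new(iova_tree, &example_svq_ops, owner_opaque);
 * The opaque pointer is handed back verbatim on every call so the owner can
 * recover its own state without globals.
 */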