On Mon, Mar 24, 2025 at 3:00 PM Sahil Siddiq <[email protected]> wrote:
>
> Introduce "struct vring_packed".
>
> Modify VhostShadowVirtqueue so it can support split and packed virtqueue
> formats.
>
> Signed-off-by: Sahil Siddiq <[email protected]>
> ---
> Changes from v4 -> v5:
> - This was commit #3 in v4. This has been reordered to commit #2
> based on review comments.
> - Place shadow_avail_idx, shadow_used_idx, last_used_idx
> above the "shadow vring" union.
>
What is the reason for reordering these members (shadow_avail_idx,
shadow_used_idx, last_used_idx) above the vring union?
> hw/virtio/vhost-shadow-virtqueue.h | 87 +++++++++++++++++++-----------
> 1 file changed, 56 insertions(+), 31 deletions(-)
>
> diff --git a/hw/virtio/vhost-shadow-virtqueue.h
> b/hw/virtio/vhost-shadow-virtqueue.h
> index 9c273739d6..5f7699da9d 100644
> --- a/hw/virtio/vhost-shadow-virtqueue.h
> +++ b/hw/virtio/vhost-shadow-virtqueue.h
> @@ -46,10 +46,65 @@ typedef struct VhostShadowVirtqueueOps {
> VirtQueueAvailCallback avail_handler;
> } VhostShadowVirtqueueOps;
>
> +struct vring_packed {
> + /* Actual memory layout for this queue. */
> + struct {
> + unsigned int num;
> + struct vring_packed_desc *desc;
> + struct vring_packed_desc_event *driver;
> + struct vring_packed_desc_event *device;
> + } vring;
> +
> + /* Avail used flags. */
> + uint16_t avail_used_flags;
> +
> + /* Index of the next avail descriptor. */
> + uint16_t next_avail_idx;
> +
> + /* Driver ring wrap counter */
> + bool avail_wrap_counter;
> +};
> +
> /* Shadow virtqueue to relay notifications */
> typedef struct VhostShadowVirtqueue {
> + /* True if packed virtqueue */
> + bool is_packed;
> +
> + /* Virtio queue shadowing */
> + VirtQueue *vq;
> +
> + /* Virtio device */
> + VirtIODevice *vdev;
> +
> + /* SVQ vring descriptors state */
> + SVQDescState *desc_state;
> +
> + /*
> + * Backup next field for each descriptor so we can recover securely, not
> + * needing to trust the device access.
> + */
> + uint16_t *desc_next;
> +
> + /* Next free descriptor */
> + uint16_t free_head;
> +
> + /* Size of SVQ vring free descriptors */
> + uint16_t num_free;
> +
> + /* Next head to expose to the device */
> + uint16_t shadow_avail_idx;
> +
> + /* Last seen used idx */
> + uint16_t shadow_used_idx;
> +
> + /* Next head to consume from the device */
> + uint16_t last_used_idx;
> +
> /* Shadow vring */
> - struct vring vring;
> + union {
> + struct vring vring;
> + struct vring_packed vring_packed;
> + };
>
> /* Shadow kick notifier, sent to vhost */
> EventNotifier hdev_kick;
> @@ -69,47 +124,17 @@ typedef struct VhostShadowVirtqueue {
> /* Guest's call notifier, where the SVQ calls guest. */
> EventNotifier svq_call;
>
> - /* Virtio queue shadowing */
> - VirtQueue *vq;
> -
> - /* Virtio device */
> - VirtIODevice *vdev;
> -
> /* IOVA mapping */
> VhostIOVATree *iova_tree;
>
> - /* SVQ vring descriptors state */
> - SVQDescState *desc_state;
> -
> /* Next VirtQueue element that guest made available */
> VirtQueueElement *next_guest_avail_elem;
>
> - /*
> - * Backup next field for each descriptor so we can recover securely, not
> - * needing to trust the device access.
> - */
> - uint16_t *desc_next;
> -
> /* Caller callbacks */
> const VhostShadowVirtqueueOps *ops;
>
> /* Caller callbacks opaque */
> void *ops_opaque;
> -
> - /* Next head to expose to the device */
> - uint16_t shadow_avail_idx;
> -
> - /* Next free descriptor */
> - uint16_t free_head;
> -
> - /* Last seen used idx */
> - uint16_t shadow_used_idx;
> -
> - /* Next head to consume from the device */
> - uint16_t last_used_idx;
> -
> - /* Size of SVQ vring free descriptors */
> - uint16_t num_free;
> } VhostShadowVirtqueue;
>
> bool vhost_svq_valid_features(uint64_t features, Error **errp);
> --
> 2.48.1
>