From: wafer Xie <[email protected]>

The indirect descriptor buffers are allocated and initialized when creating
the vhost-vdpa device, and the indirect buffer is released when vhost-vdpa
is stopped.

Suggested-by: Eugenio Pérez <[email protected]>
Signed-off-by: wafer Xie <[email protected]>
---
 hw/virtio/vhost-shadow-virtqueue.c | 28 +++++++++
 hw/virtio/vhost-vdpa.c             | 98 +++++++++++++++++++++++++++++-
 2 files changed, 125 insertions(+), 1 deletion(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 2481d49345..f80266fc03 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -708,6 +708,28 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
         svq->desc_next[i] = i + 1;
     }
+
+    /* Initialize indirect descriptor state */
+    svq->indirect.desc = NULL;
+    svq->indirect.iova = 0;
+    svq->indirect.size = 0;
+    svq->indirect.total_descs = 0;
+    svq->indirect.enabled = false;
+    svq->indirect.current_buf = -1;
+    for (int i = 0; i < SVQ_NUM_INDIRECT_BUFS; i++) {
+        svq->indirect.bufs[i].start_idx = 0;
+        svq->indirect.bufs[i].num_descs = 0;
+        svq->indirect.bufs[i].freed_descs = 0;
+        svq->indirect.bufs[i].freeing_descs = 0;
+        svq->indirect.bufs[i].freed_head = 0;
+        svq->indirect.bufs[i].borrowed_descs = 0;
+        svq->indirect.bufs[i].state = SVQ_INDIRECT_BUF_FREED;
+    }
+
+    /* Initialize desc_state indirect_buf_idx to -1 */
+    for (unsigned i = 0; i < svq->vring.num; i++) {
+        svq->desc_state[i].indirect_buf_idx = -1;
+    }
 }
 
 /**
@@ -748,6 +770,10 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
     munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
     munmap(svq->vring.used, vhost_svq_device_area_size(svq));
     event_notifier_set_handler(&svq->hdev_call, NULL);
+
+    /* Reset indirect descriptor state */
+    svq->indirect.enabled = false;
+    svq->indirect.current_buf = -1;
 }
 
 /**
@@ -765,6 +791,8 @@ VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
     event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
     svq->ops = ops;
     svq->ops_opaque = ops_opaque;
+    svq->indirect.enabled = false;
+    svq->indirect.current_buf = -1;
     return svq;
 }
 
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 7061b6e1a3..a1f4e501e2 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1142,8 +1142,27 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
     vhost_iova_tree_remove(v->shared->iova_tree, *result);
 }
 
+/**
+ * Unmap indirect descriptor buffers for a single SVQ
+ *
+ * @v: vhost_vdpa instance
+ * @svq: Shadow virtqueue to unmap
+ */
+static void vhost_vdpa_svq_unmap_indirect(struct vhost_vdpa *v,
+                                          VhostShadowVirtqueue *svq)
+{
+    if (svq->indirect.desc) {
+        vhost_vdpa_svq_unmap_ring(v, (hwaddr)(uintptr_t)svq->indirect.desc);
+        munmap(svq->indirect.desc, svq->indirect.size);
+        svq->indirect.desc = NULL;
+        svq->indirect.iova = 0;
+        svq->indirect.size = 0;
+        svq->indirect.total_descs = 0;
+    }
+}
+
 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
-                                       const VhostShadowVirtqueue *svq)
+                                       VhostShadowVirtqueue *svq)
 {
     struct vhost_vdpa *v = dev->opaque;
     struct vhost_vring_addr svq_addr;
@@ -1153,6 +1172,8 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
 
     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
+
+    vhost_vdpa_svq_unmap_indirect(v, svq);
 }
 
 /**
@@ -1192,6 +1213,74 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
     return r == 0;
 }
 
+/**
+ * Initialize indirect descriptor buffers for a single SVQ
+ * Allocates one contiguous memory region shared by all buffer segments.
+ *
+ * @v: vhost_vdpa instance
+ * @svq: Shadow virtqueue to initialize
+ *
+ * Returns true on success, false on failure.
+ */
+static bool vhost_vdpa_svq_init_indirect(struct vhost_vdpa *v,
+                                          VhostShadowVirtqueue *svq)
+{
+    uint16_t num_per_buf = svq->vring.num * 2;
+    uint16_t total_descs = SVQ_NUM_INDIRECT_BUFS * num_per_buf;
+    size_t desc_size = sizeof(vring_desc_t) * total_descs;
+    size_t alloc_size = ROUND_UP(desc_size, qemu_real_host_page_size());
+    DMAMap needle = {
+        .size = alloc_size - 1,
+        .perm = IOMMU_RO,
+    };
+    vring_desc_t *indirect_desc;
+    Error *err = NULL;
+    bool ok;
+
+    if (!svq->vring.num) {
+        return true;
+    }
+
+    /* Allocate one contiguous memory region for all indirect descriptors */
+    indirect_desc = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
+                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (indirect_desc == MAP_FAILED) {
+        error_report("Cannot allocate indirect descriptor buffer");
+        return false;
+    }
+
+    /* Use vhost_vdpa_svq_map_ring to allocate IOVA and map to device */
+    ok = vhost_vdpa_svq_map_ring(v, &needle, (hwaddr)(uintptr_t)indirect_desc,
+                                 &err);
+    if (unlikely(!ok)) {
+        error_report_err(err);
+        munmap(indirect_desc, alloc_size);
+        return false;
+    }
+
+    /* Store shared memory info in SVQ */
+    svq->indirect.desc = indirect_desc;
+    svq->indirect.iova = needle.iova;
+    svq->indirect.size = alloc_size;
+    svq->indirect.total_descs = total_descs;
+
+    /* Initialize each buffer segment */
+    for (int j = 0; j < SVQ_NUM_INDIRECT_BUFS; j++) {
+        SVQIndirectDescBuf *buf = &svq->indirect.bufs[j];
+        buf->start_idx = j * num_per_buf;
+        buf->num_descs = num_per_buf;
+        buf->freed_descs = num_per_buf;
+        buf->freeing_descs = 0;
+        buf->freed_head = 0;
+        buf->borrowed_descs = 0;
+        buf->state = SVQ_INDIRECT_BUF_FREED;
+    }
+
+    svq->indirect.enabled = true;
+    svq->indirect.current_buf = 0;
+    return true;
+}
+
 /**
  * Map the shadow virtqueue rings in the device
  *
@@ -1299,6 +1388,13 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
             error_setg_errno(&err, -r, "Cannot set device address");
             goto err_set_addr;
         }
+
+        /* Initialize indirect descriptor buffers for this SVQ */
+        if (!vhost_vdpa_svq_init_indirect(v, svq)) {
+            /* Non-fatal: will fallback to chain mode */
+            warn_report("Cannot initialize indirect descriptor for SVQ %u",
+                virtio_get_queue_index(vq));
+        }
     }
 
     return true;
-- 
2.34.1


Reply via email to