The virtio core currently supports only virtual addresses; DMA mapping is
always done inside the virtio core.

In some scenarios (such as AF_XDP), the memory is allocated and DMA-mapped
in advance, so the virtio core needs to support accepting a pre-mapped DMA
address from the driver.

Drivers can use sg->dma_address to pass a pre-mapped DMA address to the
virtio core. If sg->dma_address is set for one sg, it must be set for all
sgs of the same request; otherwise every sg->dma_address must be left unset.
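
On the driver side, usage could look roughly like the sketch below
(hypothetical names: vq, buf, len and premapped_addr are placeholders,
and premapped_addr is assumed to come from an earlier dma_map_single()
or from an AF_XDP umem mapped at setup time):

    struct scatterlist sg;

    /* Describe the buffer as usual ... */
    sg_init_one(&sg, buf, len);

    /*
     * ... but hand the core a DMA address that was mapped in
     * advance. If one sg of a request carries a dma_address,
     * all sgs of that request must carry one.
     */
    sg.dma_address = premapped_addr;

    return virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);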

On the indirect path, if pre-mapped DMA addresses are used, the
VRING_INDIRECT_PREMAPPED flag is mixed into the low bit of
desc_state.indir_desc. This way, detach_buf_split() can tell whether the
indirect table's entries were mapped by the virtio core and skip the
unmap when they were not.
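
The mixing works because the kmalloc()ed indirect table is at least
pointer-aligned, so bit 0 of indir_desc is always zero and free to carry
a flag. Below is a standalone sketch of the tagging trick (plain
userspace C; the mix()/unmix() helpers are hypothetical names, not the
patch's macros):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PREMAPPED 0x1UL   /* lives in bit 0 of the pointer */

    /* Tag the pointer when the driver did the DMA mapping itself. */
    static void *mix(void *desc, int premapped)
    {
            return premapped ? (void *)((uintptr_t)desc | PREMAPPED) : desc;
    }

    /* Strip the tag before dereferencing or freeing the pointer. */
    static void *unmix(void *desc)
    {
            return (void *)((uintptr_t)desc & ~PREMAPPED);
    }

    int main(void)
    {
            void *desc = malloc(64);     /* allocator memory is aligned */
            void *stored = mix(desc, 1); /* premapped: skip unmap later */

            assert((uintptr_t)stored & PREMAPPED);
            assert(unmix(stored) == desc); /* untagged, safe to free */
            free(desc);
            return 0;
    }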

Signed-off-by: Xuan Zhuo <xuanz...@linux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 66a071e3bdef..11827d2e56a8 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -231,6 +231,18 @@ static void vring_free(struct virtqueue *_vq);
  * Helpers.
  */
 
+#define VRING_INDIRECT_PREMAPPED  BIT(0)
+
+#define desc_mix_dma_map(do_map, desc) \
+       ((do_map) ? (desc) : (typeof(desc))((unsigned long)(desc) | VRING_INDIRECT_PREMAPPED))
+
+#define desc_rm_dma_map(desc) \
+       ((typeof(desc))((unsigned long)(desc) & ~VRING_INDIRECT_PREMAPPED))
+
+#define desc_map_inter(desc) \
+       (!((unsigned long)(desc) & VRING_INDIRECT_PREMAPPED))
+
+
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
 static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
@@ -725,7 +737,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        /* Store token and indirect buffer state. */
        vq->split.desc_state[head].data = data;
        if (indirect)
-               vq->split.desc_state[head].indir_desc = desc;
+               vq->split.desc_state[head].indir_desc = desc_mix_dma_map(do_map, desc);
        else
                vq->split.desc_state[head].indir_desc = ctx;
 
@@ -820,22 +832,26 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
        vq->vq.num_free++;
 
        if (vq->indirect) {
-               struct vring_desc *indir_desc =
-                               vq->split.desc_state[head].indir_desc;
+               struct vring_desc *mix = vq->split.desc_state[head].indir_desc;
+               struct vring_desc *indir_desc;
                u32 len;
 
                /* Free the indirect table, if any, now that it's unmapped. */
-               if (!indir_desc)
+               if (!mix)
                        return;
 
+               indir_desc = desc_rm_dma_map(mix);
+
                len = vq->split.desc_extra[head].len;
 
                BUG_ON(!(vq->split.desc_extra[head].flags &
                                VRING_DESC_F_INDIRECT));
                BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-               for (j = 0; j < len / sizeof(struct vring_desc); j++)
-                       vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+               if (desc_map_inter(mix)) {
+                       for (j = 0; j < len / sizeof(struct vring_desc); j++)
+                               vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+               }
 
                kfree(indir_desc);
                vq->split.desc_state[head].indir_desc = NULL;
-- 
2.32.0.3.g01195cf9f
