This patch enables batch update of the used ring for better efficiency.

Signed-off-by: Zhihong Wang <zhihong.wang@intel.com>
---
 lib/librte_vhost/vhost-net.h  |  4 +++
 lib/librte_vhost/vhost_rxtx.c | 68 +++++++++++++++++++++++++++++++++----------
 lib/librte_vhost/virtio-net.c | 15 ++++++++--
 3 files changed, 68 insertions(+), 19 deletions(-)

diff --git a/lib/librte_vhost/vhost-net.h b/lib/librte_vhost/vhost-net.h
index 51fdf3d..a15182c 100644
--- a/lib/librte_vhost/vhost-net.h
+++ b/lib/librte_vhost/vhost-net.h
@@ -85,6 +85,10 @@ struct vhost_virtqueue {

        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
+
+       /* Shadow used ring for performance */
+       struct vring_used_elem  *shadow_used_ring;
+       uint32_t                shadow_used_idx;
 } __rte_cache_aligned;

 /* Old kernels have no such macro defined */
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 7db83d0..60d63d3 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -155,7 +155,6 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t mbuf_len_left = 0;
        uint32_t copy_len = 0;
        uint32_t extra_buffers = 0;
-       uint32_t used_idx_round = 0;

        /* start with the first mbuf of the packet */
        mbuf_len = rte_pktmbuf_data_len(mbuf);
@@ -207,17 +206,11 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                        goto rollback;
                        } else if (is_mrg_rxbuf) {
                                /* start with the next desc chain */
-                               used_idx_round = vq->last_used_idx
-                                       & (vq->size - 1);
-                               vq->used->ring[used_idx_round].id =
+                               vq->shadow_used_ring[vq->shadow_used_idx].id =
                                        desc_chain_head;
-                               vq->used->ring[used_idx_round].len =
+                               vq->shadow_used_ring[vq->shadow_used_idx].len =
                                        desc_chain_len;
-                               vhost_log_used_vring(dev, vq,
-                                       offsetof(struct vring_used,
-                                               ring[used_idx_round]),
-                                       sizeof(vq->used->ring[
-                                               used_idx_round]));
+                               vq->shadow_used_idx++;
                                vq->last_used_idx++;
                                extra_buffers++;
                                virtio_hdr->num_buffers++;
@@ -255,12 +248,9 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
                desc_chain_len += copy_len;
        }

-       used_idx_round = vq->last_used_idx & (vq->size - 1);
-       vq->used->ring[used_idx_round].id = desc_chain_head;
-       vq->used->ring[used_idx_round].len = desc_chain_len;
-       vhost_log_used_vring(dev, vq,
-               offsetof(struct vring_used, ring[used_idx_round]),
-               sizeof(vq->used->ring[used_idx_round]));
+       vq->shadow_used_ring[vq->shadow_used_idx].id = desc_chain_head;
+       vq->shadow_used_ring[vq->shadow_used_idx].len = desc_chain_len;
+       vq->shadow_used_idx++;
        vq->last_used_idx++;

        return 0;
@@ -275,6 +265,45 @@ error:
 }

 static inline void __attribute__((always_inline))
+update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+               uint32_t used_idx_start)
+{
+       if (used_idx_start + vq->shadow_used_idx < vq->size) {
+               rte_memcpy(&vq->used->ring[used_idx_start],
+                               &vq->shadow_used_ring[0],
+                               vq->shadow_used_idx *
+                               sizeof(struct vring_used_elem));
+               vhost_log_used_vring(dev, vq,
+                               offsetof(struct vring_used,
+                                       ring[used_idx_start]),
+                               vq->shadow_used_idx *
+                               sizeof(struct vring_used_elem));
+       } else {
+               uint32_t part_1 = vq->size - used_idx_start;
+               uint32_t part_2 = vq->shadow_used_idx - part_1;
+
+               rte_memcpy(&vq->used->ring[used_idx_start],
+                               &vq->shadow_used_ring[0],
+                               part_1 *
+                               sizeof(struct vring_used_elem));
+               vhost_log_used_vring(dev, vq,
+                               offsetof(struct vring_used,
+                                       ring[used_idx_start]),
+                               part_1 *
+                               sizeof(struct vring_used_elem));
+               rte_memcpy(&vq->used->ring[0],
+                               &vq->shadow_used_ring[part_1],
+                               part_2 *
+                               sizeof(struct vring_used_elem));
+               vhost_log_used_vring(dev, vq,
+                               offsetof(struct vring_used,
+                                       ring[0]),
+                               part_2 *
+                               sizeof(struct vring_used_elem));
+       }
+}
+
+static inline void __attribute__((always_inline))
 notify_guest(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
        rte_smp_wmb();
@@ -293,6 +322,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev;
+       uint32_t used_idx_start = 0;
        uint32_t pkt_idx = 0;
        uint32_t pkt_left = 0;
        uint32_t pkt_sent = 0;
@@ -322,6 +352,8 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        /* start enqueuing packets 1 by 1 */
        pkt_idx = 0;
        pkt_left = count;
+       vq->shadow_used_idx = 0;
+       used_idx_start = vq->last_used_idx & (vq->size - 1);
        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        while (1) {
                if (loop_check(vq, avail_idx, pkt_left))
@@ -336,6 +368,10 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
                pkt_left--;
        }

+       /* batch update used ring for better performance */
+       if (likely(vq->shadow_used_idx > 0))
+               update_used_ring(dev, vq, used_idx_start);
+
        /* update used idx and kick the guest if necessary */
        if (pkt_sent)
                notify_guest(dev, vq);
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 1785695..87d09fa 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -152,10 +152,14 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 free_device(struct virtio_net *dev)
 {
+       struct vhost_virtqueue *vq;
        uint32_t i;

-       for (i = 0; i < dev->virt_qp_nb; i++)
-               rte_free(dev->virtqueue[i * VIRTIO_QNUM]);
+       for (i = 0; i < dev->virt_qp_nb; i++) {
+               vq = dev->virtqueue[i * VIRTIO_QNUM];
+               rte_free(vq->shadow_used_ring);
+               rte_free(vq);
+       }

        rte_free(dev);
 }
@@ -418,13 +422,18 @@ int
 vhost_set_vring_num(int vid, struct vhost_vring_state *state)
 {
        struct virtio_net *dev;
+       struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        /* State->index refers to the queue index. The txq is 1, rxq is 0. */
-       dev->virtqueue[state->index]->size = state->num;
+       vq = dev->virtqueue[state->index];
+       vq->size = state->num;
+       vq->shadow_used_ring = rte_malloc("",
+                       vq->size * sizeof(struct vring_used_elem),
+                       RTE_CACHE_LINE_SIZE);

        return 0;
 }
-- 
2.7.4

Reply via email to