This patch enables batch update of the used ring for better efficiency.

Signed-off-by: Zhihong Wang <zhihong.wang at intel.com>
---
Changes in v4:

 1. Free shadow used ring in the right place.

 2. Add failure check for shadow used ring malloc.

 lib/librte_vhost/vhost.c      | 20 ++++++++++++--
 lib/librte_vhost/vhost.h      |  4 +++
 lib/librte_vhost/vhost_user.c | 31 +++++++++++++++++----
 lib/librte_vhost/virtio_net.c | 64 +++++++++++++++++++++++++++++++++++--------
 4 files changed, 101 insertions(+), 18 deletions(-)
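
The core idea of the patch: instead of writing each used-ring entry to shared memory (and logging it) one descriptor chain at a time, completions are staged in a per-queue shadow array and flushed into the shared used ring in at most two contiguous copies per burst. Below is a minimal standalone sketch of that batching scheme, using simplified stand-in types; struct queue, stage_used() and flush_used() are illustrative names only, not the structures or functions in this patch, and the sketch omits the dirty-page logging (vhost_log_used_vring) that the real flush performs.

/*
 * Minimal sketch of batched used-ring updates via a shadow ring.
 * Entries are staged privately, then copied to the shared ring in
 * one memcpy(), or two when the write wraps past the ring end.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE 8u                           /* power of two, like vq->size */

struct used_elem { uint32_t id; uint32_t len; };

struct queue {
	struct used_elem used[RING_SIZE];      /* shared used ring */
	struct used_elem shadow[RING_SIZE];    /* private shadow ring */
	uint32_t shadow_idx;                   /* entries staged so far */
	uint32_t last_used_idx;                /* free-running used index */
};

/* Stage one completed descriptor chain; no shared-memory write yet. */
static void stage_used(struct queue *q, uint32_t head, uint32_t len)
{
	q->shadow[q->shadow_idx].id  = head;
	q->shadow[q->shadow_idx].len = len;
	q->shadow_idx++;
	q->last_used_idx++;
}

/* Flush all staged entries with one or two contiguous copies. */
static void flush_used(struct queue *q, uint32_t start)
{
	if (start + q->shadow_idx <= RING_SIZE) {
		memcpy(&q->used[start], &q->shadow[0],
		       q->shadow_idx * sizeof(struct used_elem));
	} else {
		uint32_t part1 = RING_SIZE - start;
		uint32_t part2 = q->shadow_idx - part1;

		memcpy(&q->used[start], &q->shadow[0],
		       part1 * sizeof(struct used_elem));
		memcpy(&q->used[0], &q->shadow[part1],
		       part2 * sizeof(struct used_elem));
	}
}

int main(void)
{
	struct queue q = { .last_used_idx = 6 };  /* start near the wrap point */
	uint32_t start = q.last_used_idx & (RING_SIZE - 1);

	stage_used(&q, 11, 64);
	stage_used(&q, 12, 128);
	stage_used(&q, 13, 256);                  /* this entry wraps to slot 0 */
	flush_used(&q, start);

	printf("used[6]=%u used[7]=%u used[0]=%u\n",
	       (unsigned)q.used[6].id, (unsigned)q.used[7].id,
	       (unsigned)q.used[0].id);
	return 0;
}

In the patch itself this split happens in flush_used_ring(), which additionally logs each copied region so live migration still sees every dirty used-ring entry.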

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 46095c3..cb31cdd 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -119,10 +119,26 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 free_device(struct virtio_net *dev)
 {
+       struct vhost_virtqueue *vq_0;
+       struct vhost_virtqueue *vq_1;
        uint32_t i;

-       for (i = 0; i < dev->virt_qp_nb; i++)
-               rte_free(dev->virtqueue[i * VIRTIO_QNUM]);
+       for (i = 0; i < dev->virt_qp_nb; i++) {
+               vq_0 = dev->virtqueue[i * VIRTIO_QNUM];
+               if (vq_0->shadow_used_ring) {
+                       rte_free(vq_0->shadow_used_ring);
+                       vq_0->shadow_used_ring = NULL;
+               }
+
+               vq_1 = dev->virtqueue[i * VIRTIO_QNUM + 1];
+               if (vq_1->shadow_used_ring) {
+                       rte_free(vq_1->shadow_used_ring);
+                       vq_1->shadow_used_ring = NULL;
+               }
+
+               /* malloc together, free together */
+               rte_free(vq_0);
+       }

        rte_free(dev);
 }
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 9707dfc..381dc27 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -85,6 +85,10 @@ struct vhost_virtqueue {

        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
+
+       /* Shadow used ring for performance */
+       struct vring_used_elem  *shadow_used_ring;
+       uint32_t                shadow_used_idx;
 } __rte_cache_aligned;

 /* Old kernels have no such macro defined */
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index eee99e9..d7cf1ed 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -193,7 +193,21 @@ static int
 vhost_user_set_vring_num(struct virtio_net *dev,
                         struct vhost_vring_state *state)
 {
-       dev->virtqueue[state->index]->size = state->num;
+       struct vhost_virtqueue *vq;
+
+       vq = dev->virtqueue[state->index];
+       vq->size = state->num;
+       if (!vq->shadow_used_ring) {
+               vq->shadow_used_ring = rte_malloc(NULL,
+                               vq->size * sizeof(struct vring_used_elem),
+                               RTE_CACHE_LINE_SIZE);
+               if (!vq->shadow_used_ring) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "Failed to allocate memory"
+                               " for shadow used ring.\n");
+                       return -1;
+               }
+       }

        return 0;
 }
@@ -611,14 +625,21 @@ static int
 vhost_user_get_vring_base(struct virtio_net *dev,
                          struct vhost_vring_state *state)
 {
+       struct vhost_virtqueue *vq;
+
        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                notify_ops->destroy_device(dev->vid);
        }

+       vq = dev->virtqueue[state->index];
        /* Here we are safe to get the last used index */
-       state->num = dev->virtqueue[state->index]->last_used_idx;
+       state->num = vq->last_used_idx;
+       if (vq->shadow_used_ring) {
+               rte_free(vq->shadow_used_ring);
+               vq->shadow_used_ring = NULL;
+       }

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", state->index, state->num);
@@ -627,10 +648,10 @@ vhost_user_get_vring_base(struct virtio_net *dev,
         * sent and only sent in vhost_vring_stop.
         * TODO: cleanup the vring, it isn't usable since here.
         */
-       if (dev->virtqueue[state->index]->kickfd >= 0)
-               close(dev->virtqueue[state->index]->kickfd);
+       if (vq->kickfd >= 0)
+               close(vq->kickfd);

-       dev->virtqueue[state->index]->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+       vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        return 0;
 }
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index b38f18f..e9f6353 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -134,17 +134,52 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 }

 static inline void __attribute__((always_inline))
-update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-               uint32_t desc_chain_head, uint32_t desc_chain_len)
+update_used_ring(struct vhost_virtqueue *vq, uint32_t desc_chain_head,
+               uint32_t desc_chain_len)
 {
-       uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
-
-       vq->used->ring[used_idx].id = desc_chain_head;
-       vq->used->ring[used_idx].len = desc_chain_len;
+       vq->shadow_used_ring[vq->shadow_used_idx].id  = desc_chain_head;
+       vq->shadow_used_ring[vq->shadow_used_idx].len = desc_chain_len;
+       vq->shadow_used_idx++;
        vq->last_used_idx++;
-       vhost_log_used_vring(dev, vq, offsetof(struct vring_used,
-                               ring[used_idx]),
-                       sizeof(vq->used->ring[used_idx]));
+}
+
+static inline void __attribute__((always_inline))
+flush_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+               uint32_t used_idx_start)
+{
+       if (used_idx_start + vq->shadow_used_idx < vq->size) {
+               rte_memcpy(&vq->used->ring[used_idx_start],
+                               &vq->shadow_used_ring[0],
+                               vq->shadow_used_idx *
+                               sizeof(struct vring_used_elem));
+               vhost_log_used_vring(dev, vq,
+                               offsetof(struct vring_used,
+                                       ring[used_idx_start]),
+                               vq->shadow_used_idx *
+                               sizeof(struct vring_used_elem));
+       } else {
+               uint32_t part_1 = vq->size - used_idx_start;
+               uint32_t part_2 = vq->shadow_used_idx - part_1;
+
+               rte_memcpy(&vq->used->ring[used_idx_start],
+                               &vq->shadow_used_ring[0],
+                               part_1 *
+                               sizeof(struct vring_used_elem));
+               vhost_log_used_vring(dev, vq,
+                               offsetof(struct vring_used,
+                                       ring[used_idx_start]),
+                               part_1 *
+                               sizeof(struct vring_used_elem));
+               rte_memcpy(&vq->used->ring[0],
+                               &vq->shadow_used_ring[part_1],
+                               part_2 *
+                               sizeof(struct vring_used_elem));
+               vhost_log_used_vring(dev, vq,
+                               offsetof(struct vring_used,
+                                       ring[0]),
+                               part_2 *
+                               sizeof(struct vring_used_elem));
+       }
 }

 static inline int __attribute__((always_inline))
@@ -209,7 +244,7 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                        goto error;
                        } else if (is_mrg_rxbuf) {
                                /* start with the next desc chain */
-                               update_used_ring(dev, vq, desc_chain_head,
+                               update_used_ring(vq, desc_chain_head,
                                                desc_chain_len);
                                num_buffers++;
                                virtio_hdr->num_buffers++;
@@ -245,7 +280,7 @@ enqueue_packet(struct virtio_net *dev, struct vhost_virtqueue *vq,
                desc_chain_len += cpy_len;
        }

-       update_used_ring(dev, vq, desc_chain_head, desc_chain_len);
+       update_used_ring(vq, desc_chain_head, desc_chain_len);

        return 0;

@@ -275,6 +310,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 {
        struct vhost_virtqueue *vq;
        struct virtio_net *dev;
+       uint32_t used_idx_start;
        uint32_t is_mrg_rxbuf = 0;
        uint32_t pkt_idx      = 0;
        uint32_t pkt_left     = count;
@@ -300,6 +336,8 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
                is_mrg_rxbuf = 1;

        /* start enqueuing packets 1 by 1 */
+       vq->shadow_used_idx = 0;
+       used_idx_start = vq->last_used_idx & (vq->size - 1);
        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        while (pkt_left && avail_idx != vq->last_used_idx) {
                /* prefetch the next desc */
@@ -316,6 +354,10 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
                pkt_left--;
        }

+       /* batch update used ring for better performance */
+       if (likely(vq->shadow_used_idx > 0))
+               flush_used_ring(dev, vq, used_idx_start);
+
        /* update used idx and kick the guest if necessary */
        if (pkt_idx)
                notify_guest(dev, vq);
-- 
2.7.4
