Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
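
(Reviewer note, not for the commit log: below is a minimal standalone sketch of
the wrap-around copy that flush_shadow_used_ring_split() performs. It is plain
C, not DPDK code; RING_SIZE, flush_shadow, used_ring and shadow are illustrative
stand-ins for the real vq fields. It assumes a power-of-two ring size, which is
what makes the "& (size - 1)" masking in the real code valid.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8	/* split rings are power-of-two sized, so masking works */

static uint32_t used_ring[RING_SIZE];	/* stand-in for vq->used->ring */
static uint32_t shadow[RING_SIZE];	/* stand-in for vq->shadow_used_ring */

/*
 * Flush 'count' shadow entries into the used ring starting at
 * 'last_used_idx', splitting the copy in two when it crosses the
 * end of the ring -- the same pattern as the flush helper below.
 */
static void
flush_shadow(uint16_t last_used_idx, uint16_t count)
{
	uint16_t used_idx = last_used_idx & (RING_SIZE - 1);

	if (used_idx + count <= RING_SIZE) {
		/* No wrap: one contiguous copy. */
		memcpy(&used_ring[used_idx], &shadow[0],
				count * sizeof(shadow[0]));
	} else {
		uint16_t size = RING_SIZE - used_idx;

		/* Tail of the ring: [used_idx, RING_SIZE) */
		memcpy(&used_ring[used_idx], &shadow[0],
				size * sizeof(shadow[0]));
		/* Wrapped part at the head: [0, count - size) */
		memcpy(&used_ring[0], &shadow[size],
				(count - size) * sizeof(shadow[0]));
	}
}

int
main(void)
{
	uint16_t i;

	for (i = 0; i < 5; i++)
		shadow[i] = 100 + i;

	/* last_used_idx 6, 5 entries: 2 land at [6,7], 3 wrap to [0,2]. */
	flush_shadow(6, 5);

	for (i = 0; i < RING_SIZE; i++)
		printf("used_ring[%d] = %u\n", (int)i, used_ring[i]);
	return 0;
}

The point of shadowing is batching: descriptor updates accumulate in the
shadow ring during the burst and reach the guest-visible used ring in at most
two copies; in the real code a single barrier and used index update follow.)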

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7705c853b..7db3877d4 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -77,8 +77,9 @@ free_ind_table(void *idesc)
 }
 
 static __rte_always_inline void
-do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                         uint16_t to, uint16_t from, uint16_t size)
+do_flush_shadow_used_ring_split(struct virtio_net *dev,
+                       struct vhost_virtqueue *vq,
+                       uint16_t to, uint16_t from, uint16_t size)
 {
        rte_memcpy(&vq->used->ring[to],
                        &vq->shadow_used_ring[from],
@@ -89,22 +90,22 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
+flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
 
        if (used_idx + vq->shadow_used_idx <= vq->size) {
-               do_flush_shadow_used_ring(dev, vq, used_idx, 0,
+               do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;
 
                /* update used ring interval [used_idx, vq->size] */
                size = vq->size - used_idx;
-               do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
+               do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
 
                /* update the left half used ring interval [0, left_size] */
-               do_flush_shadow_used_ring(dev, vq, 0, size,
+               do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }
        vq->last_used_idx += vq->shadow_used_idx;
@@ -120,7 +121,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
 }
 
 static __rte_always_inline void
-update_shadow_used_ring(struct vhost_virtqueue *vq,
+update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint16_t len)
 {
        uint16_t i = vq->shadow_used_idx++;
@@ -353,7 +354,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;
                len = RTE_MIN(len, size);
-               update_shadow_used_ring(vq, head_idx, len);
+               update_shadow_used_ring_split(vq, head_idx, len);
                size -= len;
 
                cur_idx++;
@@ -579,7 +580,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        do_data_copy_enqueue(dev, vq);
 
        if (likely(vq->shadow_used_idx)) {
-               flush_shadow_used_ring(dev, vq);
+               flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call(dev, vq);
        }
 
@@ -1047,7 +1048,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        next = TAILQ_NEXT(zmbuf, next);
 
                        if (mbuf_is_consumed(zmbuf->mbuf)) {
-                               update_shadow_used_ring(vq, zmbuf->desc_idx, 0);
+                               update_shadow_used_ring_split(vq,
+                                               zmbuf->desc_idx, 0);
                                nr_updated += 1;
 
                                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
@@ -1058,7 +1060,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        }
                }
 
-               flush_shadow_used_ring(dev, vq);
+               flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call(dev, vq);
        }
 
@@ -1090,7 +1092,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        break;
 
                if (likely(dev->dequeue_zero_copy == 0))
-                       update_shadow_used_ring(vq, head_idx, 0);
+                       update_shadow_used_ring_split(vq, head_idx, 0);
 
                rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
 
@@ -1137,7 +1139,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                do_data_copy_dequeue(vq);
                if (unlikely(i < count))
                        vq->shadow_used_idx = i;
-               flush_shadow_used_ring(dev, vq);
+               flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call(dev, vq);
        }
 
-- 
2.14.4