This patch removes an unnecessary check and redundant function calls,
changes some internal variables to more appropriate types, and fixes typos.

Signed-off-by: Jiayu Hu <jiayu...@intel.com>
Tested-by: Yinan Wang <yinan.w...@intel.com>
---
 lib/librte_vhost/rte_vhost_async.h |  8 ++++----
 lib/librte_vhost/virtio_net.c      | 16 ++++++++--------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost_async.h 
b/lib/librte_vhost/rte_vhost_async.h
index c73bd7c..03bd558 100644
--- a/lib/librte_vhost/rte_vhost_async.h
+++ b/lib/librte_vhost/rte_vhost_async.h
@@ -112,7 +112,7 @@ struct rte_vhost_async_features {
 };
 
 /**
- * register a async channel for vhost
+ * register an async channel for vhost
  *
  * @param vid
  *  vhost device id async channel to be attached to
@@ -147,8 +147,8 @@ __rte_experimental
 int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
 
 /**
- * This function submit enqueue data to async engine. This function has
- * no guranttee to the transfer completion upon return. Applications
+ * This function submits enqueue data to async engine. This function has
+ * no guarantee to the transfer completion upon return. Applications
  * should poll transfer status by rte_vhost_poll_enqueue_completed()
  *
  * @param vid
@@ -167,7 +167,7 @@ uint16_t rte_vhost_submit_enqueue_burst(int vid, uint16_t 
queue_id,
                struct rte_mbuf **pkts, uint16_t count);
 
 /**
- * This function check async completion status for a specific vhost
+ * This function checks async completion status for a specific vhost
  * device queue. Packets which finish copying (enqueue) operation
  * will be returned in an array.
  *
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index fec08b2..0b63940 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1130,8 +1130,11 @@ async_mbuf_to_desc(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
        }
 
 out:
-       async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
-       async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+       if (tlen) {
+               async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+               async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+       } else
+               src_it->count = 0;
 
        return error;
 }
@@ -1491,10 +1494,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
        struct rte_vhost_iov_iter *src_it = it_pool;
        struct rte_vhost_iov_iter *dst_it = it_pool + 1;
        uint16_t n_free_slot, slot_idx = 0;
-       uint16_t pkt_err = 0;
        uint16_t segs_await = 0;
        struct async_inflight_info *pkts_info = vq->async_pkts_info;
-       int n_pkts = 0;
+       uint32_t n_pkts = 0, pkt_err = 0;
 
        /*
         * The ordering between avail index and desc reads need to be enforced.
@@ -1549,11 +1551,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
                /*
                 * conditions to trigger async device transfer:
                 * - buffered packet number reaches transfer threshold
-                * - this is the last packet in the burst enqueue
                 * - unused async iov number is less than max vhost vector
                 */
                if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
-                       (pkt_idx == count - 1 && pkt_burst_idx) ||
                        (VHOST_MAX_ASYNC_VEC / 2 - segs_await <
                        BUF_VECTOR_MAX)) {
                        n_pkts = vq->async_ops.transfer_data(dev->vid,
@@ -1565,7 +1565,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
                        segs_await = 0;
                        vq->async_pkts_inflight_n += pkt_burst_idx;
 
-                       if (unlikely(n_pkts < (int)pkt_burst_idx)) {
+                       if (unlikely(n_pkts < pkt_burst_idx)) {
                                /*
                                 * log error packets number here and do actual
                                 * error processing when applications poll
@@ -1585,7 +1585,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
                                queue_id, tdes, 0, pkt_burst_idx);
                vq->async_pkts_inflight_n += pkt_burst_idx;
 
-               if (unlikely(n_pkts < (int)pkt_burst_idx))
+               if (unlikely(n_pkts < pkt_burst_idx))
                        pkt_err = pkt_burst_idx - n_pkts;
        }
 
-- 
2.7.4

Reply via email to