Add netdev_dpdk_vhost_txq_drain(), which flushes packets buffered on
vHost User port queues. Also add netdev_dpdk_vhost_tx_burst(), which
uses rte_vhost_enqueue_burst() to enqueue a burst of packets on vHost
User ports.

Signed-off-by: Bhanuprakash Bodireddy <bhanuprakash.bodire...@intel.com>
Signed-off-by: Antonio Fischetti <antonio.fische...@intel.com>
Co-authored-by: Antonio Fischetti <antonio.fische...@intel.com>
---
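A rough caller-side sketch of how the drain hook is meant to be
exercised (illustrative only; it assumes the generic netdev_txq_drain()
wrapper from earlier in this series, and drain_port_txqs() is a
hypothetical helper name):

    /* Flush any packets left on a port's intermediate tx queues once
     * the current burst cycle completes. */
    static void
    drain_port_txqs(struct netdev *netdev, int n_txq)
    {
        for (int qid = 0; qid < n_txq; qid++) {
            /* For vhostuser/vhostuserclient ports this dispatches to
             * netdev_dpdk_vhost_txq_drain(); 'concurrent_txq' is
             * unused there as txqs are preallocated. */
            netdev_txq_drain(netdev, qid, false);
        }
    }
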
 lib/netdev-dpdk.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 65 insertions(+), 2 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4a9d9aa..dfaa3cd 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -308,10 +308,15 @@ struct dpdk_tx_queue {
                                     * to enabled by guest. */
     int count;                     /* Number of buffered packets waiting to
                                       be sent. */
+    int vhost_pkt_cnt;             /* Number of buffered packets waiting to
+                                      be sent on the vhost port. */
     struct rte_mbuf *burst_pkts[INTERIM_QUEUE_BURST_THRESHOLD];
                                    /* Intermediate queue where packets can
                                     * be buffered to amortize the cost of MMIO
                                     * writes. */
+    struct dp_packet *pkts[INTERIM_QUEUE_BURST_THRESHOLD];
+                                   /* Intermediate queue where packets can
+                                    * be buffered for vhost ports. */
 };
 
 /* dpdk has no way to remove dpdk ring ethernet devices
@@ -1714,6 +1719,63 @@ netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
     }
 }
 
+static int
+netdev_dpdk_vhost_tx_burst(struct netdev_dpdk *dev, int qid)
+{
+    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
+    struct rte_mbuf **cur_pkts = (struct rte_mbuf **)txq->pkts;
+
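+    /* 'vid' identifies the vhost device; packets transmitted on queue
+     * 'qid' are enqueued to the guest's paired RX virtqueue
+     * (qid * VIRTIO_QNUM + VIRTIO_RXQ). */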
+    int tx_vid = netdev_dpdk_get_vid(dev);
+    int tx_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
+    uint32_t sent = 0;
+    uint32_t retries = 0;
+    uint32_t sum, total_pkts;
+
+    total_pkts = sum = txq->vhost_pkt_cnt;
+    do {
+        uint32_t ret;
+        ret = rte_vhost_enqueue_burst(tx_vid, tx_qid, &cur_pkts[sent], sum);
+        if (OVS_UNLIKELY(!ret)) {
+            /* No packets enqueued - do not retry. */
+            break;
+        } else {
+            /* Packets have been sent. */
+            sent += ret;
+
+            /* 'sum' packets have to be retransmitted. */
+            sum -= ret;
+        }
+    } while (sum && (retries++ < VHOST_ENQ_RETRY_NUM));
+
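+    /* rte_vhost_enqueue_burst() copies packet data into the guest's
+     * virtqueue buffers, so all buffered packets can be freed here
+     * regardless of how many were successfully enqueued. */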
+    for (int i = 0; i < total_pkts; i++) {
+        dp_packet_delete(txq->pkts[i]);
+    }
+
+    /* Reset pkt count. */
+    txq->vhost_pkt_cnt = 0;
+
+    /* 'sum' refers to the number of packets dropped. */
+    return sum;
+}
+
+/* Drain the txq if there are any packets available.
+ * dynamic_txqs/concurrent_txq is disabled for vHost User ports, as
+ * 'OVS_VHOST_MAX_QUEUE_NUM' (1024) txqs are preallocated.
+ */
+static int
+netdev_dpdk_vhost_txq_drain(struct netdev *netdev, int qid,
+                            bool concurrent_txq OVS_UNUSED)
+{
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
+
+    if (OVS_LIKELY(txq->vhost_pkt_cnt)) {
+        netdev_dpdk_vhost_tx_burst(dev, qid);
+    }
+
+    return 0;
+}
+
 static void
 __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                          struct dp_packet **pkts, int cnt)
@@ -3425,7 +3487,8 @@ static const struct netdev_class dpdk_vhost_class =
         NULL,
         netdev_dpdk_vhost_reconfigure,
         netdev_dpdk_vhost_rxq_recv,
-        NULL);
+        netdev_dpdk_vhost_txq_drain);
+
 static const struct netdev_class dpdk_vhost_client_class =
     NETDEV_DPDK_CLASS(
         "dpdkvhostuserclient",
@@ -3441,7 +3504,7 @@ static const struct netdev_class dpdk_vhost_client_class =
         NULL,
         netdev_dpdk_vhost_client_reconfigure,
         netdev_dpdk_vhost_rxq_recv,
-        NULL);
+        netdev_dpdk_vhost_txq_drain);
 
 void
 netdev_dpdk_register(void)
-- 
2.4.11
