[dpdk-dev] [PATCH v4 5/6] vmxnet3: add TSO support

2016-03-15 Thread Thomas Monjalon
2016-01-12 18:08, Yong Wang:
> +   /* Drop non-TSO packet that is excessively fragmented */
> +   if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
> +   PMD_TX_LOG(ERROR, "Non-TSO packet cannot occupy more than %d tx "
> +  "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
> 

It cannot compile because rte_log.h defines RTE_LOG_ERR, not RTE_LOG_ERROR.
Will be fixed when rebased on master branch.
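
The failure is at the preprocessor level: DPDK's log macros build the level
constant by token pasting, so the level name passed to PMD_TX_LOG must match
a macro that rte_log.h actually defines (RTE_LOG_ERR exists; RTE_LOG_ERROR
does not). A minimal self-contained sketch of the mechanism, using a
hypothetical printf-based stand-in for the real macro chain:

#include <stdio.h>

/* rte_log.h defines RTE_LOG_ERR -- there is no RTE_LOG_ERROR. */
#define RTE_LOG_ERR 4U

/* Hypothetical stand-in for PMD_TX_LOG: like the real macros, it pastes
 * the level token onto "RTE_LOG_", so a token with no matching macro
 * leaves an undeclared identifier behind at compile time. */
#define PMD_TX_LOG(level, fmt, ...) \
	printf("level %u: " fmt "\n", RTE_LOG_ ## level, ##__VA_ARGS__)

int main(void)
{
	PMD_TX_LOG(ERR, "compiles: token pastes to RTE_LOG_ERR");
	/* PMD_TX_LOG(ERROR, ...) would paste to RTE_LOG_ERROR: undeclared */
	return 0;
}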


[dpdk-dev] [PATCH v4 5/6] vmxnet3: add TSO support

2016-01-12 Thread Yong Wang
This commit adds vmxnet3 TSO support.

Verified with test-pmd (set fwd csum) that both tso and
non-tso pkts can be successfully transmitted and all
segments for a tso pkt are correct on the receiver side.

Signed-off-by: Yong Wang 
---
 doc/guides/rel_notes/release_2_3.rst |   3 +
 drivers/net/vmxnet3/vmxnet3_rxtx.c   | 108 ++-
 2 files changed, 84 insertions(+), 27 deletions(-)
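
For context on the xmit changes below: a TSO packet reaches the PMD as a
(possibly multi-segment) mbuf with PKT_TX_TCP_SEG set in ol_flags, together
with the header lengths and MSS the device needs to segment the payload; the
new transmit path keys off that flag. A hedged sketch of how an application
marks such an mbuf (flag and field names are from rte_mbuf.h of this era;
the helper itself is hypothetical, and depending on the driver the TCP
pseudo-header checksum may also need to be seeded):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* Hypothetical helper: mark an already-built IPv4/TCP mbuf for TSO.
 * The l2/l3/l4 lengths tell the driver where the payload begins and
 * tso_segsz caps the payload carried by each segment (the MSS). */
static void
prepare_tso_mbuf(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
	m->l2_len = sizeof(struct ether_hdr);  /* Ethernet header */
	m->l3_len = sizeof(struct ipv4_hdr);   /* IPv4 header, no options */
	m->l4_len = sizeof(struct tcp_hdr);    /* TCP header, no options */
	m->tso_segsz = mss;                    /* payload bytes per segment */
}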

diff --git a/doc/guides/rel_notes/release_2_3.rst b/doc/guides/rel_notes/release_2_3.rst
index 58205fe..ae487bb 100644
--- a/doc/guides/rel_notes/release_2_3.rst
+++ b/doc/guides/rel_notes/release_2_3.rst
@@ -24,6 +24,9 @@ Drivers

   Support TCP/UDP checksum offload.

+* **vmxnet3: add TSO support.**
+
+
 Libraries
~~~~~~~~~

diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 2c1bc3c..103294a 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -295,27 +295,45 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
}
 }

+static int
+vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
+{
+   int completed = 0;
+   struct rte_mbuf *mbuf;
+
+   /* Release cmd_ring descriptor and free mbuf */
+   VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+
+   mbuf = txq->cmd_ring.buf_info[eop_idx].m;
+   if (mbuf == NULL)
+   rte_panic("EOP desc does not point to a valid mbuf");
+   rte_pktmbuf_free(mbuf);
+
+   txq->cmd_ring.buf_info[eop_idx].m = NULL;
+
+   while (txq->cmd_ring.next2comp != eop_idx) {
+   /* no out-of-order completion */
+   VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+   vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+   completed++;
+   }
+
+   /* Mark the txd for which tcd was generated as completed */
+   vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+
+   return completed + 1;
+}
+
 static void
 vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
 {
int completed = 0;
-   struct rte_mbuf *mbuf;
vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
(comp_ring->base + comp_ring->next2proc);

while (tcd->gen == comp_ring->gen) {
-   /* Release cmd_ring descriptor and free mbuf */
-   VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
-   while (txq->cmd_ring.next2comp != tcd->txdIdx) {
-   mbuf = txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m;
-   txq->cmd_ring.buf_info[txq->cmd_ring.next2comp].m = NULL;
-   rte_pktmbuf_free_seg(mbuf);
-
-   /* Mark the txd for which tcd was generated as completed */
-   vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
-   completed++;
-   }
+   completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);

vmxnet3_comp_ring_adv_next2proc(comp_ring);
tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
comp_ring->next2proc);
@@ -351,21 +369,43 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct rte_mbuf *txm = tx_pkts[nb_tx];
struct rte_mbuf *m_seg = txm;
int copy_size = 0;
+   bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+   /* # of descriptors needed for a packet. */
+   unsigned count = txm->nb_segs;

-   /* Is this packet execessively fragmented, then drop */
-   if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
-   ++txq->stats.drop_too_many_segs;
-   ++txq->stats.drop_total;
+   avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+   if (count > avail) {
+   /* Is command ring full? */
+   if (unlikely(avail == 0)) {
+   PMD_TX_LOG(DEBUG, "No free ring descriptors");
+   txq->stats.tx_ring_full++;
+   txq->stats.drop_total += (nb_pkts - nb_tx);
+   break;
+   }
+
+   /* Command ring is not full but cannot handle the
+* multi-segmented packet. Let's try the next packet
+* in this case.
+*/
+   PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
+  "(avail %d needed %d)", avail, count);
+   txq->stats.drop_total++;
+   if (tso)
+   txq->stats.drop_tso++;
rte_pktmbuf_free(txm);
-   ++nb_tx;
+   nb_tx++;
continue;
}

-   /* Is command ring full?