On Sun, Jun 16, 2019 at 11:17:09AM +0100, Andrew Rybchenko wrote:
> From: Dilshod Urazov <dilshod.ura...@oktetlabs.ru>
> 
> VLAN tag insertion should be in Tx prepare, not in Tx burst functions.

Please add some details in the commit log.

Thanks,
Tiwei

> 
> Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Dilshod Urazov <dilshod.ura...@oktetlabs.ru>
> Signed-off-by: Andrew Rybchenko <arybche...@solarflare.com>
> ---
>  drivers/net/virtio/virtio_rxtx.c | 50 +++++++++-----------------------
>  1 file changed, 14 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 07f8f47de..dcce39e8c 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -1966,6 +1966,20 @@ virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
>  		}
>  #endif
> 
> +		/* Do VLAN tag insertion */
> +		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
> +			error = rte_vlan_insert(&m);
> +			/* rte_vlan_insert() may change pointer
> +			 * even in the case of failure
> +			 */
> +			tx_pkts[nb_tx] = m;
> +
> +			if (unlikely(error)) {
> +				rte_errno = -error;
> +				break;
> +			}
> +		}
> +
>  		error = rte_net_intel_cksum_prepare(m);
>  		if (unlikely(error)) {
>  			rte_errno = -error;
> @@ -1989,7 +2003,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_tx = 0;
>  	bool in_order = hw->use_inorder_tx;
> -	int error;
> 
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2007,17 +2020,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
>  		int can_push = 0, slots, need;
> 
> -		/* Do VLAN tag insertion */
> -		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
> -			error = rte_vlan_insert(&txm);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(txm);
> -				continue;
> -			}
> -			/* vlan_insert may add a header mbuf */
> -			tx_pkts[nb_tx] = txm;
> -		}
> -
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>  		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> @@ -2077,7 +2079,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_used, nb_tx = 0;
> -	int error;
> 
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2096,17 +2097,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
>  		int can_push = 0, use_indirect = 0, slots, need;
> 
> -		/* Do VLAN tag insertion */
> -		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
> -			error = rte_vlan_insert(&txm);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(txm);
> -				continue;
> -			}
> -			/* vlan_insert may add a header mbuf */
> -			tx_pkts[nb_tx] = txm;
> -		}
> -
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>  		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> @@ -2176,7 +2166,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
>  	struct rte_mbuf *inorder_pkts[nb_pkts];
> -	int error;
> 
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2201,17 +2190,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
>  		int slots, need;
> 
> -		/* Do VLAN tag insertion */
> -		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
> -			error = rte_vlan_insert(&txm);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(txm);
> -				continue;
> -			}
> -			/* vlan_insert may add a header mbuf */
> -			tx_pkts[nb_tx] = txm;
> -		}
> -
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>  		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> -- 
> 2.17.1
> 
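
For reference, the application-side sequence this change relies on is the usual
prepare-then-burst flow: the application requests VLAN insertion via
PKT_TX_VLAN_PKT and vlan_tci, rte_eth_tx_prepare() does the software insertion
(and may replace mbuf pointers in the array), and rte_eth_tx_burst() no longer
touches the VLAN header. Below is only a minimal illustrative sketch, not part
of the patch; the function name, port/queue ids and TCI value are placeholders.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative only: request software VLAN insertion and run the burst
 * through tx_prepare before tx_burst.
 */
static uint16_t
send_burst_with_vlan(uint16_t port_id, uint16_t queue_id,
		     struct rte_mbuf **pkts, uint16_t nb_pkts,
		     uint16_t vlan_tci)
{
	uint16_t i, nb_prep;

	for (i = 0; i < nb_pkts; i++) {
		pkts[i]->ol_flags |= PKT_TX_VLAN_PKT;
		pkts[i]->vlan_tci = vlan_tci;
	}

	/* With this patch the virtio PMD calls rte_vlan_insert() here, so
	 * entries of pkts[] may be updated even on failure; an error is
	 * reported via rte_errno instead of the mbuf being silently freed.
	 */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	/* The Tx burst path itself no longer modifies the packet. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}

One behavioural difference that may be worth spelling out in the commit log:
previously a failed rte_vlan_insert() freed the mbuf inside the burst function
and the burst continued, whereas now tx_prepare stops at the offending packet
and leaves the decision to the caller.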