This patch adds VxLAN & NVGRE TX checksum off-load. When the outer IP
header checksum off-load flag is set on a packet, the driver sets up
the context descriptor to enable this off-load.

Also update the release notes for VxLAN & NVGRE checksum off-load
support.
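
For reference, a minimal sketch of how an application could request the
new off-load on a VxLAN packet. The helper below is illustrative and not
part of this patch; it assumes the standard rte_mbuf TX off-load fields
and an outer Ethernet/IPv4/UDP + VxLAN + inner Ethernet/IPv4 layout:

    #include <rte_mbuf.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_udp.h>

    /* Illustrative only: request inner + outer IPv4 checksum off-load
     * for a VxLAN packet (outer Ethernet/IPv4/UDP + VxLAN + inner
     * Ethernet/IPv4). */
    static void
    request_vxlan_tx_cksum(struct rte_mbuf *m)
    {
            m->outer_l2_len = sizeof(struct ether_hdr);
            m->outer_l3_len = sizeof(struct ipv4_hdr);
            /* For tunnel packets, l2_len spans the outer L4 header,
             * the tunnel header and the inner L2 header. */
            m->l2_len = sizeof(struct udp_hdr) + 8 /* VxLAN */ +
                        sizeof(struct ether_hdr);
            m->l3_len = sizeof(struct ipv4_hdr);
            m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                           PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    }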

Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
---
 doc/guides/rel_notes/release_16_04.rst |  9 ++++++
 drivers/net/ixgbe/ixgbe_ethdev.c       |  4 +++
 drivers/net/ixgbe/ixgbe_rxtx.c         | 56 +++++++++++++++++++++++++++-------
 drivers/net/ixgbe/ixgbe_rxtx.h         |  6 +++-
 4 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
index 8273817..a17c2fb 100644
--- a/doc/guides/rel_notes/release_16_04.rst
+++ b/doc/guides/rel_notes/release_16_04.rst
@@ -46,6 +46,15 @@ This section should contain new features added in this release. Sample format:

 * **Added vhost-user live migration support.**

+* **Added support for VxLAN & NVGRE checksum off-load on X550.**
+
+  * Added support for VxLAN & NVGRE RX/TX checksum off-load on
+    X550. RX/TX checksum off-load is supported on both the inner
+    and outer IP headers and the TCP header.
+  * Added functions to support VxLAN port configuration. The
+    default VxLAN port number is 4789 but this can be updated
+    programmatically.
+

 Resolved Issues
 ---------------
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 86afba4..7ad7a84 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2811,6 +2811,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_SCTP_CKSUM  |
                DEV_TX_OFFLOAD_TCP_TSO;

+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6b913ee..c2c71de 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -85,7 +85,8 @@
                PKT_TX_VLAN_PKT |                \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
-               PKT_TX_TCP_SEG)
+               PKT_TX_TCP_SEG |                 \
+               PKT_TX_OUTER_IP_CKSUM)

 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
@@ -364,9 +365,11 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
        uint32_t ctx_idx;
        uint32_t vlan_macip_lens;
        union ixgbe_tx_offload tx_offload_mask;
+       uint32_t seqnum_seed = 0;

        ctx_idx = txq->ctx_curr;
-       tx_offload_mask.data = 0;
+       tx_offload_mask.data[0] = 0;
+       tx_offload_mask.data[1] = 0;
        type_tucmd_mlhl = 0;

        /* Specify which HW CTX to upload. */
@@ -430,18 +433,35 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                }
        }

+       if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+               tx_offload_mask.outer_l2_len |= ~0;
+               tx_offload_mask.outer_l3_len |= ~0;
+               tx_offload_mask.l2_len |= ~0;
+               seqnum_seed |= tx_offload.outer_l3_len
+                              << IXGBE_ADVTXD_OUTER_IPLEN;
+               seqnum_seed |= tx_offload.l2_len
+                              << IXGBE_ADVTXD_TUNNEL_LEN;
+       }
+
        txq->ctx_cache[ctx_idx].flags = ol_flags;
-       txq->ctx_cache[ctx_idx].tx_offload.data  =
-               tx_offload_mask.data & tx_offload.data;
+       txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
+               tx_offload_mask.data[0] & tx_offload.data[0];
+       txq->ctx_cache[ctx_idx].tx_offload.data[1]  =
+               tx_offload_mask.data[1] & tx_offload.data[1];
        txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;

        ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
        vlan_macip_lens = tx_offload.l3_len;
-       vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
+       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+               vlan_macip_lens |= (tx_offload.outer_l2_len <<
+                                   IXGBE_ADVTXD_MACLEN_SHIFT);
+       else
+               vlan_macip_lens |= (tx_offload.l2_len <<
+                                   IXGBE_ADVTXD_MACLEN_SHIFT);
        vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
        ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
        ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
-       ctx_txd->seqnum_seed     = 0;
+       ctx_txd->seqnum_seed     = seqnum_seed;
 }

 /*
@@ -454,16 +474,24 @@ what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
 {
        /* If match with the current used context */
        if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-               (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
-               (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+               (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+               (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+                & tx_offload.data[0])) &&
+               (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+               (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+                & tx_offload.data[1])))) {
                        return txq->ctx_curr;
        }

        /* What if match with the next context  */
        txq->ctx_curr ^= 1;
        if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-               (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
-               (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+               (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+               (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+                & tx_offload.data[0])) &&
+               (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+               (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+                & tx_offload.data[1])))) {
                        return txq->ctx_curr;
        }

@@ -492,6 +520,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
                cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
        if (ol_flags & PKT_TX_TCP_SEG)
                cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+               cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
        return cmdtype;
 }

@@ -588,8 +618,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint64_t tx_ol_req;
        uint32_t ctx = 0;
        uint32_t new_ctx;
-       union ixgbe_tx_offload tx_offload = {0};
+       union ixgbe_tx_offload tx_offload;

+       tx_offload.data[0] = 0;
+       tx_offload.data[1] = 0;
        txq = tx_queue;
        sw_ring = txq->sw_ring;
        txr     = txq->tx_ring;
@@ -623,6 +655,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tx_offload.l4_len = tx_pkt->l4_len;
                        tx_offload.vlan_tci = tx_pkt->vlan_tci;
                        tx_offload.tso_segsz = tx_pkt->tso_segsz;
+                       tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+                       tx_offload.outer_l3_len = tx_pkt->outer_l3_len;

                        /* If new context need be built or reuse the exist ctx. */
                        ctx = what_advctx_update(txq, tx_ol_req,
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 475a800..c15f9fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -163,7 +163,7 @@ enum ixgbe_advctx_num {

 /** Offload features */
 union ixgbe_tx_offload {
-       uint64_t data;
+       uint64_t data[2];
        struct {
                uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
                uint64_t l3_len:9; /**< L3 (IP) Header Length. */
@@ -171,6 +171,10 @@ union ixgbe_tx_offload {
                uint64_t tso_segsz:16; /**< TCP TSO segment size */
                uint64_t vlan_tci:16;
                /**< VLAN Tag Control Identifier (CPU order). */
+
+               /* fields for TX offloading of tunnels */
+               uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
+               uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
        };
 };

-- 
1.9.3
