Since i40e and ice have the same checksum offload logic, merge their checksum-enable functions into a single common one. Future rework should enable this to be used by more drivers as well.

Signed-off-by: Bruce Richardson <[email protected]>
---
 drivers/net/intel/common/tx_scalar_fns.h | 63 +++++++++++++++++++++++
 drivers/net/intel/i40e/i40e_rxtx.c       | 57 +--------------------
 drivers/net/intel/ice/ice_rxtx.c         | 64 +-----------------------
 3 files changed, 65 insertions(+), 119 deletions(-)

diff --git a/drivers/net/intel/common/tx_scalar_fns.h b/drivers/net/intel/common/tx_scalar_fns.h
index f894cea616..95ee7dc35f 100644
--- a/drivers/net/intel/common/tx_scalar_fns.h
+++ b/drivers/net/intel/common/tx_scalar_fns.h
@@ -64,6 +64,69 @@ ci_tx_xmit_cleanup(struct ci_tx_queue *txq)
 	return 0;
 }
 
+/* Common checksum enable function for Intel drivers (ice, i40e, etc.) */
+static inline void
+ci_txd_enable_checksum(uint64_t ol_flags,
+			uint32_t *td_cmd,
+			uint32_t *td_offset,
+			union ci_tx_offload tx_offload)
+{
+	/* Set MACLEN */
+	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
+		*td_offset |= (tx_offload.l2_len >> 1)
+			<< CI_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offloads */
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		*td_offset |= (tx_offload.l3_len >> 2) <<
+			CI_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4;
+		*td_offset |= (tx_offload.l3_len >> 2) <<
+			CI_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
+		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV6;
+		*td_offset |= (tx_offload.l3_len >> 2) <<
+			CI_TX_DESC_LEN_IPLEN_S;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
+		*td_offset |= (tx_offload.l4_len >> 2) <<
+			CI_TX_DESC_LEN_L4_LEN_S;
+		return;
+	}
+
+	if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
+		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
+		*td_offset |= (tx_offload.l4_len >> 2) <<
+			CI_TX_DESC_LEN_L4_LEN_S;
+		return;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	case RTE_MBUF_F_TX_TCP_CKSUM:
+		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
+		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+			CI_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case RTE_MBUF_F_TX_SCTP_CKSUM:
+		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_SCTP;
+		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+			CI_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case RTE_MBUF_F_TX_UDP_CKSUM:
+		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
+		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+			CI_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+}
+
 static inline uint16_t
 ci_div_roundup16(uint16_t x, uint16_t y)
 {
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index e1964eab97..5d1b2e4217 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -306,61 +306,6 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
 	*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
 }
 
-static inline void
-i40e_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union ci_tx_offload tx_offload)
-{
-	/* Set MACLEN */
-	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
-		*td_offset |= (tx_offload.l2_len >> 1)
-			<< CI_TX_DESC_LEN_MACLEN_S;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2)
-			<< CI_TX_DESC_LEN_IPLEN_S;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
-		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2)
-			<< CI_TX_DESC_LEN_IPLEN_S;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
-		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2)
-			<< CI_TX_DESC_LEN_IPLEN_S;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2)
-			<< CI_TX_DESC_LEN_L4_LEN_S;
-		return;
-	}
-
-	/* Enable L4 checksum offloads */
-	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
-	case RTE_MBUF_F_TX_TCP_CKSUM:
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_UDP_CKSUM:
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	default:
-		break;
-	}
-}
-
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -1167,7 +1112,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/* Enable checksum offloading */
 		if (ol_flags & CI_TX_CKSUM_OFFLOAD_MASK)
-			i40e_txd_enable_checksum(ol_flags, &td_cmd,
+			ci_txd_enable_checksum(ol_flags, &td_cmd,
 						&td_offset, tx_offload);
 
 		if (nb_ctx) {
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index e102eb9bcc..0b0179e1fa 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -2947,68 +2947,6 @@ ice_parse_tunneling_params(uint64_t ol_flags,
 	*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
 }
 
-static inline void
-ice_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union ci_tx_offload tx_offload)
-{
-	/* Set MACLEN */
-	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
-		*td_offset |= (tx_offload.l2_len >> 1)
-			<< CI_TX_DESC_LEN_MACLEN_S;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			CI_TX_DESC_LEN_IPLEN_S;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
-		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			CI_TX_DESC_LEN_IPLEN_S;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
-		*td_cmd |= CI_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			CI_TX_DESC_LEN_IPLEN_S;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		return;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		return;
-	}
-
-	/* Enable L4 checksum offloads */
-	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
-	case RTE_MBUF_F_TX_TCP_CKSUM:
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_UDP_CKSUM:
-		*td_cmd |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	default:
-		break;
-	}
-}
-
 /* Construct the tx flags */
 static inline uint64_t
 ice_build_ctob(uint32_t td_cmd,
@@ -3206,7 +3144,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/* Enable checksum offloading */
 		if (ol_flags & CI_TX_CKSUM_OFFLOAD_MASK)
-			ice_txd_enable_checksum(ol_flags, &td_cmd,
+			ci_txd_enable_checksum(ol_flags, &td_cmd,
 						&td_offset, tx_offload);
 
 		if (nb_ctx) {
-- 
2.51.0
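
[Editor's note, not part of the patch: a minimal sketch of how another driver's scalar Tx path might reuse the merged helper, mirroring the call sites in i40e_xmit_pkts() and ice_xmit_pkts() above. The wrapper name xx_fill_cksum_fields and the relative include path are assumptions for illustration; ci_txd_enable_checksum(), union ci_tx_offload and CI_TX_CKSUM_OFFLOAD_MASK come from tx_scalar_fns.h as changed by this patch.]

	#include <rte_mbuf.h>

	#include "../common/tx_scalar_fns.h"	/* assumed relative path */

	/* Hypothetical per-packet helper: fill the checksum-related descriptor
	 * fields (td_cmd/td_offset) for one mbuf, the same way the i40e and ice
	 * scalar Tx paths do after this patch.
	 */
	static inline void
	xx_fill_cksum_fields(const struct rte_mbuf *mb,
			uint32_t *td_cmd, uint32_t *td_offset)
	{
		union ci_tx_offload tx_offload = {0};
		uint64_t ol_flags = mb->ol_flags;

		tx_offload.l2_len = mb->l2_len;
		tx_offload.l3_len = mb->l3_len;
		tx_offload.l4_len = mb->l4_len;

		if (ol_flags & CI_TX_CKSUM_OFFLOAD_MASK)
			ci_txd_enable_checksum(ol_flags, td_cmd, td_offset,
					tx_offload);
	}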

