Replace the existing complicated Tx path selection logic with the common ci_tx_path_select() function, driven by a ci_tx_path_features request built from the device's Tx offloads, SIMD width, and queue model.
Signed-off-by: Ciara Loftus <[email protected]> --- drivers/net/intel/cpfl/cpfl_rxtx.c | 114 +++++------------- drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h | 10 -- 2 files changed, 32 insertions(+), 92 deletions(-) diff --git a/drivers/net/intel/cpfl/cpfl_rxtx.c b/drivers/net/intel/cpfl/cpfl_rxtx.c index 453ec975d5..2bf66159b5 100644 --- a/drivers/net/intel/cpfl/cpfl_rxtx.c +++ b/drivers/net/intel/cpfl/cpfl_rxtx.c @@ -1462,97 +1462,47 @@ cpfl_set_tx_function(struct rte_eth_dev *dev) struct cpfl_vport *cpfl_vport = dev->data->dev_private; struct idpf_vport *vport = &cpfl_vport->base; #ifdef RTE_ARCH_X86 - enum rte_vect_max_simd tx_simd_width = RTE_VECT_SIMD_DISABLED; #ifdef CC_AVX512_SUPPORT - struct cpfl_tx_queue *cpfl_txq; + struct ci_tx_queue *txq; int i; #endif /* CC_AVX512_SUPPORT */ - - if (cpfl_tx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { - vport->tx_vec_allowed = true; - tx_simd_width = cpfl_get_max_simd_bitwidth(); -#ifdef CC_AVX512_SUPPORT - if (tx_simd_width == RTE_VECT_SIMD_512) { - for (i = 0; i < dev->data->nb_tx_queues; i++) { - cpfl_txq = dev->data->tx_queues[i]; - idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base); - } - } -#else - PMD_DRV_LOG(NOTICE, - "AVX512 is not supported in build env"); -#endif /* CC_AVX512_SUPPORT */ - } else { - vport->tx_vec_allowed = false; - } #endif /* RTE_ARCH_X86 */ + struct idpf_adapter *ad = vport->adapter; + struct ci_tx_path_features req_features = { + .tx_offloads = dev->data->dev_conf.txmode.offloads, + .simd_width = RTE_VECT_SIMD_DISABLED, + .extra.single_queue = (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) + }; #ifdef RTE_ARCH_X86 - if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { - if (vport->tx_vec_allowed) { + if (cpfl_tx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH) + req_features.simd_width = cpfl_get_max_simd_bitwidth(); +#endif + + ad->tx_func_type = ci_tx_path_select(req_features, + &idpf_tx_path_infos[0], + IDPF_TX_MAX, + 
IDPF_TX_DEFAULT); + + dev->tx_pkt_burst = idpf_tx_path_infos[ad->tx_func_type].pkt_burst; + dev->tx_pkt_prepare = idpf_dp_prep_pkts; + PMD_DRV_LOG(NOTICE, "Using %s Tx (port %d).", + idpf_tx_path_infos[ad->tx_func_type].info, dev->data->port_id); + +#ifdef RTE_ARCH_X86 + if (idpf_tx_path_infos[ad->tx_func_type].features.simd_width >= RTE_VECT_SIMD_256 && + idpf_tx_path_infos[ad->tx_func_type].features.extra.single_queue) { #ifdef CC_AVX512_SUPPORT - if (tx_simd_width == RTE_VECT_SIMD_512) { - PMD_DRV_LOG(NOTICE, - "Using Split AVX512 Vector Tx (port %d).", - dev->data->port_id); - dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts_avx512; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; - return; - } -#endif /* CC_AVX512_SUPPORT */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (txq == NULL) + continue; + if (idpf_tx_path_infos[ad->tx_func_type].features.simd_width == + RTE_VECT_SIMD_512) + idpf_qc_tx_vec_avx512_setup(txq); } - PMD_DRV_LOG(NOTICE, - "Using Split Scalar Tx (port %d).", - dev->data->port_id); - dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; - } else { - if (vport->tx_vec_allowed) { -#ifdef CC_AVX512_SUPPORT - if (tx_simd_width == RTE_VECT_SIMD_512) { - for (i = 0; i < dev->data->nb_tx_queues; i++) { - cpfl_txq = dev->data->tx_queues[i]; - if (cpfl_txq == NULL) - continue; - idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base); - } - PMD_DRV_LOG(NOTICE, - "Using Single AVX512 Vector Tx (port %d).", - dev->data->port_id); - dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx512; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; - return; - } #endif /* CC_AVX512_SUPPORT */ - if (tx_simd_width == RTE_VECT_SIMD_256) { - PMD_DRV_LOG(NOTICE, - "Using Single AVX2 Vector Tx (port %d).", - dev->data->port_id); - dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx2; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; - return; - } - } - PMD_DRV_LOG(NOTICE, - "Using Single Scalar Tx (port %d).", - dev->data->port_id); - 
dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; - } -#else - if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { - PMD_DRV_LOG(NOTICE, - "Using Split Scalar Tx (port %d).", - dev->data->port_id); - dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; - } else { - PMD_DRV_LOG(NOTICE, - "Using Single Scalar Tx (port %d).", - dev->data->port_id); - dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts; - dev->tx_pkt_prepare = idpf_dp_prep_pkts; + vport->tx_vec_allowed = true; } #endif /* RTE_ARCH_X86 */ } diff --git a/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h index 525ca9a6e0..f0daa1eb30 100644 --- a/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h +++ b/drivers/net/intel/cpfl/cpfl_rxtx_vec_common.h @@ -23,13 +23,6 @@ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ RTE_ETH_RX_OFFLOAD_TIMESTAMP) -#define CPFL_TX_NO_VECTOR_FLAGS ( \ - RTE_ETH_TX_OFFLOAD_TCP_TSO | \ - RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ - RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ - RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ - RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ - RTE_ETH_TX_OFFLOAD_TCP_CKSUM) static inline int cpfl_rx_vec_queue_default(struct idpf_rx_queue *rxq) @@ -62,9 +55,6 @@ cpfl_tx_vec_queue_default(struct ci_tx_queue *txq) (txq->tx_rs_thresh & 3) != 0) return CPFL_SCALAR_PATH; - if ((txq->offloads & CPFL_TX_NO_VECTOR_FLAGS) != 0) - return CPFL_SCALAR_PATH; - return CPFL_VECTOR_PATH; } -- 2.43.0

