On Tue, Dec 09, 2025 at 11:26:42AM +0000, Ciara Loftus wrote:
> Replace the existing complicated logic with the use of the common
> function. Introduce a new feature "simple tx" to the common
> infrastructure which represents whether or not a simplified transmit
> path may be selected for the device.
> 
> Signed-off-by: Ciara Loftus <[email protected]>
> ---
>  drivers/net/intel/common/tx.h               |  10 ++
>  drivers/net/intel/ice/ice_rxtx.c            | 142 +++++++++-----------
>  drivers/net/intel/ice/ice_rxtx.h            |  30 ++++-
>  drivers/net/intel/ice/ice_rxtx_vec_common.h |  35 +----
>  drivers/net/intel/ice/ice_rxtx_vec_sse.c    |   6 -
>  5 files changed, 103 insertions(+), 120 deletions(-)
> 
> diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
> index c6c1904ba3..3480c5e07c 100644
> --- a/drivers/net/intel/common/tx.h
> +++ b/drivers/net/intel/common/tx.h
> @@ -118,15 +118,21 @@ struct ci_tx_queue {
>       };
>  };
>  
> +struct ci_tx_path_features_extra {
> +     bool simple_tx;
> +};
> +
>  struct ci_tx_path_features {
>       uint32_t tx_offloads;
>       enum rte_vect_max_simd simd_width;
> +     struct ci_tx_path_features_extra extra;

Two thoughts on this. First, do we really need a substructure here rather
than just adding the flags directly to the path_features structure? Second,
should this addition not be included in patch 1, which adds the rest of the
support for the tx path selection logic? I don't see a reason to put half
the infrastructure there and the rest here.

>  };
>  
>  struct ci_tx_path_info {
>       eth_tx_burst_t pkt_burst;
>       const char *info;
>       struct ci_tx_path_features features;
> +     eth_tx_prep_t pkt_prep;
>  };

Similar comment here - should the pkt_prep function not be in place from
the first patch?

>  
>  static __rte_always_inline void
> @@ -302,6 +308,10 @@ ci_tx_path_select(struct ci_tx_path_features 
> req_features,
>       for (i = 0; i < num_paths; i++) {
>               const struct ci_tx_path_features *path_features = 
> &infos[i].features;
>  
> +             /* Do not use a simple tx path if not requested. */
> +             if (path_features->extra.simple_tx && 
> !req_features.extra.simple_tx)
> +                     continue;
> +
>               /* Ensure the path supports the requested TX offloads. */
>               if ((path_features->tx_offloads & req_features.tx_offloads) !=
>                               req_features.tx_offloads)
> diff --git a/drivers/net/intel/ice/ice_rxtx.c 
> b/drivers/net/intel/ice/ice_rxtx.c
> index f05ca83e5b..ca9cdc9618 100644
> --- a/drivers/net/intel/ice/ice_rxtx.c
> +++ b/drivers/net/intel/ice/ice_rxtx.c
> @@ -4091,39 +4091,70 @@ ice_prep_pkts(void *tx_queue, struct rte_mbuf 
> **tx_pkts,
>       return i;
>  }
>  
> -static const struct {
> -     eth_tx_burst_t pkt_burst;
> -     const char *info;
> -} ice_tx_burst_infos[] = {
> +static const struct ci_tx_path_info ice_tx_path_infos[] = {
>       [ICE_TX_DEFAULT] = {
>               .pkt_burst = ice_xmit_pkts,
> -             .info = "Scalar"
> +             .info = "Scalar",
> +             .features = {
> +                     .tx_offloads = ICE_TX_SCALAR_OFFLOADS
> +             },
> +             .pkt_prep = ice_prep_pkts
>       },
>       [ICE_TX_SIMPLE] = {
>               .pkt_burst = ice_xmit_pkts_simple,
> -             .info = "Scalar Simple"
> +             .info = "Scalar Simple",
> +             .features = {
> +                     .tx_offloads = ICE_TX_SCALAR_OFFLOADS,
> +                     .extra.simple_tx = true
> +             },
> +             .pkt_prep = rte_eth_tx_pkt_prepare_dummy
>       },
>  #ifdef RTE_ARCH_X86
>       [ICE_TX_SSE] = {
>               .pkt_burst = ice_xmit_pkts_vec,
> -             .info = "Vector SSE"
> +             .info = "Vector SSE",
> +             .features = {
> +                     .tx_offloads = ICE_TX_VECTOR_OFFLOADS,
> +                     .simd_width = RTE_VECT_SIMD_128
> +             },
> +             .pkt_prep = rte_eth_tx_pkt_prepare_dummy
>       },
>       [ICE_TX_AVX2] = {
>               .pkt_burst = ice_xmit_pkts_vec_avx2,
> -             .info = "Vector AVX2"
> +             .info = "Vector AVX2",
> +             .features = {
> +                     .tx_offloads = ICE_TX_VECTOR_OFFLOADS,
> +                     .simd_width = RTE_VECT_SIMD_256
> +             },
> +             .pkt_prep = rte_eth_tx_pkt_prepare_dummy
>       },
>       [ICE_TX_AVX2_OFFLOAD] = {
>               .pkt_burst = ice_xmit_pkts_vec_avx2_offload,
> -             .info = "Offload Vector AVX2"
> +             .info = "Offload Vector AVX2",
> +             .features = {
> +                     .tx_offloads = ICE_TX_VECTOR_OFFLOAD_OFFLOADS,
> +                     .simd_width = RTE_VECT_SIMD_256
> +             },
> +             .pkt_prep = ice_prep_pkts
>       },
>  #ifdef CC_AVX512_SUPPORT
>       [ICE_TX_AVX512] = {
>               .pkt_burst = ice_xmit_pkts_vec_avx512,
> -             .info = "Vector AVX512"
> +             .info = "Vector AVX512",
> +             .features = {
> +                     .tx_offloads = ICE_TX_VECTOR_OFFLOADS,
> +                     .simd_width = RTE_VECT_SIMD_512
> +             },
> +             .pkt_prep = rte_eth_tx_pkt_prepare_dummy
>       },
>       [ICE_TX_AVX512_OFFLOAD] = {
>               .pkt_burst = ice_xmit_pkts_vec_avx512_offload,
> -             .info = "Offload Vector AVX512"
> +             .info = "Offload Vector AVX512",
> +             .features = {
> +                     .tx_offloads = ICE_TX_VECTOR_OFFLOAD_OFFLOADS,
> +                     .simd_width = RTE_VECT_SIMD_512
> +             },
> +             .pkt_prep = ice_prep_pkts
>       },
>  #endif
>  #endif
> @@ -4135,85 +4166,36 @@ ice_set_tx_function(struct rte_eth_dev *dev)
>       struct ice_adapter *ad =
>               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>       int mbuf_check = ad->devargs.mbuf_check;
> -#ifdef RTE_ARCH_X86
> -     struct ci_tx_queue *txq;
> -     int i;
> -     int tx_check_ret = -1;
> -     enum rte_vect_max_simd tx_simd_width = RTE_VECT_SIMD_DISABLED;
> +     struct ci_tx_path_features req_features = {
> +             .tx_offloads = dev->data->dev_conf.txmode.offloads,
> +             .simd_width = RTE_VECT_SIMD_DISABLED,
> +     };
>  
>       /* The primary process selects the tx path for all processes. */
>       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
>               goto out;
>  
> -     tx_check_ret = ice_tx_vec_dev_check(dev);
> -     tx_simd_width = ice_get_max_simd_bitwidth();
> -     if (tx_check_ret >= 0 &&
> -             rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
> -             ad->tx_vec_allowed = true;
> -
> -             if (tx_simd_width < RTE_VECT_SIMD_256 &&
> -                     tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
> -                     ad->tx_vec_allowed = false;
> +     req_features.extra.simple_tx = ad->tx_simple_allowed;
>  
> -             if (ad->tx_vec_allowed) {
> -                     for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -                             txq = dev->data->tx_queues[i];
> -                             if (txq && ice_txq_vec_setup(txq)) {
> -                                     ad->tx_vec_allowed = false;
> -                                     break;
> -                             }
> -                     }
> -             }
> -     } else {
> -             ad->tx_vec_allowed = false;
> -     }
> -

The code being removed here was reworked in the previous patch. Do you think
it would be simpler to merge this patch with the previous one, rather than
adjusting the same code twice?

Reply via email to