Hello Andrew/Thomas/Ferruh,

Thank you for your comments. I have addressed them and uploaded a new patch-set.
As tomorrow is the RC1 release, could you please help review the changes and
merge them if they look good.

Regards,
Hanumanth 

> -----Original Message-----
> From: Hanumanth Pothula <hpoth...@marvell.com>
> Sent: Thursday, October 6, 2022 11:24 PM
> To: Thomas Monjalon <tho...@monjalon.net>; Ferruh Yigit
> <ferruh.yi...@xilinx.com>; Andrew Rybchenko
> <andrew.rybche...@oktetlabs.ru>
> Cc: dev@dpdk.org; xuan.d...@intel.com; wenxuanx...@intel.com;
> xiaoyun...@intel.com; step...@networkplumber.org; yuanx.w...@intel.com;
> m...@ashroe.eu; yuying.zh...@intel.com; qi.z.zh...@intel.com;
> viachesl...@nvidia.com; Jerin Jacob Kollanukkaran <jer...@marvell.com>;
> Nithin Kumar Dabilpuram <ndabilpu...@marvell.com>; Hanumanth Reddy
> Pothula <hpoth...@marvell.com>
> Subject: [PATCH v6 1/3] ethdev: support multiple mbuf pools per Rx queue
> 
> This patch adds support for the multiple mempool capability.
> Some HW can select a memory pool based on the packet's size. This capability
> allows the PMD to choose a memory pool based on the packet's length.
> 
> This is often useful for saving memory: the application can create different
> pools to steer packets of specific sizes, thus enabling effective use of
> memory.
> 
> For example, let's say the HW has a capability of three pools:
>  - pool-1 size is 2K
>  - pool-2 size is > 2K and < 4K
>  - pool-3 size is > 4K
> Here,
>         pool-1 can accommodate packets with sizes < 2K
>         pool-2 can accommodate packets with sizes > 2K and < 4K
>         pool-3 can accommodate packets with sizes > 4K
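> 
> As a minimal sketch (not part of this patch), such pools could be created
> along the following lines; the pool names, element counts and cache size
> are illustrative, and the data room sizes assume 2K/4K/8K packet classes
> plus mbuf headroom:
> 
>   #include <errno.h>
>   #include <rte_mbuf.h>
>   #include <rte_mempool.h>
> 
>   /* One pool per packet-size class; data room = class size + headroom. */
>   static struct rte_mempool *pool_2k, *pool_4k, *pool_8k;
> 
>   static int
>   create_rx_pools(int socket_id)
>   {
>           pool_2k = rte_pktmbuf_pool_create("rx_pool_2k", 4096, 256, 0,
>                           2048 + RTE_PKTMBUF_HEADROOM, socket_id);
>           pool_4k = rte_pktmbuf_pool_create("rx_pool_4k", 2048, 256, 0,
>                           4096 + RTE_PKTMBUF_HEADROOM, socket_id);
>           pool_8k = rte_pktmbuf_pool_create("rx_pool_8k", 1024, 256, 0,
>                           8192 + RTE_PKTMBUF_HEADROOM, socket_id);
>           if (pool_2k == NULL || pool_4k == NULL || pool_8k == NULL)
>                   return -ENOMEM;
>           return 0;
>   }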
> 
> With the multiple mempool capability enabled in SW, an application may
> create three pools of different sizes and pass them to the PMD, allowing
> the PMD to program the HW based on the packet length: packets smaller than
> 2K are received on pool-1, packets with lengths between 2K and 4K are
> received on pool-2, and packets larger than 4K are received on pool-3.
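> 
> As a rough illustration (again not part of this patch) of how an application
> might then hand these pools to the PMD, reusing the pools from the sketch
> above; the function name, queue, descriptor count and socket are arbitrary:
> 
>   #include <errno.h>
>   #include <rte_common.h>
>   #include <rte_ethdev.h>
> 
>   static int
>   setup_multi_pool_rxq(uint16_t port_id, uint16_t queue_id, int socket_id,
>                        const struct rte_eth_dev_info *dev_info)
>   {
>           struct rte_eth_rxconf rxconf = dev_info->default_rxconf;
>           struct rte_mempool *pools[] = { pool_2k, pool_4k, pool_8k };
> 
>           /* The PMD reports how many pools it supports per Rx queue. */
>           if (RTE_DIM(pools) > dev_info->max_pools)
>                   return -EINVAL;
> 
>           rxconf.offloads |= RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL;
>           rxconf.rx_mempools = pools;
>           rxconf.rx_npool = RTE_DIM(pools);
> 
>           /* mb_pool argument is NULL; the queue takes its pools from rx_conf. */
>           return rte_eth_rx_queue_setup(port_id, queue_id, 512, socket_id,
>                                         &rxconf, NULL);
>   }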
> 
> Signed-off-by: Hanumanth Pothula <hpoth...@marvell.com>
> 
> v6:
>  - Updated release notes, release_22_11.rst.
> v5:
>  - Declared memory pools as struct rte_mempool **rx_mempools rather than
>    as struct rte_mempool *mp.
>  - Added the feature in release notes.
>  - Updated conditions and strings as per review comments.
> v4:
>  - Renamed offload capability from RTE_ETH_RX_OFFLOAD_BUFFER_SORT
>    to RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL.
>  - In struct rte_eth_rxconf, defined a new pointer, which holds an array of
>    type struct rte_eth_rx_mempool (memory pools). This array is used
>    by the PMD to program multiple mempools.
> v3:
>  - Implemented Pool Sort capability as new Rx offload capability,
>    RTE_ETH_RX_OFFLOAD_BUFFER_SORT.
> v2:
>  - Along with spec changes, uploading testpmd and driver changes.
> ---
>  doc/guides/rel_notes/release_22_11.rst |  6 +++
>  lib/ethdev/rte_ethdev.c                | 74 ++++++++++++++++++++++----
>  lib/ethdev/rte_ethdev.h                | 22 ++++++++
>  3 files changed, 92 insertions(+), 10 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
> index 2e076ba2ad..8bb19155d9 100644
> --- a/doc/guides/rel_notes/release_22_11.rst
> +++ b/doc/guides/rel_notes/release_22_11.rst
> @@ -55,6 +55,12 @@ New Features
>       Also, make sure to start the actual text at the margin.
>       =======================================================
> 
> +* **Added ethdev support for multiple mbuf pools per Rx queue.**
> +
> +  * Added new Rx offload flag ``RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL`` to support
> +    multiple mbuf pools per Rx queue. This capability allows the PMD to choose
> +    a memory pool based on the packet's length.
> +
>  * **Updated Wangxun ngbe driver.**
> 
>    * Added support to set device link down/up.
> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
> index 1979dc0850..eed4834e6b 100644
> --- a/lib/ethdev/rte_ethdev.c
> +++ b/lib/ethdev/rte_ethdev.c
> @@ -1634,6 +1634,44 @@ rte_eth_dev_is_removed(uint16_t port_id)
>       return ret;
>  }
> 
> +static int
> +rte_eth_rx_queue_check_mempool(struct rte_mempool **rx_mempool,
> +                            uint16_t n_pool, uint32_t *mbp_buf_size,
> +                            const struct rte_eth_dev_info *dev_info)
> +{
> +     uint16_t pool_idx;
> +
> +     if (n_pool > dev_info->max_pools) {
> +             RTE_ETHDEV_LOG(ERR,
> +                            "Too many Rx mempools %u vs maximum %u\n",
> +                            n_pool, dev_info->max_pools);
> +             return -EINVAL;
> +     }
> +
> +     for (pool_idx = 0; pool_idx < n_pool; pool_idx++) {
> +             struct rte_mempool *mpl = rx_mempool[pool_idx];
> +
> +             if (mpl == NULL) {
> +                     RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
> +                     return -EINVAL;
> +             }
> +
> +             *mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
> +             if (*mbp_buf_size < dev_info->min_rx_bufsize +
> +                 RTE_PKTMBUF_HEADROOM) {
> +                     RTE_ETHDEV_LOG(ERR,
> +                                    "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
> +                                    mpl->name, *mbp_buf_size,
> +                                    RTE_PKTMBUF_HEADROOM + dev_info->min_rx_bufsize,
> +                                    RTE_PKTMBUF_HEADROOM,
> +                                    dev_info->min_rx_bufsize);
> +                     return -EINVAL;
> +             }
> +     }
> +
> +     return 0;
> +}
> +
>  static int
>  rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
>                            uint16_t n_seg, uint32_t *mbp_buf_size,
> @@ -1733,9 +1771,12 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
> 
>       if (mp != NULL) {
>               /* Single pool configuration check. */
> -             if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
> +             if (((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) &&
> +                 rx_conf != NULL && rx_conf->rx_nseg != 0) ||
> +                ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) &&
> +                 rx_conf != NULL && rx_conf->rx_npool != 0)) {
>                       RTE_ETHDEV_LOG(ERR,
> -                                    "Ambiguous segment configuration\n");
> +                                    "Ambiguous Rx mempools
> configuration\n");
>                       return -EINVAL;
>               }
>               /*
> @@ -1763,30 +1804,43 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>                                      dev_info.min_rx_bufsize);
>                       return -EINVAL;
>               }
> -     } else {
> +     } else if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
>               const struct rte_eth_rxseg_split *rx_seg;
>               uint16_t n_seg;
> 
>               /* Extended multi-segment configuration check. */
>               if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
>                       RTE_ETHDEV_LOG(ERR,
> -                                    "Memory pool is null and no extended configuration provided\n");
> +                                    "Memory pool is null and no multi-segment configuration provided\n");
>                       return -EINVAL;
>               }
> 
>               rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
>               n_seg = rx_conf->rx_nseg;
> 
> -             if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> -                     ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
> +             ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
>                                                          &mbp_buf_size,
>                                                          &dev_info);
> -                     if (ret != 0)
> -                             return ret;
> -             } else {
> -                     RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
> +             if (ret != 0)
> +                     return ret;
> +     } else if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) {
> +             /* Extended multi-pool configuration check. */
> +             if (rx_conf == NULL || rx_conf->rx_mempools == NULL || rx_conf->rx_npool == 0) {
> +                     RTE_ETHDEV_LOG(ERR,
> +                                    "Memory pool is null and no multi-pool configuration provided\n");
>                       return -EINVAL;
>               }
> +
> +             ret = rte_eth_rx_queue_check_mempool(rx_conf->rx_mempools,
> +                                                  rx_conf->rx_npool,
> +                                                  &mbp_buf_size,
> +                                                  &dev_info);
> +
> +             if (ret != 0)
> +                     return ret;
> +     } else {
> +             RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
> +             return -EINVAL;
>       }
> 
>       /* Use default specified by driver, if nb_rx_desc is zero */
> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
> index b62ac5bb6f..306c2b3573 100644
> --- a/lib/ethdev/rte_ethdev.h
> +++ b/lib/ethdev/rte_ethdev.h
> @@ -1067,6 +1067,25 @@ struct rte_eth_rxconf {
>        */
>       union rte_eth_rxseg *rx_seg;
> 
> +     /**
> +      * Points to an array of mempools.
> +      *
> +      * Valid only when the RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL flag is set in
> +      * Rx offloads.
> +      *
> +      * This provides support for multiple mbuf pools per Rx queue.
> +      *
> +      * This is often useful for saving memory where the application can
> +      * create different pools to steer packets of specific sizes, thus
> +      * enabling effective use of memory.
> +      *
> +      * Note that when Rx scatter is enabled, a packet may be delivered
> +      * using a chain of mbufs obtained from a single mempool or from
> +      * multiple mempools, based on the NIC implementation.
> +      */
> +     struct rte_mempool **rx_mempools;
> +     uint16_t rx_npool; /**< Number of mempools */
> +
>       uint64_t reserved_64s[2]; /**< Reserved for future fields */
>       void *reserved_ptrs[2];   /**< Reserved for future fields */
>  };
> @@ -1395,6 +1414,7 @@ struct rte_eth_conf {
>  #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(18)
>  #define RTE_ETH_RX_OFFLOAD_RSS_HASH         RTE_BIT64(19)
>  #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     RTE_BIT64(20)
> +#define RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL      RTE_BIT64(21)
> 
>  #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
>                                RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
> @@ -1615,6 +1635,8 @@ struct rte_eth_dev_info {
>       /** Configured number of Rx/Tx queues */
>       uint16_t nb_rx_queues; /**< Number of Rx queues. */
>       uint16_t nb_tx_queues; /**< Number of Tx queues. */
> +     /** Maximum number of pools supported per Rx queue. */
> +     uint16_t max_pools;
>       /** Rx parameter recommendations */
>       struct rte_eth_dev_portconf default_rxportconf;
>       /** Tx parameter recommendations */
> --
> 2.25.1
