The Multi-Packet Rx queue (MPRQ) uses PMD-managed buffers to store
packets. These buffers are externally attached to user mbufs.
This conflicts with the feature that allows an application to use
its own user-managed, externally attached data buffers.
Fall back to the Single-Packet Rx queue (SPRQ) in case an external
buffer mempool is configured.
Add the corresponding limitation to the MLX5 documentation: MPRQ and
external data buffers cannot be used together.

Signed-off-by: Alexander Kozyrev <akozy...@nvidia.com>
---
v3: fixed Coverity issue with NULL pointer dereference
v2: fixed typo in warning message
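
For reference, the fallback is triggered when the Rx queue mempool was
created with pinned external data buffers. Below is a minimal
application-side sketch of such a pool; the external memory region and
all sizes are hypothetical and for illustration only, and the flag test
mirrors the rte_pktmbuf_priv_flags() check added to
mlx5_rx_queue_setup() in this patch:

    #include <stdio.h>

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /*
     * Hypothetical externally allocated data area (e.g. GPU or device
     * memory). In practice buf_ptr/buf_iova must describe a real,
     * DMA-capable region registered with the device.
     */
    static struct rte_pktmbuf_extmem ext_mem = {
            .buf_ptr  = NULL,      /* virtual address of the external area */
            .buf_iova = 0,         /* IOVA of the external area */
            .buf_len  = 4 << 20,   /* 4 MB region, example value */
            .elt_size = 2048,      /* per-mbuf data buffer size, example */
    };

    static struct rte_mempool *
    create_pinned_extbuf_pool(int socket)
    {
            /* Mbufs from this pool carry pinned external data buffers. */
            struct rte_mempool *mp = rte_pktmbuf_pool_create_extbuf(
                    "rx_extbuf_pool", 4096, 256, 0, ext_mem.elt_size,
                    socket, &ext_mem, 1);

            if (mp != NULL && (rte_pktmbuf_priv_flags(mp) &
                               RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF))
                    /* mlx5 skips MPRQ and uses SPRQ for queues on this pool. */
                    printf("Rx pool uses pinned external buffers\n");
            return mp;
    }

With such a pool, mlx5_mprq_prepare() now reports MPRQ as unsupported
and the queue is created in Single-Packet Rx mode.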

 doc/guides/nics/mlx5.rst    |  4 +++-
 drivers/net/mlx5/mlx5_rx.h  |  2 +-
 drivers/net/mlx5/mlx5_rxq.c | 23 +++++++++++++++--------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 679481bed5..4799875263 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -260,7 +260,9 @@ Limitations
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
   Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
   the external buffers will be freed by PMD and the application which still
-  holds the external buffers may be corrupted.
+  holds the external buffers may be corrupted. User-managed mempools with
+  external pinned data buffers cannot be used in conjunction with MPRQ
+  since packets may already be attached to PMD-managed external buffers.
 
 - If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
   enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index acebe3348c..5bf88b6181 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -209,7 +209,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
                                   const struct rte_eth_rxseg_split *rx_seg,
-                                  uint16_t n_seg);
+                                  uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
        (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f16795bac3..925544ae3d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -840,6 +840,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        int res;
        uint64_t offloads = conf->offloads |
                            dev->data->dev_conf.rxmode.offloads;
+       bool is_extmem = false;
 
        if (mp) {
                /*
@@ -849,6 +850,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                 */
                rx_seg = &rx_single;
                n_seg = 1;
+               is_extmem = rte_pktmbuf_priv_flags(mp) &
+                           RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
        }
        if (n_seg > 1) {
                /* The offloads should be checked on rte_eth_dev layer. */
@@ -912,7 +915,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        }
        if (rxq_ctrl == NULL) {
                rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
-                                       n_seg);
+                                       n_seg, is_extmem);
                if (rxq_ctrl == NULL) {
                        DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
                                dev->data->port_id, idx);
@@ -1548,7 +1551,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
- *
+ * @param is_extmem
+ *   Is external pinned memory pool used.
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
  */
@@ -1556,7 +1560,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                  bool rx_seg_en, uint32_t min_mbuf_size,
                  uint32_t *actual_log_stride_num,
-                 uint32_t *actual_log_stride_size)
+                 uint32_t *actual_log_stride_size,
+                 bool is_extmem)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_port_config *config = &priv->config;
@@ -1575,7 +1580,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                log_max_stride_size);
        uint32_t log_stride_wqe_size;
 
-       if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+       if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
                goto unsupport;
        /* Checks if chosen number of strides is in supported range. */
        if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1641,7 +1646,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
                        "  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
                        " min_stride_sz = %u, max_stride_sz = %u).\n"
-                       "Rx segment is %senable.",
+                       "Rx segment is %senabled. External mempool is %sused.",
                        dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
                        RTE_BIT32(config->mprq.log_stride_size),
                        RTE_BIT32(config->mprq.log_stride_num),
@@ -1649,7 +1654,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
                        RTE_BIT32(dev_cap->mprq.log_min_stride_size),
                        RTE_BIT32(dev_cap->mprq.log_max_stride_size),
-                       rx_seg_en ? "" : "not ");
+                       rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
        return -1;
 }
 
@@ -1671,7 +1676,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
-            const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+            const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+            bool is_extmem)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
@@ -1694,7 +1700,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
                                               non_scatter_min_mbuf_size,
                                               &mprq_log_actual_stride_num,
-                                              &mprq_log_actual_stride_size);
+                                              &mprq_log_actual_stride_size,
+                                              is_extmem);
        /*
         * Always allocate extra slots, even if eventually
         * the vector Rx will not be used.
-- 
2.18.2