> -----Original Message-----
> From: Ke Zhang <ke1x.zh...@intel.com>
> Sent: Monday, May 9, 2022 9:16 AM
> To: Li, Xiaoyun <xiaoyun...@intel.com>; Wu, Jingjing <jingjing...@intel.com>;
> Xing, Beilei <beilei.x...@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zh...@intel.com>
> Subject: [PATCH v3] fix mbuf release function pointer corruption in multi-process
> 
> In a multi-process environment, a secondary process operating on the shared
> memory overwrites the mbuf-release function pointer set by the primary
> process with its own process-local address. When the primary process later
> releases the queue, the pointer no longer resolves to a valid function and
> the process crashes.
> 
> Signed-off-by: Ke Zhang <ke1x.zh...@intel.com>
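
Just to spell out the failure mode as I understand it from the diff below: the
queue structures live in shared memory, and before this patch they stored the
ops function pointers directly, e.g.

	rxq->ops = &def_rxq_ops;           /* process-local address written
	                                    * into the shared queue struct  */
	...
	rxq->ops->release_mbufs(rxq);      /* primary jumps through whatever
	                                    * address is stored there now   */

Since &def_rxq_ops (and the .release_mbufs pointer inside it) is only valid in
the process that wrote it, a secondary process touching the queue leaves the
primary with an address it cannot use, hence the crash on release. Keeping an
enum index in the shared struct and resolving it through a process-local const
table, as this patch does, avoids dereferencing a foreign pointer.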
> ---
>  drivers/net/iavf/iavf_rxtx.c            | 50 ++++++++++++++++++++-----
>  drivers/net/iavf/iavf_rxtx.h            | 12 ++++++
>  drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 +---
>  drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++------
>  4 files changed, 58 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
> index 16e8d021f9..8d7f3c4316 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
>       }
>  }
> 
> -static const struct iavf_rxq_ops def_rxq_ops = {
> +static const
> +struct iavf_rxq_ops def_rxq_ops = {
>       .release_mbufs = release_rxq_mbufs,
>  };
> 
> -static const struct iavf_txq_ops def_txq_ops = {
> +static const
> +struct iavf_txq_ops def_txq_ops = {
>       .release_mbufs = release_txq_mbufs,
>  };
> 
> +static const
> +struct iavf_rxq_ops sse_vec_rxq_ops = {
> +     .release_mbufs = iavf_rx_queue_release_mbufs_sse,
> +};
> +
> +static const
> +struct iavf_txq_ops sse_vec_txq_ops = {
> +     .release_mbufs = iavf_tx_queue_release_mbufs_sse,
> +};
> +
> +static const
> +struct iavf_txq_ops avx512_vec_txq_ops = {
> +     .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
> +};
> +
> +static const
> +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[IAVF_REL_MBUFS_LAST + 1] = {
> +     [IAVF_REL_MBUFS_NORMAL] = def_rxq_ops,
> +     [IAVF_REL_MBUFS_VEC] = sse_vec_rxq_ops,
> +};

Please align the macro names with the ops names: replace NORMAL with DEFAULT and
VEC with SSE.
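i.e. something along these lines (just a sketch, final names are up to you; I'm
only renaming the two values mentioned above and leaving the AVX512 one as-is):

	enum iavf_rxtx_rel_mbufs_type {
		IAVF_REL_MBUFS_DEFAULT		= 0,
		IAVF_REL_MBUFS_VEC_AVX512	= 1,
		IAVF_REL_MBUFS_SSE		= 2,
	};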

> +
> +static const
> +struct iavf_txq_ops iavf_txq_release_mbufs_ops[IAVF_REL_MBUFS_LAST + 1] = {
> +     [IAVF_REL_MBUFS_NORMAL] = def_txq_ops,
> +     [IAVF_REL_MBUFS_VEC_AVX512] = avx512_vec_txq_ops,
> +     [IAVF_REL_MBUFS_VEC] = sse_vec_txq_ops,
> +};


Please re-order it to align with Rx: default -> sse -> avx512.
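i.e. keep the Tx initializers in the same order as the Rx table:

	[IAVF_REL_MBUFS_NORMAL] = def_txq_ops,
	[IAVF_REL_MBUFS_VEC] = sse_vec_txq_ops,
	[IAVF_REL_MBUFS_VEC_AVX512] = avx512_vec_txq_ops,

(keeping the v3 names here; the rename comment above applies as well).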

> +
>  static inline void
>  iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
>                                   struct rte_mbuf *mb,
> @@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>       rxq->q_set = true;
>       dev->data->rx_queues[queue_idx] = rxq;
>       rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
> -     rxq->ops = &def_rxq_ops;
> +     rxq->rel_mbufs_type = IAVF_REL_MBUFS_NORMAL;
> 
>       if (check_rx_bulk_allow(rxq) == true) {
>               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
> @@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>       txq->q_set = true;
>       dev->data->tx_queues[queue_idx] = txq;
>       txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
> -     txq->ops = &def_txq_ops;
> +     txq->rel_mbufs_type = IAVF_REL_MBUFS_NORMAL;
> 
>       if (check_tx_vec_allow(txq) == false) {
>               struct iavf_adapter *ad =
> @@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>       }
> 
>       rxq = dev->data->rx_queues[rx_queue_id];
> -     rxq->ops->release_mbufs(rxq);
> +     iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
>       reset_rx_queue(rxq);
>       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> 
> @@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>       }
> 
>       txq = dev->data->tx_queues[tx_queue_id];
> -     txq->ops->release_mbufs(txq);
> +     iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
>       reset_tx_queue(txq);
>       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> 
> @@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
>       if (!q)
>               return;
> 
> -     q->ops->release_mbufs(q);
> +     iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
>       rte_free(q->sw_ring);
>       rte_memzone_free(q->mz);
>       rte_free(q);
> @@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
>       if (!q)
>               return;
> 
> -     q->ops->release_mbufs(q);
> +     iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
>       rte_free(q->sw_ring);
>       rte_memzone_free(q->mz);
>       rte_free(q);
> @@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
>               txq = dev->data->tx_queues[i];
>               if (!txq)
>                       continue;
> -             txq->ops->release_mbufs(txq);
> +             iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
>               reset_tx_queue(txq);
>               dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
>       }
> @@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
>               rxq = dev->data->rx_queues[i];
>               if (!rxq)
>                       continue;
> -             rxq->ops->release_mbufs(rxq);
> +             iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
>               reset_rx_queue(rxq);
>               dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
>       }
> diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
> index bf8aebbce8..826f59da39 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -187,6 +187,7 @@ struct iavf_rx_queue {
>       struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
>       struct rte_mbuf fake_mbuf;      /* dummy mbuf */
>       uint8_t rxdid;
> +     uint8_t rel_mbufs_type;
> 
>       /* used for VPMD */
>       uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
> @@ -246,6 +247,7 @@ struct iavf_tx_queue {
>       uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
>       uint16_t free_thresh;
>       uint16_t rs_thresh;
> +     uint8_t rel_mbufs_type;
> 
>       uint16_t port_id;
>       uint16_t queue_id;
> @@ -389,6 +391,13 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
>       __le32 ipsec_said;
>  };
> 
> +enum iavf_rxtx_rel_mbufs_type {
> +     IAVF_REL_MBUFS_NORMAL   = 0,
> +     IAVF_REL_MBUFS_VEC_AVX512       = 1,
> +     IAVF_REL_MBUFS_VEC              = 2,
> +     IAVF_REL_MBUFS_LAST             = 63,

IAVF_REL_MBUFS_LAST is not necessary.
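
With designated initializers there is no need for a sentinel to size the
arrays; drop the explicit size and the compiler derives it from the highest
index used, e.g. (sketch):

	static const
	struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
		[IAVF_REL_MBUFS_NORMAL] = def_rxq_ops,
		[IAVF_REL_MBUFS_VEC] = sse_vec_rxq_ops,
	};

so both IAVF_REL_MBUFS_LAST and the [IAVF_REL_MBUFS_LAST + 1] sizing can go.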

