> -----Original Message-----
> From: dev <dev-boun...@dpdk.org> On Behalf Of Xiaoyu Min
> Sent: Thursday, October 17, 2019 13:50
> To: Shahaf Shuler <shah...@mellanox.com>; Yongseok Koh
> <ys...@mellanox.com>
> Cc: dev@dpdk.org; sta...@dpdk.org; Zengmo Gao <gaozen...@jd.com>
> Subject: [dpdk-dev] [PATCH] net/mlx5: fix segfault when creating drop hash Rx queue
> 
> When creating the hrxq for the drop queue, QP creation can fail and
> jump to the error handler, which releases the already created
> ind_table by calling the drop release function. That function takes
> the rte_eth_dev as its only parameter and locates the object to
> release through priv->drop_queue.hrxq.
> 
> Unfortunately, at this point the hrxq has not been allocated yet and
> priv->drop_queue.hrxq is still NULL, which leads to a segfault.
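
For context, a minimal standalone model of the failing pattern (a toy
sketch in plain C; the names only mirror those in the commit message,
this is not the actual driver code):

    #include <stdlib.h>

    /* Toy model: the drop release helper locates the object to free
     * through priv->drop_queue.hrxq, so calling it before the hrxq is
     * allocated dereferences a NULL pointer. */
    struct hrxq { void *ind_table; };
    struct priv { struct { struct hrxq *hrxq; } drop_queue; };

    static void drop_release(struct priv *priv)
    {
        /* crashes when priv->drop_queue.hrxq == NULL */
        free(priv->drop_queue.hrxq->ind_table);
    }

    int main(void)
    {
        struct priv priv = { .drop_queue = { .hrxq = NULL } };
        drop_release(&priv); /* NULL->ind_table: the reported segfault */
        return 0;
    }
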
> 
> This patch fixes the above by allocating the hrxq first, so that when
> an error happens the hrxq is released last.
> 
> This patch also releases the other allocated resources in the correct
> (reverse) order, which was previously missing.
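
The fix follows the usual goto-unwind idiom: initialize every resource
pointer to NULL, acquire in order, and release in reverse order from a
single error label. A generic sketch of that idiom (plain C, not the
driver code):

    #include <stdlib.h>

    static int build(void **out_a, void **out_b)
    {
        void *a = NULL, *b = NULL;

        a = malloc(16);
        if (!a)
            goto error;
        b = malloc(16);
        if (!b)
            goto error;
        *out_a = a;
        *out_b = b;
        return 0;
    error:
        /* reverse order of acquisition; free(NULL) is a no-op, so one
         * label is safe no matter where the failure happened */
        free(b);
        free(a);
        return -1;
    }
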
> 
> Fixes: 78be885295b8 ("net/mlx5: handle drop queues as regular queues")
> Cc: sta...@dpdk.org
> 
> Reported-by: Zengmo Gao <gaozen...@jd.com>
> Signed-off-by: Xiaoyu Min <jack...@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viachesl...@mellanox.com>

> ---
>  drivers/net/mlx5/mlx5_rxq.c | 38 +++++++++++++++++++++++--------------
>  1 file changed, 24 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 0db065a22c..f0ab8438d3 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -2536,17 +2536,27 @@ struct mlx5_hrxq *
>  mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
>  {
>       struct mlx5_priv *priv = dev->data->dev_private;
> -     struct mlx5_ind_table_obj *ind_tbl;
> -     struct ibv_qp *qp;
> -     struct mlx5_hrxq *hrxq;
> +     struct mlx5_ind_table_obj *ind_tbl = NULL;
> +     struct ibv_qp *qp = NULL;
> +     struct mlx5_hrxq *hrxq = NULL;
> 
>       if (priv->drop_queue.hrxq) {
>               rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
>               return priv->drop_queue.hrxq;
>       }
> +     hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
> +     if (!hrxq) {
> +             DRV_LOG(WARNING,
> +                     "port %u cannot allocate memory for drop queue",
> +                     dev->data->port_id);
> +             rte_errno = ENOMEM;
> +             goto error;
> +     }
> +     priv->drop_queue.hrxq = hrxq;
>       ind_tbl = mlx5_ind_table_obj_drop_new(dev);
>       if (!ind_tbl)
> -             return NULL;
> +             goto error;
> +     hrxq->ind_table = ind_tbl;
>       qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
>                &(struct ibv_qp_init_attr_ex){
>                       .qp_type = IBV_QPT_RAW_PACKET,
> @@ -2570,15 +2580,6 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
>               rte_errno = errno;
>               goto error;
>       }
> -     hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
> -     if (!hrxq) {
> -             DRV_LOG(WARNING,
> -                     "port %u cannot allocate memory for drop queue",
> -                     dev->data->port_id);
> -             rte_errno = ENOMEM;
> -             goto error;
> -     }
> -     hrxq->ind_table = ind_tbl;
>       hrxq->qp = qp;
>  #ifdef HAVE_IBV_FLOW_DV_SUPPORT
>       hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
> @@ -2587,12 +2588,21 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
>               goto error;
>       }
>  #endif
> -     priv->drop_queue.hrxq = hrxq;
>       rte_atomic32_set(&hrxq->refcnt, 1);
>       return hrxq;
>  error:
> +#ifdef HAVE_IBV_FLOW_DV_SUPPORT
> +     if (hrxq && hrxq->action)
> +             mlx5_glue->destroy_flow_action(hrxq->action);
> +#endif
> +     if (qp)
> +             claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
>       if (ind_tbl)
>               mlx5_ind_table_obj_drop_release(dev);
> +     if (hrxq) {
> +             priv->drop_queue.hrxq = NULL;
> +             rte_free(hrxq);
> +     }
>       return NULL;
>  }
> 
> --
> 2.23.0
