Tuesday, April 2, 2019 9:23 AM, Viacheslav Ovsiienko:
> Subject: [PATCH 4/4] net/mlx5: add mutex for shared DV/DR structures

Same comment about title. 

> 
> This patch introduces the mutex for shared DV/DR structures.
> The application may have multiple threads (but a single dedicated thread per port).
> Because the IB context is shared in multiport IB device configurations, access
> to the shared DV/DR flow structures must be synchronized.
> 
> Signed-off-by: Viacheslav Ovsiienko <[email protected]>
> ---
>  drivers/net/mlx5/mlx5.c         |   6 +++
>  drivers/net/mlx5/mlx5.h         |   1 +
>  drivers/net/mlx5/mlx5_flow_dv.c | 103 ++++++++++++++++++++++++++++++++++++++--
>  3 files changed, 106 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index 369b698..96ad4c6 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -312,6 +312,7 @@ struct mlx5_dev_spawn_data {
>  mlx5_alloc_shared_dv(struct mlx5_priv *priv)
>  {
>       struct mlx5_ibv_shared *sh = priv->sh;
> +     pthread_mutexattr_t mattr;
>       int err = 0;
>       void *ns;
> 
> @@ -336,6 +337,10 @@ struct mlx5_dev_spawn_data {
>               err = errno;
>               goto error;
>       }
> +     pthread_mutexattr_init(&mattr);
> +     pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);

Please add an inline comment explaining why you use this type of mutex (so that a
single mutex can be used for the entire flow engine logic).
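For example, something along these lines (the wording is only a suggestion):

        /* Use a recursive mutex: the single lock covers the whole flow
         * engine logic, so nested calls within the engine may take it
         * again while the same thread already holds it.
         */
        pthread_mutexattr_init(&mattr);
        pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&sh->dv_mutex, &mattr);
        pthread_mutexattr_destroy(&mattr);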

> +     pthread_mutex_init(&sh->dv_mutex, &mattr);
> +     pthread_mutexattr_destroy(&mattr);
>       sh->tx_ns = ns;
>       sh->dv_refcnt++;
>       priv->dv_shared = 1;
> @@ -381,6 +386,7 @@ struct mlx5_dev_spawn_data {
>               mlx5dv_dr_destroy_ns(sh->tx_ns);
>               sh->tx_ns = NULL;
>       }
> +     pthread_mutex_destroy(&sh->dv_mutex);
>  }
>  #endif
> 
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index e67227f..4f6c1b7 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -223,6 +223,7 @@ struct mlx5_ibv_shared {
>       char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
>       struct ibv_device_attr_ex device_attr; /* Device properties. */
>       /* Shared DV/DR flow data section. */
> +     pthread_mutex_t dv_mutex; /* DV context mutex. */
>       uint32_t dv_refcnt; /* DV/DR data reference counter. */
>       void *rx_ns; /* RX Direct Rules name space handle. */
>       struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index 4912fc8..737011a 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -125,6 +125,45 @@ struct field_modify_info modify_tcp[] = {
>  };
> 
>  /**
> + * Acquire the synchronizing object to protect multithreaded access
> + * to the shared DV context. The lock is taken only if the context is
> + * actually shared, i.e. we have a multiport IB device and representors
> + * are created.
> + *
> + * @param[in] dev
> + *   Pointer to the rte_eth_dev structure.
> + */
> +static void
> +flow_d_shared_lock(struct rte_eth_dev *dev)
> +{
> +     struct mlx5_priv *priv = dev->data->dev_private;
> +     struct mlx5_ibv_shared *sh = priv->sh;
> +
> +     if (sh->dv_refcnt > 1) {
> +             int ret;
> +
> +             ret = pthread_mutex_lock(&sh->dv_mutex);
> +             assert(!ret);
> +             (void)ret;
> +     }
> +}
> +
> +static void
> +flow_d_shared_unlock(struct rte_eth_dev *dev)
> +{
> +     struct mlx5_priv *priv = dev->data->dev_private;
> +     struct mlx5_ibv_shared *sh = priv->sh;
> +
> +     if (sh->dv_refcnt > 1) {
> +             int ret;
> +
> +             ret = pthread_mutex_unlock(&sh->dv_mutex);
> +             assert(!ret);
> +             (void)ret;
> +     }
> +}
> +
> +/**
>   * Convert modify-header action to DV specification.
>   *
>   * @param[in] item
> @@ -3958,14 +3997,70 @@ struct field_modify_info modify_tcp[] = {
>       return ret;
>  }
> 
> +/*
> + * Mutex-protected thunk to flow_dv_translate().
> + */
> +static int
> +flow_d_translate(struct rte_eth_dev *dev,
> +              struct mlx5_flow *dev_flow,
> +              const struct rte_flow_attr *attr,
> +              const struct rte_flow_item items[],
> +              const struct rte_flow_action actions[],
> +              struct rte_flow_error *error)
> +{
> +     int ret;
> +
> +     flow_d_shared_lock(dev);
> +     ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
> +     flow_d_shared_unlock(dev);
> +     return ret;
> +}
> +
> +/*
> + * Mutex-protected thunk to flow_dv_apply().
> + */
> +static int
> +flow_d_apply(struct rte_eth_dev *dev,
> +          struct rte_flow *flow,
> +          struct rte_flow_error *error)
> +{
> +     int ret;
> +
> +     flow_d_shared_lock(dev);
> +     ret = flow_dv_apply(dev, flow, error);
> +     flow_d_shared_unlock(dev);
> +     return ret;
> +}
> +
> +/*
> + * Mutex-protected thunk to flow_dv_remove().
> + */
> +static void
> +flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
> +{
> +     flow_d_shared_lock(dev);
> +     flow_dv_remove(dev, flow);
> +     flow_d_shared_unlock(dev);
> +}
> +
> +/*
> + * Mutex-protected thunk to flow_dv_destroy().
> + */
> +static void
> +flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
> +{
> +     flow_d_shared_lock(dev);
> +     flow_dv_destroy(dev, flow);
> +     flow_d_shared_unlock(dev);
> +}
> 
>  const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
>       .validate = flow_dv_validate,
>       .prepare = flow_dv_prepare,
> -     .translate = flow_dv_translate,
> -     .apply = flow_dv_apply,
> -     .remove = flow_dv_remove,
> -     .destroy = flow_dv_destroy,
> +     .translate = flow_d_translate,
> +     .apply = flow_d_apply,
> +     .remove = flow_d_remove,
> +     .destroy = flow_d_destroy,
>       .query = flow_dv_query,
>  };
> 
> --
> 1.8.3.1
