Hello Ferruh,

The patch failed the ci/Intel-compilation test.
(http://mails.dpdk.org/archives/test-report/2021-May/193327.html)
I have checked that with Thomas. He confirmed it is a known fault and
should not affect the current patch.
Can we proceed with the patch integration?

Thank you.

Regards,
Gregory 

> -----Original Message-----
> From: Gregory Etelson <getel...@nvidia.com>
> Sent: Thursday, May 6, 2021 12:58
> To: dev@dpdk.org
> Cc: Gregory Etelson <getel...@nvidia.com>; Matan Azrad
> <ma...@nvidia.com>; Ori Kam <or...@nvidia.com>; Raslan Darawsheh
> <rasl...@nvidia.com>; ferruh.yi...@intel.com; sta...@dpdk.org; Slava
> Ovsiienko <viachesl...@nvidia.com>; Shahaf Shuler
> <shah...@nvidia.com>
> Subject: [PATCH v4] net/mlx5: fix tunnel offload private items location
> 
> The tunnel offload API requires the application to query the PMD for
> PMD-specific flow items and actions. The application uses these private
> elements to build flow rules according to the tunnel offload model.
> The model does not restrict where the private elements may be placed in a
> flow rule, but the current MLX5 PMD implementation expects a tunnel offload
> rule to begin with the PMD-specific elements.
> The patch removes that placement limitation.
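> 
> For illustration only (not part of the patch): a minimal application-side
> sketch of a tunnel offload steer rule, assuming port 0, a VXLAN tunnel and
> that the PMD returns a single private action; the helper name and error
> handling are hypothetical. With this fix the PMD private action may follow
> the application's actions instead of having to open the action list:
> 
>     #include <stdint.h>
>     #include <rte_flow.h>
> 
>     static struct rte_flow *
>     tunnel_steer_rule_example(uint16_t port_id)
>     {
>             struct rte_flow_error error;
>             struct rte_flow_tunnel tunnel = {
>                     .type = RTE_FLOW_ITEM_TYPE_VXLAN,
>             };
>             struct rte_flow_action *pmd_actions;
>             uint32_t num_pmd_actions;
>             struct rte_flow_attr attr = { .ingress = 1 };
>             struct rte_flow_action_jump jump = { .group = 1 };
>             struct rte_flow_item pattern[] = {
>                     { .type = RTE_FLOW_ITEM_TYPE_ETH },
>                     { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
>                     { .type = RTE_FLOW_ITEM_TYPE_UDP },
>                     { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
>                     { .type = RTE_FLOW_ITEM_TYPE_END },
>             };
>             struct rte_flow_action actions[3];
> 
>             /* Query the PMD for its private tunnel-set actions. */
>             if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
>                                           &num_pmd_actions, &error))
>                     return NULL;
>             /* Application action first, PMD private action second:
>              * the private element no longer has to be placed first.
>              */
>             actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
>             actions[0].conf = &jump;
>             actions[1] = pmd_actions[0]; /* assumes num_pmd_actions == 1 */
>             actions[2].type = RTE_FLOW_ACTION_TYPE_END;
>             actions[2].conf = NULL;
>             return rte_flow_create(port_id, &attr, pattern, actions, &error);
>     }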
> 
> Cc: sta...@dpdk.org
> 
> Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")
> 
> Signed-off-by: Gregory Etelson <getel...@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viachesl...@nvidia.com>
> ---
> v2: Update the patch comment.
> v3: Remove testpmd patch from the series.
> v4: Remove redundant verifications in flow_dv_validate.
> ---
>  drivers/net/mlx5/mlx5_flow.c    | 48 ++++++++++++------
>  drivers/net/mlx5/mlx5_flow.h    | 46 ++++++++++-------
>  drivers/net/mlx5/mlx5_flow_dv.c | 88 ++++++++++++++++-----------------
>  3 files changed, 102 insertions(+), 80 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 3194cd5633..f464271d42 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -50,6 +50,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
>                            const struct rte_flow_attr *attr,
>                            const struct rte_flow_action *app_actions,
>                            uint32_t flow_idx,
> +                          const struct mlx5_flow_tunnel *tunnel,
>                            struct tunnel_default_miss_ctx *ctx,
>                            struct rte_flow_error *error);
>  static struct mlx5_flow_tunnel *
> @@ -5958,22 +5959,14 @@ flow_create_split_outer(struct rte_eth_dev *dev,
>       return ret;
>  }
> 
> -static struct mlx5_flow_tunnel *
> -flow_tunnel_from_rule(struct rte_eth_dev *dev,
> -                   const struct rte_flow_attr *attr,
> -                   const struct rte_flow_item items[],
> -                   const struct rte_flow_action actions[])
> +static inline struct mlx5_flow_tunnel *
> +flow_tunnel_from_rule(const struct mlx5_flow *flow)
>  {
>       struct mlx5_flow_tunnel *tunnel;
> 
>  #pragma GCC diagnostic push
>  #pragma GCC diagnostic ignored "-Wcast-qual"
> -     if (is_flow_tunnel_match_rule(dev, attr, items, actions))
> -             tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
> -     else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
> -             tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
> -     else
> -             tunnel = NULL;
> +     tunnel = (typeof(tunnel))flow->tunnel;
>  #pragma GCC diagnostic pop
> 
>       return tunnel;
> @@ -6168,12 +6161,11 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
>                                             error);
>               if (ret < 0)
>                       goto error;
> -             if (is_flow_tunnel_steer_rule(dev, attr,
> -                                           buf->entry[i].pattern,
> -                                           p_actions_rx)) {
> +             if (is_flow_tunnel_steer_rule(wks->flows[0].tof_type)) {
>                       ret = flow_tunnel_add_default_miss(dev, flow, attr,
>                                                          p_actions_rx,
>                                                          idx,
> +                                                        wks->flows[0].tunnel,
>                                                          &default_miss_ctx,
>                                                          error);
>                       if (ret < 0) {
> @@ -6237,7 +6229,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
>       }
>       flow_rxq_flags_set(dev, flow);
>       rte_free(translated_actions);
> -     tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
> +     tunnel = flow_tunnel_from_rule(wks->flows);
>       if (tunnel) {
>               flow->tunnel = 1;
>               flow->tunnel_id = tunnel->tunnel_id;
> @@ -8152,6 +8144,28 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
>       return ret;
>  }
> 
> +const struct mlx5_flow_tunnel *
> +mlx5_get_tof(const struct rte_flow_item *item,
> +          const struct rte_flow_action *action,
> +          enum mlx5_tof_rule_type *rule_type)
> +{
> +     for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +             if (item->type == (typeof(item->type))
> +                               MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL) {
> +                     *rule_type = MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
> +                     return flow_items_to_tunnel(item);
> +             }
> +     }
> +     for (; action->conf != RTE_FLOW_ACTION_TYPE_END; action++) {
> +             if (action->type == (typeof(action->type))
> +                               MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {
> +                     *rule_type = MLX5_TUNNEL_OFFLOAD_SET_RULE;
> +                     return flow_actions_to_tunnel(action);
> +             }
> +     }
> +     return NULL;
> +}
> +
>  /**
>   * tunnel offload functionalilty is defined for DV environment only
>   */
> @@ -8182,13 +8196,13 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
>                            const struct rte_flow_attr *attr,
>                            const struct rte_flow_action *app_actions,
>                            uint32_t flow_idx,
> +                          const struct mlx5_flow_tunnel *tunnel,
>                            struct tunnel_default_miss_ctx *ctx,
>                            struct rte_flow_error *error)
>  {
>       struct mlx5_priv *priv = dev->data->dev_private;
>       struct mlx5_flow *dev_flow;
>       struct rte_flow_attr miss_attr = *attr;
> -     const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
>       const struct rte_flow_item miss_items[2] = {
>               {
>                       .type = RTE_FLOW_ITEM_TYPE_ETH,
> @@ -8274,6 +8288,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
>       dev_flow->flow = flow;
>       dev_flow->external = true;
>       dev_flow->tunnel = tunnel;
> +     dev_flow->tof_type = MLX5_TUNNEL_OFFLOAD_MISS_RULE;
>       /* Subflow object was created, we must include one in the list. */
>       SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
>                     dev_flow->handle, next);
> @@ -8887,6 +8902,7 @@ flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
>                            __rte_unused const struct rte_flow_attr *attr,
>                            __rte_unused const struct rte_flow_action *actions,
>                            __rte_unused uint32_t flow_idx,
> +                          __rte_unused const struct mlx5_flow_tunnel *tunnel,
>                            __rte_unused struct tunnel_default_miss_ctx *ctx,
>                            __rte_unused struct rte_flow_error *error)
>  {
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 5365699426..04c8806bf6 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -819,6 +819,16 @@ struct mlx5_flow_verbs_workspace {
>  /** Maximal number of device sub-flows supported. */
>  #define MLX5_NUM_MAX_DEV_FLOWS 32
> 
> +/**
> + * tunnel offload rules type
> + */
> +enum mlx5_tof_rule_type {
> +     MLX5_TUNNEL_OFFLOAD_NONE = 0,
> +     MLX5_TUNNEL_OFFLOAD_SET_RULE,
> +     MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
> +     MLX5_TUNNEL_OFFLOAD_MISS_RULE,
> +};
> +
>  /** Device flow structure. */
>  __extension__
>  struct mlx5_flow {
> @@ -854,6 +864,7 @@ struct mlx5_flow {
>       struct mlx5_flow_handle *handle;
>       uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
>       const struct mlx5_flow_tunnel *tunnel;
> +     enum mlx5_tof_rule_type tof_type;
>  };
> 
>  /* Flow meter state. */
> @@ -949,10 +960,10 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
>  }
> 
>  static inline bool
> -is_tunnel_offload_active(struct rte_eth_dev *dev)
> +is_tunnel_offload_active(const struct rte_eth_dev *dev)
>  {
>  #ifdef HAVE_IBV_FLOW_DV_SUPPORT
> -     struct mlx5_priv *priv = dev->data->dev_private;
> +     const struct mlx5_priv *priv = dev->data->dev_private;
>       return !!priv->config.dv_miss_info;
>  #else
>       RTE_SET_USED(dev);
> @@ -961,23 +972,15 @@ is_tunnel_offload_active(struct rte_eth_dev *dev)
>  }
> 
>  static inline bool
> -is_flow_tunnel_match_rule(__rte_unused struct rte_eth_dev *dev,
> -                       __rte_unused const struct rte_flow_attr *attr,
> -                       __rte_unused const struct rte_flow_item items[],
> -                       __rte_unused const struct rte_flow_action actions[])
> +is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
>  {
> -     return (items[0].type == (typeof(items[0].type))
> -                              MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL);
> +     return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
>  }
> 
>  static inline bool
> -is_flow_tunnel_steer_rule(__rte_unused struct rte_eth_dev *dev,
> -                       __rte_unused const struct rte_flow_attr *attr,
> -                       __rte_unused const struct rte_flow_item items[],
> -                       __rte_unused const struct rte_flow_action actions[])
> +is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
>  {
> -     return (actions[0].type == (typeof(actions[0].type))
> -                                MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET);
> +     return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
>  }
> 
>  static inline const struct mlx5_flow_tunnel *
> @@ -1273,11 +1276,10 @@ struct flow_grp_info {
> 
>  static inline bool
>  tunnel_use_standard_attr_group_translate
> -                 (struct rte_eth_dev *dev,
> -                  const struct mlx5_flow_tunnel *tunnel,
> +                 (const struct rte_eth_dev *dev,
>                    const struct rte_flow_attr *attr,
> -                  const struct rte_flow_item items[],
> -                  const struct rte_flow_action actions[])
> +                  const struct mlx5_flow_tunnel *tunnel,
> +                  enum mlx5_tof_rule_type tof_rule_type)
>  {
>       bool verdict;
> 
> @@ -1293,7 +1295,7 @@ tunnel_use_standard_attr_group_translate
>                * method
>                */
>               verdict = !attr->group &&
> -                       is_flow_tunnel_steer_rule(dev, attr, items, actions);
> +                       is_flow_tunnel_steer_rule(tof_rule_type);
>       } else {
>               /*
>                * non-tunnel group translation uses standard method for
> @@ -1681,4 +1683,10 @@ int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
>  void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
>  void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
>                      struct mlx5_flow_handle *dev_handle);
> +const struct mlx5_flow_tunnel *
> +mlx5_get_tof(const struct rte_flow_item *items,
> +          const struct rte_flow_action *actions,
> +          enum mlx5_tof_rule_type *rule_type);
> +
> +
>  #endif /* RTE_PMD_MLX5_FLOW_H_ */
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index 70e8d0b113..10ca342edc 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -6627,10 +6627,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
>       uint32_t rw_act_num = 0;
>       uint64_t is_root;
>       const struct mlx5_flow_tunnel *tunnel;
> +     enum mlx5_tof_rule_type tof_rule_type;
>       struct flow_grp_info grp_info = {
>               .external = !!external,
>               .transfer = !!attr->transfer,
>               .fdb_def_rule = !!priv->fdb_def_rule,
> +             .std_tbl_fix = true,
>       };
>       const struct rte_eth_hairpin_conf *conf;
>       const struct rte_flow_item *rule_items = items;
> @@ -6638,23 +6640,22 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
> 
>       if (items == NULL)
>               return -1;
> -     if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
> -             tunnel = flow_items_to_tunnel(items);
> -             action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
> -                             MLX5_FLOW_ACTION_DECAP;
> -     } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
> -             tunnel = flow_actions_to_tunnel(actions);
> -             action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
> -     } else {
> -             tunnel = NULL;
> +     tunnel = is_tunnel_offload_active(dev) ?
> +              mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
> +     if (tunnel) {
> +             if (priv->representor)
> +                     return rte_flow_error_set
> +                             (error, ENOTSUP,
> +                              RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +                              NULL, "decap not supported for VF representor");
> +             if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE)
> +                     action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
> +             else if (tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE)
> +                     action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
> +                                     MLX5_FLOW_ACTION_DECAP;
> +             grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
> +                                     (dev, attr, tunnel, tof_rule_type);
>       }
> -     if (tunnel && priv->representor)
> -             return rte_flow_error_set(error, ENOTSUP,
> -                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
> -                                       "decap not supported "
> -                                       "for VF representor");
> -     grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
> -                             (dev, tunnel, attr, items, actions);
>       ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
>       if (ret < 0)
>               return ret;
> @@ -6668,15 +6669,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
>                                                 RTE_FLOW_ERROR_TYPE_ITEM,
>                                                 NULL, "item not supported");
>               switch (type) {
> -             case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
> -                     if (items[0].type != (typeof(items[0].type))
> -                                           MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
> -                             return rte_flow_error_set
> -                                             (error, EINVAL,
> -                                             RTE_FLOW_ERROR_TYPE_ITEM,
> -                                             NULL, "MLX5 private items "
> -                                             "must be the first");
> -                     break;
>               case RTE_FLOW_ITEM_TYPE_VOID:
>                       break;
>               case RTE_FLOW_ITEM_TYPE_PORT_ID:
> @@ -6975,6 +6967,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
>                       if (ret < 0)
>                               return ret;
>                       break;
> +             case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
> +                     /* tunnel offload item was processed before
> +                      * list it here as a supported type
> +                      */
> +                     break;
>               default:
>                       return rte_flow_error_set(error, ENOTSUP,
>                                                 RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -7516,17 +7513,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
>                       action_flags |= MLX5_FLOW_ACTION_SAMPLE;
>                       ++actions_n;
>                       break;
> -             case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
> -                     if (actions[0].type != (typeof(actions[0].type))
> -                                     MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
> -                             return rte_flow_error_set
> -                                             (error, EINVAL,
> -                                             RTE_FLOW_ERROR_TYPE_ACTION,
> -                                             NULL, "MLX5 private action "
> -                                             "must be the first");
> -
> -                     action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
> -                     break;
>               case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
>                       ret = flow_dv_validate_action_modify_field(dev,
>                                                                  action_flags,
> @@ -7551,6 +7537,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
>                               return ret;
>                       action_flags |= MLX5_FLOW_ACTION_CT;
>                       break;
> +             case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
> +                     /* tunnel offload action was processed before
> +                      * list it here as a supported type
> +                      */
> +                     break;
>               default:
>                       return rte_flow_error_set(error, ENOTSUP,
>                                                 RTE_FLOW_ERROR_TYPE_ACTION,
> @@ -12035,13 +12026,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
>       int tmp_actions_n = 0;
>       uint32_t table;
>       int ret = 0;
> -     const struct mlx5_flow_tunnel *tunnel;
> +     const struct mlx5_flow_tunnel *tunnel = NULL;
>       struct flow_grp_info grp_info = {
>               .external = !!dev_flow->external,
>               .transfer = !!attr->transfer,
>               .fdb_def_rule = !!priv->fdb_def_rule,
>               .skip_scale = dev_flow->skip_scale &
>                       (1 << MLX5_SCALE_FLOW_GROUP_BIT),
> +             .std_tbl_fix = true,
>       };
>       const struct rte_flow_item *head_item = items;
> 
> @@ -12057,15 +12049,21 @@ flow_dv_translate(struct rte_eth_dev *dev,
>                                             MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
>       /* update normal path action resource into last index of array */
>       sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
> -     tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
> -              flow_items_to_tunnel(items) :
> -              is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
> -              flow_actions_to_tunnel(actions) :
> -              dev_flow->tunnel ? dev_flow->tunnel : NULL;
> +     if (is_tunnel_offload_active(dev)) {
> +             if (dev_flow->tunnel) {
> +                     RTE_VERIFY(dev_flow->tof_type ==
> +                                MLX5_TUNNEL_OFFLOAD_MISS_RULE);
> +                     tunnel = dev_flow->tunnel;
> +             } else {
> +                     tunnel = mlx5_get_tof(items, actions,
> +                                           &dev_flow->tof_type);
> +                     dev_flow->tunnel = tunnel;
> +             }
> +             grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
> +                                     (dev, attr, tunnel, dev_flow->tof_type);
> +     }
>       mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
>                                          MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
> -     grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
> -                             (dev, tunnel, attr, items, actions);
>       ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
>                                      &grp_info, error);
>       if (ret)
> @@ -12075,7 +12073,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
>               mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
>       /* number of actions must be set to 0 in case of dirty stack. */
>       mhdr_res->actions_num = 0;
> -     if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
> +     if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
>               /*
>                * do not add decap action if match rule drops packet
>                * HW rejects rules with decap & drop
> --
> 2.31.1
