Add support for encap hash calculation.

The mlx5 PMD implements the new flow_calc_encap_hash callback: the 5-tuple
is taken from the given pattern and the hash is computed by HWS with
mlx5dr_crc_encap_entropy_hash_calc(), returning a 16-bit value when the
destination field is the source port and an 8-bit value otherwise.

Signed-off-by: Ori Kam <or...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 29 +++++++++++++++
 drivers/net/mlx5/mlx5_flow.h    |  8 ++++
 drivers/net/mlx5/mlx5_flow_hw.c | 66 +++++++++++++++++++++++++++++++++
 3 files changed, 103 insertions(+)

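Not part of the patch, for reviewers: a minimal sketch of how an application
could exercise this path through the generic rte_flow_calc_encap_hash() entry
point added on the ethdev side of this series. The prototype used below
(port id, pattern, destination field, hash length, output buffer, error) is
assumed from that patch, and the port id, addresses and ports are illustrative
only; the PMD reads the 5-tuple fields from the pattern, as in
flow_hw_calc_encap_hash() below.

#include <stdio.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
example_encap_hash(uint16_t port_id)
{
	/* Headers the encap action would hash (illustrative values). */
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
		.hdr.dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = RTE_BE16(1234),
		.hdr.dst_port = RTE_BE16(4789),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	uint8_t hash[2]; /* 16-bit result for the source port destination */
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_calc_encap_hash(port_id, pattern,
				       RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT,
				       sizeof(hash), hash, &error);
	if (ret)
		printf("encap hash failed: %s\n",
		       error.message ? error.message : "(no message)");
	return ret;
}
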
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5159e8e773..0fb6b3b374 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1197,6 +1197,12 @@ mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
                          const struct rte_flow_item pattern[],
                          uint8_t pattern_template_index,
                          uint32_t *hash, struct rte_flow_error *error);
+static int
+mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
+                         const struct rte_flow_item pattern[],
+                         enum rte_flow_encap_hash_field dest_field,
+                         uint8_t *hash,
+                         struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
@@ -1253,6 +1259,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .async_action_list_handle_query_update =
                mlx5_flow_async_action_list_handle_query_update,
        .flow_calc_table_hash = mlx5_flow_calc_table_hash,
+       .flow_calc_encap_hash = mlx5_flow_calc_encap_hash,
 };
 
 /* Tunnel information. */
@@ -11121,6 +11128,28 @@ mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
                                          hash, error);
 }
 
+static int
+mlx5_flow_calc_encap_hash(struct rte_eth_dev *dev,
+                         const struct rte_flow_item pattern[],
+                         enum rte_flow_encap_hash_field dest_field,
+                         uint8_t *hash,
+                         struct rte_flow_error *error)
+{
+       enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, NULL);
+       const struct mlx5_flow_driver_ops *fops;
+
+       if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         NULL, "invalid driver type");
+       fops = flow_get_drv_ops(drv_type);
+       if (!fops || !fops->flow_calc_encap_hash)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         NULL, "no calc encap hash handler");
+       return fops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
+}
+
 /**
  * Destroy all indirect actions (shared RSS).
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c9cc942d80..5a8404b9b3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2195,6 +2195,13 @@ typedef int
                         const struct rte_flow_item pattern[],
                         uint8_t pattern_template_index,
                         uint32_t *hash, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_calc_encap_hash_t)
+                       (struct rte_eth_dev *dev,
+                        const struct rte_flow_item pattern[],
+                        enum rte_flow_encap_hash_field dest_field,
+                        uint8_t *hash,
+                        struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
        mlx5_flow_validate_t validate;
@@ -2268,6 +2275,7 @@ struct mlx5_flow_driver_ops {
        mlx5_flow_async_action_list_handle_query_update_t
                async_action_list_handle_query_update;
        mlx5_flow_calc_table_hash_t flow_calc_table_hash;
+       mlx5_flow_calc_encap_hash_t flow_calc_encap_hash;
 };
 
 /* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7510715189..d81ca96b7b 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -11628,6 +11628,71 @@ flow_hw_calc_table_hash(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
+                       const struct rte_flow_item pattern[],
+                       enum rte_flow_encap_hash_field dest_field,
+                       uint8_t *hash,
+                       struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5dr_crc_encap_entropy_hash_fields data;
+       enum mlx5dr_crc_encap_entropy_hash_size res_size =
+                       dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
+                               MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
+                               MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
+       int res;
+
+       memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
+
+       for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+               switch (pattern->type) {
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       data.dst.ipv4_addr =
+                               ((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
+                       data.src.ipv4_addr =
+                               ((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       memcpy(data.dst.ipv6_addr,
+                              ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
+                              sizeof(data.dst.ipv6_addr));
+                       memcpy(data.src.ipv6_addr,
+                              ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
+                              sizeof(data.src.ipv6_addr));
+                       break;
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       data.next_protocol = IPPROTO_UDP;
+                       data.dst_port =
+                               ((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
+                       data.src_port =
+                               ((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       data.next_protocol = IPPROTO_TCP;
+                       data.dst_port =
+                               ((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
+                       data.src_port =
+                               ((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_ICMP:
+                       data.next_protocol = IPPROTO_ICMP;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_ICMP6:
+                       data.next_protocol = IPPROTO_ICMPV6;
+                       break;
+               default:
+                       break;
+               }
+       }
+       res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
+       if (res)
+               return rte_flow_error_set(error, res,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, "error while calculating encap 
hash");
+       return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .info_get = flow_hw_info_get,
        .configure = flow_hw_configure,
@@ -11673,6 +11738,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .item_create = flow_dv_item_create,
        .item_release = flow_dv_item_release,
        .flow_calc_table_hash = flow_hw_calc_table_hash,
+       .flow_calc_encap_hash = flow_hw_calc_encap_hash,
 };
 
 /**
-- 
2.34.1