Signed-off-by: Xueming Li <xuemi...@mellanox.com>
---
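Note (illustration only, not part of the commit): a minimal sketch of how
an application could match VXLAN-GPE traffic with the new pattern item
once this patch is applied. The helper name, port id, VNI value and queue
index below are arbitrary placeholders.

#include <rte_flow.h>

/* Steer VXLAN-GPE packets with a given VNI to an Rx queue. */
static struct rte_flow *
create_vxlan_gpe_rule(uint16_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Match a specific (non-zero) VNI; protocol is left at 0 since
         * the PMD refuses a non-zero protocol in the spec. */
        struct rte_flow_item_vxlan_gpe gpe_spec = {
                .vni = { 0, 0, 42 },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
                  .spec = &gpe_spec,
                  .mask = &rte_flow_item_vxlan_gpe_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}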
 drivers/net/mlx5/mlx5_flow.c | 99 +++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.c |  3 +-
 2 files changed, 99 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 371d029c8..1a7601cd9 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -91,6 +91,11 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
                       struct mlx5_flow_data *data);
 
 static int
+mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
+                          const void *default_mask,
+                          struct mlx5_flow_data *data);
+
+static int
 mlx5_flow_create_gre(const struct rte_flow_item *item,
                     const void *default_mask,
                     struct mlx5_flow_data *data);
@@ -241,10 +246,12 @@ struct rte_flow {
 
 #define IS_TUNNEL(type) ( \
        (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
+       (type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
        (type) == RTE_FLOW_ITEM_TYPE_GRE)
 
 const uint32_t flow_ptype[] = {
        [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+       [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
        [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
 };
 
@@ -253,6 +260,8 @@ const uint32_t flow_ptype[] = {
 const uint32_t ptype_ext[] = {
        [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
                                              RTE_PTYPE_L4_UDP,
+       [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE |
+                                                 RTE_PTYPE_L4_UDP,
        [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
 };
 
@@ -310,6 +319,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VXLAN,
+                              RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
                               RTE_FLOW_ITEM_TYPE_GRE),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
@@ -388,7 +398,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
                .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
-               .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
+               .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
+                              RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_udp){
                        .hdr = {
@@ -440,6 +451,19 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
                .convert = mlx5_flow_create_vxlan,
                .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
        },
+       [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {
+               .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+                              RTE_FLOW_ITEM_TYPE_IPV4,
+                              RTE_FLOW_ITEM_TYPE_IPV6),
+               .actions = valid_actions,
+               .mask = &(const struct rte_flow_item_vxlan_gpe){
+                       .vni = "\xff\xff\xff",
+               },
+               .default_mask = &rte_flow_item_vxlan_gpe_mask,
+               .mask_sz = sizeof(struct rte_flow_item_vxlan_gpe),
+               .convert = mlx5_flow_create_vxlan_gpe,
+               .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+       },
 };
 
 /** Structure to pass to the conversion function. */
@@ -1753,6 +1777,79 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert VXLAN-GPE item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param default_mask[in]
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
+                          const void *default_mask,
+                          struct mlx5_flow_data *data)
+{
+       const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+       const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+       struct mlx5_flow_parse *parser = data->parser;
+       unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+       struct ibv_flow_spec_tunnel vxlan = {
+               .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+               .size = size,
+       };
+       union vni {
+               uint32_t vlan_id;
+               uint8_t vni[4];
+       } id;
+
+       id.vni[0] = 0;
+       parser->inner = IBV_FLOW_SPEC_INNER;
+       parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)];
+       parser->out_layer = parser->layer;
+       parser->layer = HASH_RXQ_TUNNEL;
+       /* Default VXLAN-GPE to outer RSS. */
+       if (!parser->rss_conf.level)
+               parser->rss_conf.level = 1;
+       if (spec) {
+               if (!mask)
+                       mask = default_mask;
+               memcpy(&id.vni[1], spec->vni, 3);
+               vxlan.val.tunnel_id = id.vlan_id;
+               memcpy(&id.vni[1], mask->vni, 3);
+               vxlan.mask.tunnel_id = id.vlan_id;
+               if (spec->protocol)
+                       return rte_flow_error_set(data->error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "VxLAN-GPE protocol not"
+                                                 " supported");
+               /* Remove unwanted bits from values. */
+               vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+       }
+       /*
+        * Tunnel id 0 is equivalent to not adding a VXLAN layer. If only
+        * this layer is defined in the Verbs specification, it is
+        * interpreted as a wildcard and all packets will match this rule.
+        * If it follows a full stack layer (e.g. eth / ipv4 / udp), all
+        * packets matching the layers before will also match this rule.
+        * To avoid such a situation, VNI 0 is currently refused.
+        */
+       /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
+       if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
+               return rte_flow_error_set(data->error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "VxLAN-GPE vni cannot be 0");
+       mlx5_flow_create_copy(parser, &vxlan, size);
+       return 0;
+}
+
+/**
  * Convert GRE item to Verbs specification.
  *
  * @param item[in]
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 060ff0e85..f10ea13c1 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -466,8 +466,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        uint8_t vlan_sz =
                                (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
                        const uint64_t is_tunneled =
-                               buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-                                                PKT_TX_TUNNEL_VXLAN);
+                               buf->ol_flags & (PKT_TX_TUNNEL_MASK);
 
                        tso_header_sz = buf->l2_len + vlan_sz +
                                        buf->l3_len + buf->l4_len;
-- 
2.13.3
