New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.

Signed-off-by: Ting Xu <ting...@intel.com>
Signed-off-by: Jeff Guo <jia....@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         | 296 ++++++++++++++++++---------
 drivers/net/iavf/iavf_generic_flow.h |   5 +
 2 files changed, 209 insertions(+), 92 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 459c09f6fb..df8d1d431e 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -34,7 +34,7 @@
 #define IAVF_FDIR_INSET_ETH_IPV4 (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
        IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
-       IAVF_INSET_IPV4_TTL)
+       IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)
 
 #define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
        IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
@@ -56,6 +56,9 @@
        IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
        IAVF_INSET_IPV6_HOP_LIMIT)
 
+#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
+       IAVF_INSET_IPV6_ID)
+
 #define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
        IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
        IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
@@ -113,10 +116,12 @@
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
        {iavf_pattern_ethertype,                IAVF_FDIR_INSET_ETH,            
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,       
        IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,       
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_udp,             IAVF_FDIR_INSET_ETH_IPV4_UDP,   
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_tcp,             IAVF_FDIR_INSET_ETH_IPV4_TCP,   
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv4_sctp,            IAVF_FDIR_INSET_ETH_IPV4_SCTP,  
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6,                 IAVF_FDIR_INSET_ETH_IPV6,       
        IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_frag_ext,        
IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT,      IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_udp,             IAVF_FDIR_INSET_ETH_IPV6_UDP,   
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_tcp,             IAVF_FDIR_INSET_ETH_IPV6_TCP,   
        IAVF_INSET_NONE},
        {iavf_pattern_eth_ipv6_sctp,            IAVF_FDIR_INSET_ETH_IPV6_SCTP,  
        IAVF_INSET_NONE},
@@ -497,12 +502,13 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
 {
        struct virtchnl_proto_hdrs *hdrs =
                        &filter->add_fltr.rule_cfg.proto_hdrs;
-       const struct rte_flow_item *item = pattern;
-       enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
-       const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+       const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+       const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
+       const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_last;
+       const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
@@ -513,15 +519,16 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
+       const struct rte_flow_item *item = pattern;
+       struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
        struct rte_ecpri_common_hdr ecpri_common;
        uint64_t input_set = IAVF_INSET_NONE;
-       uint8_t proto_id;
-
+       enum rte_flow_item_type item_type;
        enum rte_flow_item_type next_type;
+       bool spec_all_pid = false;
        uint16_t ether_type;
-
+       uint8_t proto_id;
        int layer = 0;
-       struct virtchnl_proto_hdr *hdr;
 
        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -529,26 +536,28 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
        };
 
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               if (item->last) {
+               item_type = item->type;
+
+               if (item->last && (item_type != RTE_FLOW_ITEM_TYPE_IPV4 ||
+                                  item_type !=
+                                  RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
                        rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                       "Not support range");
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "Not support range");
                }
 
-               item_type = item->type;
-
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        next_type = (item + 1)->type;
 
-                       hdr = &hdrs->proto_hdr[layer];
+                       hdr1 = &hdrs->proto_hdr[layer];
 
-                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
 
                        if (next_type == RTE_FLOW_ITEM_TYPE_END &&
-                               (!eth_spec || !eth_mask)) {
+                           (!eth_spec || !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item, "NULL eth spec/mask.");
@@ -584,10 +593,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
                                }
 
                                input_set |= IAVF_INSET_ETHERTYPE;
-                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, 
ETHERTYPE);
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+                                                                ETHERTYPE);
 
-                               rte_memcpy(hdr->buffer,
-                                       eth_spec, sizeof(struct rte_ether_hdr));
+                               rte_memcpy(hdr1->buffer, eth_spec,
+                                          sizeof(struct rte_ether_hdr));
                        }
 
                        hdrs->count = ++layer;
@@ -596,51 +606,102 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
+                       ipv4_last = item->last;
                        ipv4_mask = item->mask;
+                       next_type = (item + 1)->type;
 
                        hdr = &hdrs->proto_hdr[layer];
 
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 
-                       if (ipv4_spec && ipv4_mask) {
-                               if (ipv4_mask->hdr.version_ihl ||
-                                       ipv4_mask->hdr.total_length ||
-                                       ipv4_mask->hdr.packet_id ||
-                                       ipv4_mask->hdr.fragment_offset ||
-                                       ipv4_mask->hdr.hdr_checksum) {
-                                       rte_flow_error_set(error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ITEM,
-                                               item, "Invalid IPv4 mask.");
-                                       return -rte_errno;
-                               }
+                       if (!(ipv4_spec && ipv4_mask)) {
+                               hdrs->count = ++layer;
+                               break;
+                       }
 
-                               if (ipv4_mask->hdr.type_of_service ==
-                                                               UINT8_MAX) {
-                                       input_set |= IAVF_INSET_IPV4_TOS;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV4, DSCP);
-                               }
-                               if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
-                                       input_set |= IAVF_INSET_IPV4_PROTO;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV4, PROT);
-                               }
-                               if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
-                                       input_set |= IAVF_INSET_IPV4_TTL;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV4, TTL);
-                               }
-                               if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
-                                       input_set |= IAVF_INSET_IPV4_SRC;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV4, SRC);
-                               }
-                               if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
-                                       input_set |= IAVF_INSET_IPV4_DST;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV4, DST);
-                               }
+                       if (ipv4_mask->hdr.version_ihl ||
+                           ipv4_mask->hdr.total_length ||
+                           ipv4_mask->hdr.hdr_checksum) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item, "Invalid IPv4 mask.");
+                               return -rte_errno;
+                       }
 
-                               rte_memcpy(hdr->buffer,
-                                       &ipv4_spec->hdr,
-                                       sizeof(ipv4_spec->hdr));
+                       if (ipv4_last->hdr.version_ihl ||
+                           ipv4_last->hdr.type_of_service ||
+                           ipv4_last->hdr.time_to_live ||
+                           ipv4_last->hdr.total_length |
+                           ipv4_last->hdr.next_proto_id ||
+                           ipv4_last->hdr.hdr_checksum ||
+                           ipv4_last->hdr.src_addr ||
+                           ipv4_last->hdr.dst_addr) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item, "Invalid IPv4 last.");
+                               return -rte_errno;
+                       }
+
+                       if (ipv4_mask->hdr.type_of_service ==
+                           UINT8_MAX) {
+                               input_set |= IAVF_INSET_IPV4_TOS;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                DSCP);
+                       }
+
+                       if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+                               input_set |= IAVF_INSET_IPV4_PROTO;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                PROT);
+                       }
+
+                       if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+                               input_set |= IAVF_INSET_IPV4_TTL;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                TTL);
+                       }
+
+                       if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+                               input_set |= IAVF_INSET_IPV4_SRC;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                SRC);
                        }
 
+                       if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+                               input_set |= IAVF_INSET_IPV4_DST;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+                                                                DST);
+                       }
+
+                       if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
+                               input_set |= IAVF_INSET_IPV4_ID;
+                               if (ipv4_last &&
+                                   ipv4_spec->hdr.packet_id == 0 &&
+                                   ipv4_last->hdr.packet_id == 0xffff)
+                                       spec_all_pid = true;
+
+                       /* All IPv4 fragment packets share the same
+                        * ethertype; if the spec matches the full
+                        * packet id range, set the ethertype into the
+                        * input set instead. */
+                               input_set |= spec_all_pid ?
+                                       IAVF_INSET_ETHERTYPE :
+                                       IAVF_INSET_IPV4_ID;
+
+                               if (spec_all_pid)
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+                                               ETH, ETHERTYPE);
+                               else
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+                                               IPV4, PKID);
+                       }
+
+                       if (ipv4_mask->hdr.fragment_offset == UINT16_MAX)
+                               VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4_FRAG);
+
+                       rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
+                                  sizeof(ipv4_spec->hdr));
+
                        hdrs->count = ++layer;
                        break;
 
@@ -653,46 +714,92 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
 
                        VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
 
-                       if (ipv6_spec && ipv6_mask) {
-                               if (ipv6_mask->hdr.payload_len) {
-                                       rte_flow_error_set(error, EINVAL,
-                                               RTE_FLOW_ERROR_TYPE_ITEM,
-                                               item, "Invalid IPv6 mask");
-                                       return -rte_errno;
-                               }
+                       if (!(ipv6_spec && ipv6_mask)) {
+                               hdrs->count = ++layer;
+                               break;
+                       }
 
-                               if ((ipv6_mask->hdr.vtc_flow &
-                                       rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
-                                       == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) 
{
-                                       input_set |= IAVF_INSET_IPV6_TC;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV6, TC);
-                               }
-                               if (ipv6_mask->hdr.proto == UINT8_MAX) {
-                                       input_set |= IAVF_INSET_IPV6_NEXT_HDR;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV6, PROT);
-                               }
-                               if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
-                                       input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV6, HOP_LIMIT);
-                               }
-                               if (!memcmp(ipv6_mask->hdr.src_addr,
-                                       ipv6_addr_mask,
-                                       RTE_DIM(ipv6_mask->hdr.src_addr))) {
-                                       input_set |= IAVF_INSET_IPV6_SRC;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV6, SRC);
-                               }
-                               if (!memcmp(ipv6_mask->hdr.dst_addr,
-                                       ipv6_addr_mask,
-                                       RTE_DIM(ipv6_mask->hdr.dst_addr))) {
-                                       input_set |= IAVF_INSET_IPV6_DST;
-                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, 
IPV6, DST);
-                               }
+                       if (ipv6_mask->hdr.payload_len) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item, "Invalid IPv6 mask");
+                               return -rte_errno;
+                       }
 
-                               rte_memcpy(hdr->buffer,
-                                       &ipv6_spec->hdr,
-                                       sizeof(ipv6_spec->hdr));
+                       if ((ipv6_mask->hdr.vtc_flow &
+                             rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+                            == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+                               input_set |= IAVF_INSET_IPV6_TC;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                TC);
                        }
 
+                       if (ipv6_mask->hdr.proto == UINT8_MAX) {
+                               input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                PROT);
+                       }
+
+                       if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+                               input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                HOP_LIMIT);
+                       }
+
+                       if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+                                   RTE_DIM(ipv6_mask->hdr.src_addr))) {
+                               input_set |= IAVF_INSET_IPV6_SRC;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                SRC);
+                       }
+                       if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+                                   RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+                               input_set |= IAVF_INSET_IPV6_DST;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                DST);
+                       }
+
+                       rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
+                                  sizeof(ipv6_spec->hdr));
+
+                       hdrs->count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+                       ipv6_frag_spec = item->spec;
+                       ipv6_frag_last = item->last;
+                       ipv6_frag_mask = item->mask;
+                       next_type = (item + 1)->type;
+
+                       hdr = &hdrs->proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);
+
+                       if (!(ipv6_frag_spec && ipv6_frag_mask)) {
+                               hdrs->count = ++layer;
+                               break;
+                       }
+
+                       if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
+                           ipv6_frag_last->hdr.id == 0xffffffff)
+                               spec_all_pid = true;
+
+                       /* All IPv6 fragment packets share the same
+                        * ethertype; if the spec matches the full packet id
+                        * range, set the ethertype into the input set
+                        * instead. */
+                       input_set |= spec_all_pid ? IAVF_INSET_ETHERTYPE :
+                               IAVF_INSET_IPV6_ID;
+                       if (spec_all_pid)
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+                                                                ETHERTYPE);
+                       else
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+                                                                PKID);
+
+                       rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
+                                  sizeof(ipv6_frag_spec->hdr));
+
                        hdrs->count = ++layer;
                        break;
 
@@ -1010,8 +1117,13 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter 
*ad,
                return -rte_errno;
        }
 
-       if (input_set & ~input_set_mask)
-               return -EINVAL;
+       if (spec_all_pid) {
+               if (input_set & ~(input_set_mask | IAVF_INSET_ETHERTYPE))
+                       return -EINVAL;
+       } else {
+               if (input_set & ~input_set_mask)
+                       return -EINVAL;
+       }
 
        filter->input_set = input_set;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h 
b/drivers/net/iavf/iavf_generic_flow.h
index ce3d12bcd9..b7b9bd2495 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -61,6 +61,7 @@
 #define IAVF_PFCP_S_FIELD          (1ULL << 44)
 #define IAVF_PFCP_SEID             (1ULL << 43)
 #define IAVF_ECPRI_PC_RTC_ID       (1ULL << 42)
+#define IAVF_IP_PKID               (1ULL << 41)
 
 /* input set */
 
@@ -84,6 +85,8 @@
        (IAVF_PROT_IPV4_OUTER | IAVF_IP_PROTO)
 #define IAVF_INSET_IPV4_TTL \
        (IAVF_PROT_IPV4_OUTER | IAVF_IP_TTL)
+#define IAVF_INSET_IPV4_ID \
+       (IAVF_PROT_IPV4_OUTER | IAVF_IP_PKID)
 #define IAVF_INSET_IPV6_SRC \
        (IAVF_PROT_IPV6_OUTER | IAVF_IP_SRC)
 #define IAVF_INSET_IPV6_DST \
@@ -94,6 +97,8 @@
        (IAVF_PROT_IPV6_OUTER | IAVF_IP_TTL)
 #define IAVF_INSET_IPV6_TC \
        (IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
+#define IAVF_INSET_IPV6_ID \
+       (IAVF_PROT_IPV6_OUTER | IAVF_IP_PKID)
 
 #define IAVF_INSET_TCP_SRC_PORT \
        (IAVF_PROT_TCP_OUTER | IAVF_SPORT)
-- 
2.20.1

Reply via email to