From: wei zhao1 <wei.zh...@intel.com>

Check whether the flow rule is a TCP SYN rule and, if so, extract the SYN filter info (queue index and priority).
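
For reference, the kind of rule this parser accepts is: optional void items,
optional ETH/IPv4/IPv6 items with NULL spec and mask, a TCP item whose mask
matches only the SYN flag, a QUEUE action, and an ingress attribute with the
lowest or highest priority. A minimal sketch of such a rule built with the
rte_flow API follows (the SYN flag value 0x02 and the queue index are
illustrative assumptions, not part of this patch):

#include <rte_flow.h>

/* attr: ingress only, lowest priority (maps to hig_pri = 0) */
static const struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };

/* TCP item: match only the SYN bit in tcp_flags, everything else masked out */
static const struct rte_flow_item_tcp tcp_spec = {
	.hdr = { .tcp_flags = 0x02 /* SYN */ },
};
static const struct rte_flow_item_tcp tcp_mask = {
	.hdr = { .tcp_flags = 0x02 /* only the SYN bit is significant */ },
};

/* pattern: ETH / IPV4 / TCP / END; ETH and IPV4 carry no spec or mask */
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP,
	  .spec = &tcp_spec, .mask = &tcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* action: redirect matched SYN packets to an example queue, then END */
static const struct rte_flow_action_queue queue = { .index = 3 };
static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};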

Signed-off-by: wei zhao1 <wei.zh...@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo...@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 154 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 154 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index a421062..3ed749a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -415,6 +415,11 @@ ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_ethertype_filter *filter);
+static enum rte_flow_error_type
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_eth_syn_filter *filter);
 enum rte_flow_error_type
 ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
@@ -8459,6 +8464,148 @@ ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
 }
 
 /**
+ * Parse the rule to see if it is a SYN rule,
+ * and if so fill in the SYN filter info.
+ */
+static enum rte_flow_error_type
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+                               const struct rte_flow_item pattern[],
+                               const struct rte_flow_action actions[],
+                               struct rte_eth_syn_filter *filter)
+{
+       const struct rte_flow_item *item;
+       const struct rte_flow_action *act;
+       const struct rte_flow_item_tcp *tcp_spec;
+       const struct rte_flow_item_tcp *tcp_mask;
+       const struct rte_flow_action_queue *act_q;
+       uint32_t i;
+
+       /************************************************
+        * parse pattern
+        ************************************************/
+       i = 0;
+
+       /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+       PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+                         RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+           item->type != RTE_FLOW_ITEM_TYPE_TCP)
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+
+       /* Skip Ethernet */
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               /* if the item is MAC, the content should be NULL */
+               if (item->spec || item->mask)
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+
+               /* check if the next not void item is IPv4 or IPv6 */
+               i++;
+               PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+                                 RTE_FLOW_ERROR_TYPE_ITEM);
+               if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+                   item->type != RTE_FLOW_ITEM_TYPE_IPV6)
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /* Skip IP */
+       if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+           item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+               /* if the item is IP, the content should be NULL */
+               if (item->spec || item->mask)
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+
+               /* check if the next not void item is TCP */
+               i++;
+               PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+                                 RTE_FLOW_ERROR_TYPE_ITEM);
+               if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /* Get the TCP info. Only support SYN. */
+       if (!item->spec || !item->mask)
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+       tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+       if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+           tcp_mask->hdr.src_port ||
+           tcp_mask->hdr.dst_port ||
+           tcp_mask->hdr.sent_seq ||
+           tcp_mask->hdr.recv_ack ||
+           tcp_mask->hdr.data_off ||
+           tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+           tcp_mask->hdr.rx_win ||
+           tcp_mask->hdr.cksum ||
+           tcp_mask->hdr.tcp_urp) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /* check if the next not void item is END */
+       i++;
+       PATTERN_SKIP_VOID(filter, struct rte_eth_syn_filter,
+                         RTE_FLOW_ERROR_TYPE_ITEM);
+       if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /************************************************
+        * parse action
+        ************************************************/
+       i = 0;
+
+       /* check if the first not void action is QUEUE. */
+       ACTION_SKIP_VOID(filter, struct rte_eth_syn_filter,
+                        RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+       if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ACTION;
+       }
+
+       act_q = (const struct rte_flow_action_queue *)act->conf;
+       filter->queue = act_q->index;
+
+       /* check if the next not void item is END */
+       i++;
+       ACTION_SKIP_VOID(filter, struct rte_eth_syn_filter,
+                        RTE_FLOW_ERROR_TYPE_ACTION);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ACTION;
+       }
+
+       /************************************************
+        * parse attr
+        ************************************************/
+       /* must be input direction */
+       if (!attr->ingress) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+       }
+
+       /* not supported */
+       if (attr->egress) {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+       }
+
+       /* Support 2 priorities, the lowest or highest. */
+       if (!attr->priority) {
+               filter->hig_pri = 0;
+       } else if (attr->priority == (uint32_t)~0U) {
+               filter->hig_pri = 1;
+       } else {
+               memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+       }
+
+       return RTE_FLOW_ERROR_TYPE_NONE;
+}
+
+/**
  * Check if the flow rule is supported by ixgbe.
  * It only checks the format. It doesn't guarantee that the rule can be
  * programmed into the HW, because there may not be enough room for the rule.
@@ -8472,17 +8619,24 @@ ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
        int ret;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
+       struct rte_eth_syn_filter syn_filter;
 
        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
        if (!ret)
                return RTE_FLOW_ERROR_TYPE_NONE;
+
        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = ixgbe_parse_ethertype_filter(attr, pattern,
                                                actions, &ethertype_filter);
        if (!ret)
                return RTE_FLOW_ERROR_TYPE_NONE;
 
+       memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+       ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter);
+       if (!ret)
+               return RTE_FLOW_ERROR_TYPE_NONE;
+
        return ret;
 }
 
-- 
2.5.5
