From: Ferruh Yigit <ferruh.yi...@intel.com>

The following library APIs are implemented:
rte_flow_classify_create
rte_flow_classify_validate
rte_flow_classify_destroy
rte_flow_classify_query

The following librte_table ACL APIs are used (a setup sketch follows the list):
f_create to create an ACL table.
f_add to add an ACL rule to the table.
f_del to delete an ACL rule from the table.
f_lookup to match packets with the ACL rules.
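
For reference, a minimal sketch of creating the ACL table the library
operates on (illustrative only, not part of this patch; the "ipv4_defs"
field definitions and the sizes used here are assumptions):

    /* ipv4_defs: assumed struct rte_acl_field_def[5] describing the
     * 5-tuple (proto, src/dst IP, src/dst port), provided by the app.
     */
    struct rte_table_acl_params table_acl_params = {
            .name = "table_acl_ipv4_5tuple", /* hypothetical name */
            .n_rules = 4096,                 /* hypothetical capacity */
            .n_rule_fields = RTE_DIM(ipv4_defs),
    };
    uint32_t entry_size = sizeof(struct rte_acl_rule); /* assumed */
    void *table_handle;

    memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
    table_handle = rte_table_acl_ops.f_create(&table_acl_params,
                    0 /* socket_id */, entry_size);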

The library supports counting of IPv4 five-tuple packets only,
i.e. IPv4 UDP, TCP and SCTP packets.
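
As an illustration, the intended call flow with the new APIs (a sketch,
not part of this patch; table_handle and entry_size come from the table
creation above, and attr, pattern, actions and the pkts mbuf burst are
assumed to be set up by the application):

    struct rte_flow_classify_5tuple_stats ntuple_stats;
    struct rte_flow_classify_stats stats = {
            .available_space = 1,
            .used_space = 0,
            .stats = (void **)&ntuple_stats,
    };
    struct rte_flow_error error;
    struct rte_flow_classify *rule;

    rule = rte_flow_classify_create(table_handle, entry_size, &attr,
                    pattern, actions, &error);
    if (rule != NULL) {
            /* after rte_eth_rx_burst(port_id, 0, pkts, nb_pkts) */
            if (rte_flow_classify_query(table_handle, rule, pkts,
                            nb_pkts, &stats, &error) == 0)
                    printf("5-tuple matches: %" PRIu64 "\n",
                            ntuple_stats.counter1);
            rte_flow_classify_destroy(table_handle, rule, &error);
    }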

Signed-off-by: Ferruh Yigit <ferruh.yi...@intel.com>
Signed-off-by: Bernard Iremonger <bernard.iremon...@intel.com>
---
 config/common_base                                 |   6 +
 doc/api/doxy-api-index.md                          |   1 +
 doc/api/doxy-api.conf                              |   1 +
 lib/Makefile                                       |   3 +
 lib/librte_eal/common/include/rte_log.h            |   1 +
 lib/librte_flow_classify/Makefile                  |  51 ++
 lib/librte_flow_classify/rte_flow_classify.c       | 459 +++++++++++++++++
 lib/librte_flow_classify/rte_flow_classify.h       | 207 ++++++++
 lib/librte_flow_classify/rte_flow_classify_parse.c | 546 +++++++++++++++++++++
 lib/librte_flow_classify/rte_flow_classify_parse.h |  74 +++
 .../rte_flow_classify_version.map                  |  10 +
 mk/rte.app.mk                                      |   2 +-
 12 files changed, 1360 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_flow_classify/Makefile
 create mode 100644 lib/librte_flow_classify/rte_flow_classify.c
 create mode 100644 lib/librte_flow_classify/rte_flow_classify.h
 create mode 100644 lib/librte_flow_classify/rte_flow_classify_parse.c
 create mode 100644 lib/librte_flow_classify/rte_flow_classify_parse.h
 create mode 100644 lib/librte_flow_classify/rte_flow_classify_version.map

diff --git a/config/common_base b/config/common_base
index 5e97a08..e378e0a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -657,6 +657,12 @@ CONFIG_RTE_LIBRTE_GRO=y
 CONFIG_RTE_LIBRTE_METER=y
 
 #
+# Compile librte_flow_classify
+#
+CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y
+CONFIG_RTE_LIBRTE_CLASSIFY_DEBUG=n
+
+#
 # Compile librte_sched
 #
 CONFIG_RTE_LIBRTE_SCHED=y
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 19e0d4f..a2fa281 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -105,6 +105,7 @@ The public API headers are grouped by topics:
   [LPM IPv4 route]     (@ref rte_lpm.h),
   [LPM IPv6 route]     (@ref rte_lpm6.h),
   [ACL]                (@ref rte_acl.h),
+  [flow_classify]      (@ref rte_flow_classify.h),
   [EFD]                (@ref rte_efd.h)
 
 - **QoS**:
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index 823554f..4e43a66 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -46,6 +46,7 @@ INPUT                   = doc/api/doxy-api-index.md \
                           lib/librte_efd \
                           lib/librte_ether \
                           lib/librte_eventdev \
+                          lib/librte_flow_classify \
                           lib/librte_gro \
                           lib/librte_hash \
                           lib/librte_ip_frag \
diff --git a/lib/Makefile b/lib/Makefile
index 86caba1..21fc3b0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -82,6 +82,9 @@ DIRS-$(CONFIG_RTE_LIBRTE_POWER) += librte_power
 DEPDIRS-librte_power := librte_eal
 DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter
 DEPDIRS-librte_meter := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += librte_flow_classify
+DEPDIRS-librte_flow_classify := librte_eal librte_ether librte_net
+DEPDIRS-librte_flow_classify += librte_table librte_acl librte_port
 DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched
 DEPDIRS-librte_sched := librte_eal librte_mempool librte_mbuf librte_net
 DEPDIRS-librte_sched += librte_timer
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index ec8dba7..f975bde 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -87,6 +87,7 @@ struct rte_logs {
 #define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */
 #define RTE_LOGTYPE_EFD       18 /**< Log related to EFD. */
 #define RTE_LOGTYPE_EVENTDEV  19 /**< Log related to eventdev. */
+#define RTE_LOGTYPE_CLASSIFY  20 /**< Log related to flow classify. */
 
 /* these log types can be used in an application */
 #define RTE_LOGTYPE_USER1     24 /**< User-defined log type 1. */
diff --git a/lib/librte_flow_classify/Makefile b/lib/librte_flow_classify/Makefile
new file mode 100644
index 0000000..7863a0c
--- /dev/null
+++ b/lib/librte_flow_classify/Makefile
@@ -0,0 +1,51 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2017 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_flow_classify.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_flow_classify_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify.c
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify_parse.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)-include := rte_flow_classify.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_flow_classify/rte_flow_classify.c b/lib/librte_flow_classify/rte_flow_classify.c
new file mode 100644
index 0000000..595e08c
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify.c
@@ -0,0 +1,459 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+#include <rte_table_acl.h>
+#include <stdbool.h>
+
+static struct rte_eth_ntuple_filter ntuple_filter;
+
+enum {
+       PROTO_FIELD_IPV4,
+       SRC_FIELD_IPV4,
+       DST_FIELD_IPV4,
+       SRCP_FIELD_IPV4,
+       DSTP_FIELD_IPV4,
+       NUM_FIELDS_IPV4
+};
+
+struct ipv4_5tuple_data {
+       uint16_t priority; /**< flow API uses priority 0 to 8, 0 is highest */
+       uint32_t userdata; /**< value returned for match */
+       uint8_t tcp_flags; /**< tcp_flags only meaningful for TCP protocol */
+};
+
+struct rte_flow_classify {
+       enum rte_flow_classify_type type; /**< classify type */
+       struct rte_flow_action action;    /**< action when match found */
+       struct ipv4_5tuple_data flow_extra_data;  /**< extra rule data */
+       struct rte_table_acl_rule_add_params key_add; /**< add ACL rule key */
+       struct rte_table_acl_rule_delete_params
+                       key_del; /**< delete ACL rule key */
+       int key_found;   /**< ACL rule key found in table */
+       void *entry;     /**< pointer to buffer to hold ACL rule key */
+       void *entry_ptr; /**< handle to the table entry for the ACL rule key */
+};
+
+/* number of categories in an ACL context */
+#define FLOW_CLASSIFY_NUM_CATEGORY 1
+
+/* number of packets in a burst */
+#define MAX_PKT_BURST 32
+
+struct mbuf_search {
+       struct rte_mbuf *m_ipv4[MAX_PKT_BURST];
+       uint32_t res_ipv4[MAX_PKT_BURST];
+       int num_ipv4;
+};
+
+int
+rte_flow_classify_validate(void *table_handle,
+                  const struct rte_flow_attr *attr,
+                  const struct rte_flow_item pattern[],
+                  const struct rte_flow_action actions[],
+                  struct rte_flow_error *error)
+{
+       struct rte_flow_item *items;
+       parse_filter_t parse_filter;
+       uint32_t item_num = 0;
+       uint32_t i = 0;
+       int ret;
+
+       (void) table_handle;
+
+       if (!error)
+               return -EINVAL;
+
+       if (!pattern) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                                  NULL, "NULL pattern.");
+               return -EINVAL;
+       }
+
+       if (!actions) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+                                  NULL, "NULL action.");
+               return -EINVAL;
+       }
+
+       if (!attr) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "NULL attribute.");
+               return -EINVAL;
+       }
+
+       memset(&ntuple_filter, 0, sizeof(ntuple_filter));
+
+       /* Count the non-void items in the pattern */
+       while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+               if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+                       item_num++;
+               i++;
+       }
+       item_num++; /* add room for the END item */
+
+       items = malloc(item_num * sizeof(struct rte_flow_item));
+       if (!items) {
+               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                                  NULL, "No memory for pattern items.");
+               return -ENOMEM;
+       }
+
+       memset(items, 0, item_num * sizeof(struct rte_flow_item));
+       classify_pattern_skip_void_item(items, pattern);
+
+       parse_filter = classify_find_parse_filter_func(items);
+       if (!parse_filter) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                                  pattern, "Unsupported pattern");
+               free(items);
+               return -EINVAL;
+       }
+
+       ret = parse_filter(attr, items, actions, &ntuple_filter, error);
+       free(items);
+       return ret;
+}
+
+#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
+#define uint32_t_to_char(ip, a, b, c, d) do {\
+               *a = (unsigned char)(ip >> 24 & 0xff);\
+               *b = (unsigned char)(ip >> 16 & 0xff);\
+               *c = (unsigned char)(ip >> 8 & 0xff);\
+               *d = (unsigned char)(ip & 0xff);\
+       } while (0)
+
+static inline void
+print_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
+{
+       unsigned char a, b, c, d;
+
+       printf("ipv4_key_add: 0x%02hhx/0x%hhx ",
+               key->field_value[PROTO_FIELD_IPV4].value.u8,
+               key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+       uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+                       &a, &b, &c, &d);
+       printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+                       key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+       uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+                       &a, &b, &c, &d);
+       printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+                       key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+       printf("%hu : 0x%x %hu : 0x%x",
+               key->field_value[SRCP_FIELD_IPV4].value.u16,
+               key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+               key->field_value[DSTP_FIELD_IPV4].value.u16,
+               key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+
+       printf(" priority: 0x%x\n", key->priority);
+}
+
+static inline void
+print_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
+{
+       unsigned char a, b, c, d;
+
+       printf("ipv4_key_del: 0x%02hhx/0x%hhx ",
+               key->field_value[PROTO_FIELD_IPV4].value.u8,
+               key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+       uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+                       &a, &b, &c, &d);
+       printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+                       key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+       uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+                       &a, &b, &c, &d);
+       printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+                       key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+       printf("%hu : 0x%x %hu : 0x%x\n",
+               key->field_value[SRCP_FIELD_IPV4].value.u16,
+               key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+               key->field_value[DSTP_FIELD_IPV4].value.u16,
+               key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+}
+#endif
+
+static struct rte_flow_classify *
+allocate_5tuple(void)
+{
+       struct rte_flow_classify *flow_classify;
+
+       flow_classify = malloc(sizeof(struct rte_flow_classify));
+       if (!flow_classify)
+               return flow_classify;
+
+       memset(flow_classify, 0, sizeof(struct rte_flow_classify));
+
+       flow_classify->type = RTE_FLOW_CLASSIFY_TYPE_5TUPLE;
+       memcpy(&flow_classify->action, classify_get_flow_action(),
+              sizeof(struct rte_flow_action));
+
+       flow_classify->flow_extra_data.priority = ntuple_filter.priority;
+       flow_classify->flow_extra_data.tcp_flags = ntuple_filter.tcp_flags;
+
+       /* key add values */
+       flow_classify->key_add.priority = ntuple_filter.priority;
+       flow_classify->key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
+                       ntuple_filter.proto_mask;
+       flow_classify->key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
+                       ntuple_filter.proto;
+
+       flow_classify->key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
+                       ntuple_filter.src_ip_mask;
+       flow_classify->key_add.field_value[SRC_FIELD_IPV4].value.u32 =
+                       ntuple_filter.src_ip;
+
+       flow_classify->key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
+                       ntuple_filter.dst_ip_mask;
+       flow_classify->key_add.field_value[DST_FIELD_IPV4].value.u32 =
+                       ntuple_filter.dst_ip;
+
+       flow_classify->key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
+                       ntuple_filter.src_port_mask;
+       flow_classify->key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
+                       ntuple_filter.src_port;
+
+       flow_classify->key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
+                       ntuple_filter.dst_port_mask;
+       flow_classify->key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
+                       ntuple_filter.dst_port;
+
+#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
+       print_ipv4_key_add(&flow_classify->key_add);
+#endif
+
+       /* key delete values */
+       memcpy(&flow_classify->key_del.field_value[PROTO_FIELD_IPV4],
+              &flow_classify->key_add.field_value[PROTO_FIELD_IPV4],
+              NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
+
+#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
+       print_ipv4_key_delete(&flow_classify->key_del);
+#endif
+       return flow_classify;
+}
+
+struct rte_flow_classify *
+rte_flow_classify_create(void *table_handle,
+               uint32_t entry_size,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       struct rte_flow_classify *flow_classify;
+       struct rte_acl_rule *acl_rule;
+       int ret;
+
+       if (!error)
+               return NULL;
+
+       if (!table_handle) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+                                  NULL, "NULL table_handle.");
+               return NULL;
+       }
+
+       if (!pattern) {
+               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                                  NULL, "NULL pattern.");
+               return NULL;
+       }
+
+       if (!actions) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+                                  NULL, "NULL action.");
+               return NULL;
+       }
+
+       if (!attr) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "NULL attribute.");
+               return NULL;
+       }
+
+       /* parse attr, pattern and actions */
+       ret = rte_flow_classify_validate(table_handle, attr, pattern,
+                       actions, error);
+       if (ret < 0)
+               return NULL;
+
+       flow_classify = allocate_5tuple();
+       if (!flow_classify)
+               return NULL;
+
+       flow_classify->entry = malloc(entry_size);
+       if (!flow_classify->entry) {
+               free(flow_classify);
+               flow_classify = NULL;
+               return NULL;
+       }
+
+       ret = rte_table_acl_ops.f_add(table_handle, &flow_classify->key_add,
+                       flow_classify->entry, &flow_classify->key_found,
+                       &flow_classify->entry_ptr);
+       if (ret) {
+               free(flow_classify->entry);
+               free(flow_classify);
+               flow_classify = NULL;
+               return NULL;
+       }
+       acl_rule = flow_classify->entry;
+       flow_classify->flow_extra_data.userdata = acl_rule->data.userdata;
+
+       return flow_classify;
+}
+
+int
+rte_flow_classify_destroy(void *table_handle,
+               struct rte_flow_classify *flow_classify,
+               struct rte_flow_error *error)
+{
+       int ret;
+       int key_found;
+
+       if (!error)
+               return -EINVAL;
+
+       if (!flow_classify || !table_handle) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL, "invalid input");
+               return -EINVAL;
+       }
+
+       ret = rte_table_acl_ops.f_delete(table_handle,
+                       &flow_classify->key_del, &key_found,
+                       flow_classify->entry);
+       if ((ret == 0) && key_found) {
+               free(flow_classify->entry);
+               free(flow_classify);
+       } else
+               ret = -1;
+       return ret;
+}
+
+static int
+flow_match(void *table, struct rte_mbuf **pkts_in, const uint16_t nb_pkts,
+               uint64_t *count, uint32_t userdata)
+{
+       int ret = -1;
+       int i;
+       uint64_t pkts_mask;
+       uint64_t lookup_hit_mask;
+       struct rte_acl_rule *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+
+       if (nb_pkts) {
+               pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+               ret = rte_table_acl_ops.f_lookup(table, pkts_in,
+                               pkts_mask, &lookup_hit_mask, (void **)entries);
+               if (!ret) {
+                       for (i = 0; i < nb_pkts; i++) {
+                               if ((lookup_hit_mask & (1ULL << i)) &&
+                                   (entries[i]->data.userdata == userdata))
+                                       (*count)++; /* match found */
+                       }
+                       if (*count == 0)
+                               ret = -1;
+               } else
+                       ret = -1;
+       }
+       return ret;
+}
+
+static int
+action_apply(const struct rte_flow_classify *flow_classify,
+               struct rte_flow_classify_stats *stats, uint64_t count)
+{
+       struct rte_flow_classify_5tuple_stats *ntuple_stats;
+
+       switch (flow_classify->action.type) {
+       case RTE_FLOW_ACTION_TYPE_COUNT:
+               ntuple_stats =
+                       (struct rte_flow_classify_5tuple_stats *)stats->stats;
+               ntuple_stats->counter1 = count;
+               stats->used_space = 1;
+               break;
+       default:
+               return -ENOTSUP;
+       }
+
+       return 0;
+}
+
+int
+rte_flow_classify_query(void *table_handle,
+               const struct rte_flow_classify *flow_classify,
+               struct rte_mbuf **pkts,
+               const uint16_t nb_pkts,
+               struct rte_flow_classify_stats *stats,
+               struct rte_flow_error *error)
+{
+       uint64_t count = 0;
+       int ret = -EINVAL;
+
+       if (!error)
+               return ret;
+
+       if (!table_handle || !flow_classify || !pkts || !stats) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL, "invalid input");
+               return ret;
+       }
+
+       if ((stats->available_space == 0) || (nb_pkts == 0)) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               NULL, "invalid input");
+               return ret;
+       }
+
+       ret = flow_match(table_handle, pkts, nb_pkts, &count,
+                       flow_classify->flow_extra_data.userdata);
+       if (ret == 0)
+               ret = action_apply(flow_classify, stats, count);
+
+       return ret;
+}
diff --git a/lib/librte_flow_classify/rte_flow_classify.h b/lib/librte_flow_classify/rte_flow_classify.h
new file mode 100644
index 0000000..2b200fb
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify.h
@@ -0,0 +1,207 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_H_
+#define _RTE_FLOW_CLASSIFY_H_
+
+/**
+ * @file
+ *
+ * RTE Flow Classify Library
+ *
+ * This library provides flow record information with some measured properties.
+ *
+ * The application should define the flow and measurement criteria (action) for it.
+ *
+ * The library doesn't maintain any flow records itself; instead, flow
+ * information is returned to the upper layer only for the given packets.
+ *
+ * It is the application's responsibility to call rte_flow_classify_query()
+ * for a group of packets, just after receiving them or before transmitting
+ * them. The application should provide the flow type of interest and the
+ * measurement to apply to that flow in the rte_flow_classify_create() API,
+ * and should provide the rte_flow_classify object and storage for the
+ * results in the rte_flow_classify_query() API.
+ *
+ *  Usage:
+ *  - application calls rte_flow_classify_create() to create a rte_flow_classify
+ *    object.
+ *  - application calls rte_flow_classify_query() in a polling manner,
+ *    preferably after rte_eth_rx_burst(). This will cause the library to
+ *    convert packet information to flow information with some measurements.
+ *  - the rte_flow_classify object can be destroyed when it is no longer
+ *    needed via rte_flow_classify_destroy()
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_acl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum rte_flow_classify_type {
+       RTE_FLOW_CLASSIFY_TYPE_NONE,    /**< no type */
+       RTE_FLOW_CLASSIFY_TYPE_5TUPLE,  /**< IPv4 5tuple type */
+};
+
+struct rte_flow_classify;
+
+/**
+ * Flow stats
+ *
+ * For a single action an array of stats can be returned by the API;
+ * technically each packet can return at most one stat.
+ *
+ * Storage for the stats is provided by the application; the library needs
+ * to know the available space and returns the amount of space used.
+ *
+ * The stats type depends on the measurement (action) requested by the application.
+ *
+ */
+struct rte_flow_classify_stats {
+       const unsigned int available_space;
+       unsigned int used_space;
+       void **stats;
+};
+
+struct rte_flow_classify_5tuple_stats {
+       uint64_t counter1; /**< count of packets that match 5tuple pattern */
+};
+
+/**
+ * Create a flow classify rule.
+ *
+ * @param[in] table_handle
+ *   Pointer to table ACL
+ * @param[in] entry_size
+ *   Size of ACL rule
+ * @param[in] attr
+ *   Flow rule attributes
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END pattern item).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ * @return
+ *   A valid handle in case of success, NULL otherwise.
+ */
+struct rte_flow_classify *
+rte_flow_classify_create(void *table_handle,
+                  uint32_t entry_size,
+                  const struct rte_flow_attr *attr,
+                  const struct rte_flow_item pattern[],
+                  const struct rte_flow_action actions[],
+                  struct rte_flow_error *error);
+
+/**
+ * Validate a flow classify rule.
+ *
+ * @param[in] table_handle
+ *   Pointer to table ACL
+ * @param[in] attr
+ *   Flow rule attributes
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END pattern item).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+int
+rte_flow_classify_validate(void *table_handle,
+                  const struct rte_flow_attr *attr,
+                  const struct rte_flow_item pattern[],
+                  const struct rte_flow_action actions[],
+                  struct rte_flow_error *error);
+
+/**
+ * Destroy a flow classify rule.
+ *
+ * @param[in] table_handle
+ *   Pointer to table ACL
+ * @param[in] flow_classify
+ *   Flow rule handle to destroy
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+int
+rte_flow_classify_destroy(void *table_handle,
+                  struct rte_flow_classify *flow_classify,
+                  struct rte_flow_error *error);
+
+/**
+ * Get flow classification stats for given packets.
+ *
+ * @param[in] table_handle
+ *   Pointer to table ACL
+ * @param[in] flow_classify
+ *   Pointer to Flow rule object
+ * @param[in] pkts
+ *   Pointer to packets to process
+ * @param[in] nb_pkts
+ *   Number of packets to process
+ * @param[in] stats
+ *   To store stats defined by action
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Structure
+ *   initialised in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+int
+rte_flow_classify_query(void *table_handle,
+               const struct rte_flow_classify *flow_classify,
+               struct rte_mbuf **pkts,
+               const uint16_t nb_pkts,
+               struct rte_flow_classify_stats *stats,
+               struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_H_ */
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.c b/lib/librte_flow_classify/rte_flow_classify_parse.c
new file mode 100644
index 0000000..e5a3885
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.c
@@ -0,0 +1,546 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+
+struct classify_valid_pattern {
+       enum rte_flow_item_type *items;
+       parse_filter_t parse_filter;
+};
+
+static struct rte_flow_action action;
+
+/* Pattern matched ntuple filter */
+static enum rte_flow_item_type pattern_ntuple_1[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched ntuple filter */
+static enum rte_flow_item_type pattern_ntuple_2[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_TCP,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched ntuple filter */
+static enum rte_flow_item_type pattern_ntuple_3[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_SCTP,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                        const struct rte_flow_item pattern[],
+                        const struct rte_flow_action actions[],
+                        struct rte_eth_ntuple_filter *filter,
+                        struct rte_flow_error *error);
+
+static struct classify_valid_pattern classify_supported_patterns[] = {
+       /* ntuple */
+       { pattern_ntuple_1, classify_parse_ntuple_filter },
+       { pattern_ntuple_2, classify_parse_ntuple_filter },
+       { pattern_ntuple_3, classify_parse_ntuple_filter },
+};
+
+struct rte_flow_action *
+classify_get_flow_action(void)
+{
+       return &action;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+       bool is_find;
+
+       while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+               if (is_void)
+                       is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+               else
+                       is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+               if (is_find)
+                       break;
+               item++;
+       }
+       return item;
+}
+
+/* Skip all VOID items of the pattern */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+                           const struct rte_flow_item *pattern)
+{
+       uint32_t cpy_count = 0;
+       const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+       for (;;) {
+               /* Find a non-void item first */
+               pb = classify_find_first_item(pb, false);
+               if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+                       pe = pb;
+                       break;
+               }
+
+               /* Find a void item */
+               pe = classify_find_first_item(pb + 1, true);
+
+               cpy_count = pe - pb;
+               rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+               items += cpy_count;
+
+               if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+                       pb = pe;
+                       break;
+               }
+
+               pb = pe + 1;
+       }
+       /* Copy the END item. */
+       rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+classify_match_pattern(enum rte_flow_item_type *item_array,
+                  struct rte_flow_item *pattern)
+{
+       struct rte_flow_item *item = pattern;
+
+       while ((*item_array == item->type) &&
+              (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+               item_array++;
+               item++;
+       }
+
+       return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+               item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find a parse filter function matching the pattern */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+       parse_filter_t parse_filter = NULL;
+       uint8_t i = 0;
+
+       for (; i < RTE_DIM(classify_supported_patterns); i++) {
+               if (classify_match_pattern(classify_supported_patterns[i].items,
+                                       pattern)) {
+                       parse_filter =
+                               classify_supported_patterns[i].parse_filter;
+                       break;
+               }
+       }
+
+       return parse_filter;
+}
+
+#define FLOW_RULE_MIN_PRIORITY 8
+#define FLOW_RULE_MAX_PRIORITY 0
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+       do {            \
+               item = pattern + index;\
+               while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+               index++;                                \
+               item = pattern + index;         \
+               }                                               \
+       } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+       do {                                                            \
+               act = actions + index;                                  \
+               while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+               index++;                                        \
+               act = actions + index;                          \
+               }                                                       \
+       } while (0)
+
+/**
+ * Please be aware there's an assumption for all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and get the n-tuple filter info if so.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP, TCP or SCTP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action must be COUNT.
+ * The next not void action must be END.
+ * pattern example:
+ * ITEM         Spec                    Mask
+ * ETH          NULL                    NULL
+ * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
+ *              dst_addr 192.167.3.50   0xFFFFFFFF
+ *              next_proto_id   17      0xFF
+ * UDP/TCP/     src_port        80      0xFFFF
+ * SCTP         dst_port        80      0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                        const struct rte_flow_item pattern[],
+                        const struct rte_flow_action actions[],
+                        struct rte_eth_ntuple_filter *filter,
+                        struct rte_flow_error *error)
+{
+       const struct rte_flow_item *item;
+       const struct rte_flow_action *act;
+       const struct rte_flow_item_ipv4 *ipv4_spec;
+       const struct rte_flow_item_ipv4 *ipv4_mask;
+       const struct rte_flow_item_tcp *tcp_spec;
+       const struct rte_flow_item_tcp *tcp_mask;
+       const struct rte_flow_item_udp *udp_spec;
+       const struct rte_flow_item_udp *udp_mask;
+       const struct rte_flow_item_sctp *sctp_spec;
+       const struct rte_flow_item_sctp *sctp_mask;
+       uint32_t index;
+
+       if (!pattern) {
+               rte_flow_error_set(error,
+                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+                       NULL, "NULL pattern.");
+               return -rte_errno;
+       }
+
+       if (!actions) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+                                  NULL, "NULL action.");
+               return -rte_errno;
+       }
+       if (!attr) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR,
+                                  NULL, "NULL attribute.");
+               return -rte_errno;
+       }
+
+       /* parse pattern */
+       index = 0;
+
+       /* the first not void item can be MAC or IPv4 */
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+       /* Skip Ethernet */
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       item,
+                                       "Not supported last point for range");
+                       return -rte_errno;
+
+               }
+               /* if the first item is MAC, the content should be NULL */
+               if (item->spec || item->mask) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item,
+                                       "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+               /* check if the next not void item is IPv4 */
+               index++;
+               NEXT_ITEM_OF_PATTERN(item, pattern, index);
+               if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item,
+                                       "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* get the IPv4 info */
+       if (!item->spec || !item->mask) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Invalid ntuple mask");
+               return -rte_errno;
+       }
+       /*Not supported last point for range*/
+       if (item->last) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+
+       }
+
+       ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+       /**
+        * Only support src & dst addresses, protocol,
+        * others should be masked.
+        */
+       if (ipv4_mask->hdr.version_ihl ||
+               ipv4_mask->hdr.type_of_service ||
+               ipv4_mask->hdr.total_length ||
+               ipv4_mask->hdr.packet_id ||
+               ipv4_mask->hdr.fragment_offset ||
+               ipv4_mask->hdr.time_to_live ||
+               ipv4_mask->hdr.hdr_checksum) {
+               rte_flow_error_set(error,
+                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+       filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+       filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+       ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+       filter->dst_ip = ipv4_spec->hdr.dst_addr;
+       filter->src_ip = ipv4_spec->hdr.src_addr;
+       filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+       /* check if the next not void item is TCP or UDP or SCTP */
+       index++;
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+           item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+           item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       /* get the TCP/UDP info */
+       if (!item->spec || !item->mask) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Invalid ntuple mask");
+               return -rte_errno;
+       }
+
+       /*Not supported last point for range*/
+       if (item->last) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                       item, "Not supported last point for range");
+               return -rte_errno;
+
+       }
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+               tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+               /**
+                * Only support src & dst ports, tcp flags,
+                * others should be masked.
+                */
+               if (tcp_mask->hdr.sent_seq ||
+                   tcp_mask->hdr.recv_ack ||
+                   tcp_mask->hdr.data_off ||
+                   tcp_mask->hdr.rx_win ||
+                   tcp_mask->hdr.cksum ||
+                   tcp_mask->hdr.tcp_urp) {
+                       memset(filter, 0,
+                               sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+               filter->src_port_mask  = tcp_mask->hdr.src_port;
+               if (tcp_mask->hdr.tcp_flags == 0xFF) {
+                       filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+               } else if (!tcp_mask->hdr.tcp_flags) {
+                       filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+               } else {
+                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+               filter->dst_port  = tcp_spec->hdr.dst_port;
+               filter->src_port  = tcp_spec->hdr.src_port;
+               filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+       } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+               udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+               /**
+                * Only support src & dst ports,
+                * others should be masked.
+                */
+               if (udp_mask->hdr.dgram_len ||
+                   udp_mask->hdr.dgram_cksum) {
+                       memset(filter, 0,
+                               sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               filter->dst_port_mask = udp_mask->hdr.dst_port;
+               filter->src_port_mask = udp_mask->hdr.src_port;
+
+               udp_spec = (const struct rte_flow_item_udp *)item->spec;
+               filter->dst_port = udp_spec->hdr.dst_port;
+               filter->src_port = udp_spec->hdr.src_port;
+       } else {
+               sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+               /**
+                * Only support src & dst ports,
+                * others should be masked.
+                */
+               if (sctp_mask->hdr.tag ||
+                   sctp_mask->hdr.cksum) {
+                       memset(filter, 0,
+                               sizeof(struct rte_eth_ntuple_filter));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by ntuple filter");
+                       return -rte_errno;
+               }
+
+               filter->dst_port_mask = sctp_mask->hdr.dst_port;
+               filter->src_port_mask = sctp_mask->hdr.src_port;
+
+               sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+               filter->dst_port = sctp_spec->hdr.dst_port;
+               filter->src_port = sctp_spec->hdr.src_port;
+       }
+
+       /* check if the next not void item is END */
+       index++;
+       NEXT_ITEM_OF_PATTERN(item, pattern, index);
+       if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item, "Not supported by ntuple filter");
+               return -rte_errno;
+       }
+
+       /* parse action */
+       index = 0;
+
+       /**
+        * n-tuple only supports count,
+        * check if the first not void action is COUNT.
+        */
+       memset(&action, 0, sizeof(action));
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       item, "Not supported action.");
+               return -rte_errno;
+       }
+       action.type = RTE_FLOW_ACTION_TYPE_COUNT;
+
+       /* check if the next not void item is END */
+       index++;
+       NEXT_ITEM_OF_ACTION(act, actions, index);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Not supported action.");
+               return -rte_errno;
+       }
+
+       /* parse attr */
+       /* must be input direction */
+       if (!attr->ingress) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+                                  attr, "Only support ingress.");
+               return -rte_errno;
+       }
+
+       /* not supported */
+       if (attr->egress) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+                                  attr, "Not support egress.");
+               return -rte_errno;
+       }
+
+       if (attr->priority > 0xFFFF) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                  attr, "Error priority.");
+               return -rte_errno;
+       }
+       filter->priority = (uint16_t)attr->priority;
+       if (attr->priority >  FLOW_RULE_MIN_PRIORITY)
+               filter->priority = FLOW_RULE_MAX_PRIORITY;
+
+       return 0;
+}
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.h b/lib/librte_flow_classify/rte_flow_classify_parse.h
new file mode 100644
index 0000000..1d4708a
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.h
@@ -0,0 +1,74 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_PARSE_H_
+#define _RTE_FLOW_CLASSIFY_PARSE_H_
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*parse_filter_t)(const struct rte_flow_attr *attr,
+                             const struct rte_flow_item pattern[],
+                             const struct rte_flow_action actions[],
+                             struct rte_eth_ntuple_filter *filter,
+                             struct rte_flow_error *error);
+
+/* Skip all VOID items of the pattern */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+                           const struct rte_flow_item *pattern);
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void);
+
+
+/* Find a parse filter function matching the pattern */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern);
+
+/* get action data */
+struct rte_flow_action *
+classify_get_flow_action(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_PARSE_H_ */
diff --git a/lib/librte_flow_classify/rte_flow_classify_version.map b/lib/librte_flow_classify/rte_flow_classify_version.map
new file mode 100644
index 0000000..e2c9ecf
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_version.map
@@ -0,0 +1,10 @@
+DPDK_17.08 {
+       global:
+
+       rte_flow_classify_create;
+       rte_flow_classify_destroy;
+       rte_flow_classify_query;
+       rte_flow_classify_validate;
+
+       local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index c25fdd9..909ab95 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -58,6 +58,7 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
 #
 # Order is important: from higher level to lower level
 #
+_LDLIBS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)  += -lrte_flow_classify
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE)       += -lrte_pipeline
 _LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE)          += -lrte_table
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PORT)           += -lrte_port
@@ -84,7 +85,6 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EFD)            += -lrte_efd
 _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE)        += -lrte_cfgfile
 
 _LDLIBS-y += --whole-archive
-
 _LDLIBS-$(CONFIG_RTE_LIBRTE_HASH)           += -lrte_hash
 _LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST)          += -lrte_vhost
 _LDLIBS-$(CONFIG_RTE_LIBRTE_KVARGS)         += -lrte_kvargs
-- 
1.9.1
