Hi Qiming,

> -----Original Message-----
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Qiming Yang
> Sent: Wednesday, June 12, 2019 3:50 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.y...@intel.com>
> Subject: [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
> 
> This patch adds ice_flow_create, ice_flow_destroy,
> ice_flow_flush and ice_flow_validate support;
> these are going to be used to handle all the generic filters.
> 
> Signed-off-by: Qiming Yang <qiming.y...@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   7 +-
>  drivers/net/ice/ice_generic_flow.c | 567 +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 404 ++++++++++++++++++++++++++
>  5 files changed, 1022 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> index b10d826..32abeb6 100644
> --- a/drivers/net/ice/Makefile
[...]

>       bool offset_loaded;
>       bool adapter_stopped;
> +     struct ice_flow_list flow_list;
>  };
> 
>  /**
> diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> new file mode 100644
> index 0000000..4fb50b2
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -0,0 +1,567 @@

License header is missing.
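
For reference, it should be the same SPDX block as in the new header file
below (with the year fixed as noted there):

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */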

> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_malloc.h>
> +
> +#include "ice_ethdev.h"
> +#include "ice_generic_flow.h"
> +#include "ice_switch_filter.h"
> +

[...]
> +                             if (eth_mask->type == RTE_BE16(0xffff))
> +                                     input_set |= ICE_INSET_ETHERTYPE;
> +                     }
> +                     break;
> +             case RTE_FLOW_ITEM_TYPE_IPV4:
> +                     ipv4_spec = item->spec;
> +                     ipv4_mask = item->mask;
> +
> +                     if (!(ipv4_spec && ipv4_mask)) {
> +                             rte_flow_error_set(error, EINVAL,
> +                                        RTE_FLOW_ERROR_TYPE_ITEM,
> +                                        item,
> +                                        "Invalid IPv4 spec or mask.");
> +                             return 0;
> +                     }

There are a lot of checks like this in this function. Could we just check
"item->spec && item->mask" once, before the switch {}?
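
Something like this untested sketch is what I have in mind (assuming every
item type handled here needs both spec and mask; a mask-only item such as
the ICMP6 case later in this function would need an exception):

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		/* one common check instead of repeating it per case */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid spec or mask.");
			return 0;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* ... existing per-type mask handling ... */
			break;
		/* ... other cases unchanged ... */
		}
	}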

> +
> +                     /* Check IPv4 mask and update input set */
> +                     if (ipv4_mask->hdr.version_ihl ||
> +                         ipv4_mask->hdr.total_length ||
> +                         ipv4_mask->hdr.packet_id ||
> +                         ipv4_mask->hdr.hdr_checksum) {
> +                             rte_flow_error_set(error, EINVAL,
> +                                        RTE_FLOW_ERROR_TYPE_ITEM,
> +                                        item,
> +                                        "Invalid IPv4 mask.");
> +                             return 0;
> +                     }
> +
> +                     if (outer_ip) {
> +                             if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +                                     input_set |= ICE_INSET_IPV4_SRC;
> +                             if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +                                     input_set |= ICE_INSET_IPV4_DST;
> +                             if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
> +                                     input_set |= ICE_INSET_IPV4_TOS;
> +                             if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> +                                     input_set |= ICE_INSET_IPV4_TTL;
> +                             if (ipv4_mask->hdr.fragment_offset == 0)
> +                                     input_set |= ICE_INSET_IPV4_PROTO;
> +                             outer_ip = false;
> +                     } else {
> +                             if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +                                     input_set |= ICE_INSET_TUN_IPV4_SRC;
> +                             if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +                                     input_set |= ICE_INSET_TUN_IPV4_DST;
> +                             if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> +                                     input_set |= ICE_INSET_TUN_IPV4_TTL;
> +                             if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
> +                                     input_set |= ICE_INSET_TUN_IPV4_PROTO;
> +                     }
> +                     break;
> +             case RTE_FLOW_ITEM_TYPE_IPV6:
> +                     ipv6_spec = item->spec;
> +                     ipv6_mask = item->mask;
> +
> +                     if (!(ipv6_spec && ipv6_mask)) {
> +                             rte_flow_error_set(error, EINVAL,
> +                                     RTE_FLOW_ERROR_TYPE_ITEM,
> +                                     item, "Invalid IPv6 spec or mask");
> +                             return 0;
> +                     }
> +
> +                     if (ipv6_mask->hdr.payload_len ||
> +                         ipv6_mask->hdr.vtc_flow) {
> +                             rte_flow_error_set(error, EINVAL,
> +                                        RTE_FLOW_ERROR_TYPE_ITEM,
> +                                        item,
> +                                        "Invalid IPv6 mask");
> +                             return 0;
> +                     }
> +
> +                     if (outer_ip) {
> +                             if (!memcmp(ipv6_mask->hdr.src_addr,

[...]

> +                                                item,
> +                                                "Invalid ICMP mask");
> +                             return 0;
> +                     }
> +
> +                     if (icmp_mask->hdr.icmp_type == UINT8_MAX)
> +                             input_set |= ICE_INSET_ICMP;
> +                     break;
> +             case RTE_FLOW_ITEM_TYPE_ICMP6:
> +                     icmp6_mask = item->mask;
> +                     if (icmp6_mask->code ||
> +                         icmp6_mask->checksum) {
> +                             rte_flow_error_set(error, EINVAL,
> +                                                RTE_FLOW_ERROR_TYPE_ITEM,
> +                                                item,
> +                                                "Invalid ICMP6 mask");
> +                             return 0;
> +                     }
> +
> +                     if (icmp6_mask->type == UINT8_MAX)
> +                     input_set |= ICE_INSET_ICMP6;

Add a '\t' for indentation.
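
i.e.:

	if (icmp6_mask->type == UINT8_MAX)
		input_set |= ICE_INSET_ICMP6;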

> +                     break;
> +             default:
> +                     rte_flow_error_set(error, EINVAL,
> +                                        RTE_FLOW_ERROR_TYPE_ITEM,
> +                                        item,
> +                                        "Invalid mask no exist");
> +                     break;
> +             }
> +     }
> +     return input_set;
> +}
> +
> +static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
> +                     uint64_t inset, struct rte_flow_error *error)
> +{
> +     uint64_t fields;
> +
> +     /* get valid field */
> +     fields = ice_get_flow_field(pattern, error);
> +     if ((!fields) || (fields && (!inset))) {
> +             rte_flow_error_set(error, EINVAL,
> +                                RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> +                                pattern,
> +                                "Invalid input set");
> +             return -rte_errno;
> +     }
> +
> +     return 0;
> +}
> +
> +static int ice_flow_valid_action(const struct rte_flow_action *actions,
> +                                    struct rte_flow_error *error)
> +{
> +     switch (actions->type) {
> +     case RTE_FLOW_ACTION_TYPE_QUEUE:
> +             break;
> +     case RTE_FLOW_ACTION_TYPE_DROP:
> +             break;
> +     default:
> +             rte_flow_error_set(error, EINVAL,
> +                                RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +                                "Invalid action.");
> +             return -rte_errno;
> +     }
> +
> +     return 0;
> +}
> +
> +static int
> +ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
> +                const struct rte_flow_attr *attr,
> +                const struct rte_flow_item pattern[],
> +                const struct rte_flow_action actions[],
> +                struct rte_flow_error *error)
> +{
> +     uint64_t inset = 0;
> +     int ret = ICE_ERR_NOT_SUPPORTED;
> +
> +     if (!pattern) {
> +             rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +                                NULL, "NULL pattern.");
> +             return -rte_errno;
> +     }
> +
> +     if (!actions) {
> +             rte_flow_error_set(error, EINVAL,
> +                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +                                NULL, "NULL action.");
> +             return -rte_errno;
> +     }
> +
> +     if (!attr) {
> +             rte_flow_error_set(error, EINVAL,
> +                                RTE_FLOW_ERROR_TYPE_ATTR,
> +                                NULL, "NULL attribute.");
> +             return -rte_errno;
> +     }
> +
> +     ret = ice_flow_valid_attr(attr, error);
> +     if (!ret)
> +             return ret;
> +
> +     inset = ice_flow_valid_pattern(pattern, error);
> +     if (!inset)
> +             return -rte_errno;
> +
> +     ret = ice_flow_valid_inset(pattern, inset, error);
> +     if (ret)
> +             return ret;
> +
> +     ret = ice_flow_valid_action(actions, error);
> +     if (ret)
> +             return ret;
> +
> +     return 0;
> +}
> +
> +static struct rte_flow *
> +ice_flow_create(struct rte_eth_dev *dev,
> +              const struct rte_flow_attr *attr,
> +              const struct rte_flow_item pattern[],
> +              const struct rte_flow_action actions[],
> +              struct rte_flow_error *error)
> +{
> +     struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +     struct rte_flow *flow = NULL;
> +     int ret;
> +
> +     flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
> +     if (!flow) {
> +             rte_flow_error_set(error, ENOMEM,
> +                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +                                "Failed to allocate memory");
> +             return flow;
> +     }
> +
> +     ret = ice_flow_validate(dev, attr, pattern, actions, error);
> +     if (ret < 0)
> +             return NULL;
> +
> +     ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
> +     if (ret)
> +             goto free_flow;
> +
> +     TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> +     return flow;
> +
> +free_flow:
> +     rte_flow_error_set(error, -ret,
> +                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +                        "Failed to create flow.");
> +     rte_free(flow);
> +     return NULL;
> +}
> +
> +static int
> +ice_flow_destroy(struct rte_eth_dev *dev,
> +              struct rte_flow *flow,
> +              struct rte_flow_error *error)
> +{
> +     struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +     int ret = 0;
> +
> +     ret = ice_destroy_switch_filter(pf, flow);
> +
> +     if (!ret) {
> +             TAILQ_REMOVE(&pf->flow_list, flow, node);
> +             rte_free(flow);
> +     } else
> +             rte_flow_error_set(error, -ret,
> +                                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +                                "Failed to destroy flow.");
> +
> +     return ret;
> +}
> +
> +static int
> +ice_flow_flush(struct rte_eth_dev *dev,
> +            struct rte_flow_error *error)
> +{
> +     struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +     struct rte_flow *p_flow;
> +     int ret;
> +
> +     TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
> +             ret = ice_flow_destroy(dev, p_flow, error);
> +             if (ret) {
> +                     rte_flow_error_set(error, -ret,
> +                                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +                                        "Failed to flush SW flows.");
> +                     return -rte_errno;
> +             }
> +     }
> +
> +     return ret;
> +}
> diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
> new file mode 100644
> index 0000000..46c3461
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.h
> @@ -0,0 +1,404 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation

s/2018/2019/g

> + */
> +
> +#ifndef _ICE_GENERIC_FLOW_H_
> +#define _ICE_GENERIC_FLOW_H_
> +
> +#include <rte_flow_driver.h>
> +
> +struct ice_flow_pattern {
[...]

> +     RTE_FLOW_ITEM_TYPE_IPV4,
> +     RTE_FLOW_ITEM_TYPE_UDP,
> +     RTE_FLOW_ITEM_TYPE_ETH,
> +     RTE_FLOW_ITEM_TYPE_IPV6,
> +     RTE_FLOW_ITEM_TYPE_SCTP,
> +     RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_icmp[] = {
> +     RTE_FLOW_ITEM_TYPE_ETH,
> +     RTE_FLOW_ITEM_TYPE_IPV4,
> +     RTE_FLOW_ITEM_TYPE_UDP,
> +     RTE_FLOW_ITEM_TYPE_ETH,
> +     RTE_FLOW_ITEM_TYPE_IPV6,
> +     RTE_FLOW_ITEM_TYPE_ICMP,
> +     RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static struct ice_flow_pattern ice_supported_patterns[] = {
> +     {pattern_ethertype, INSET_ETHER},
> +     {pattern_ipv4, INSET_MAC_IPV4},
> +     {pattern_ipv4_udp, INSET_MAC_IPV4_L4},
> +     {pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
> +     {pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
> +     {pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
> +     {pattern_ipv6, INSET_MAC_IPV6},
> +     {pattern_ipv6_udp, INSET_MAC_IPV6_L4},
> +     {pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
> +     {pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
> +     {pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
> +     {pattern_ipv4_tunnel_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +     {pattern_ipv4_tunnel_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +     {pattern_ipv4_tunnel_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +     {pattern_ipv4_tunnel_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +     {pattern_ipv4_tunnel_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +     {pattern_ipv4_tunnel_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +     {pattern_ipv4_tunnel_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +     {pattern_ipv4_tunnel_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +     {pattern_ipv4_tunnel_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +     {pattern_ipv4_tunnel_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +     {pattern_ipv4_tunnel_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +     {pattern_ipv4_tunnel_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +     {pattern_ipv4_tunnel_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +     {pattern_ipv4_tunnel_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +     {pattern_ipv4_tunnel_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +     {pattern_ipv4_tunnel_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +     {pattern_ipv4_tunnel_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +     {pattern_ipv4_tunnel_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +     {pattern_ipv4_tunnel_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +     {pattern_ipv4_tunnel_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +};
> +
> +#endif
> --
> 2.9.5

I have the same concern as Beilei about the duplication: some flow validation
work is done in this patch, but the switch filter part (1/3) also implements
similar checks.
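
For example (a rough sketch, assuming the checks in 1/3 are a subset of what
ice_flow_validate() already covers), ice_flow_create() could stay the single
place where validation happens:

	/* validate once in the generic layer ... */
	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;

	/* ... so that ice_create_switch_filter() can drop its own
	 * pattern/action re-checks and only translate to HW config
	 */
	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
	if (ret)
		goto free_flow;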

BRs,
Xiao
