For VXLAN offload, tunnel properties must be matched on the outer header, in addition to the matches on the inner packet. Add a function that parses VXLAN tunnel matches.
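
As a minimal, self-contained illustration (not part of the patch; plain libc
calls stand in for OVS's put_unaligned_be32()/ntohll() helpers, and 100 is an
arbitrary example tunnel ID), the sketch below shows the VNI byte layout the
patch relies on: the 24-bit VNI is shifted left by 8 bits and stored in
network byte order in the 3-byte vni field, and dump_flow_pattern() recovers
it with ntohl(...) >> 8:

    /* Standalone sketch of the VNI byte layout used for the VXLAN item.
     * Assumes only libc; 100 is an arbitrary example tunnel ID. */
    #include <arpa/inet.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        uint64_t tun_id = 100;   /* tunnel ID from the flow key (host order) */
        uint8_t vni[3];          /* mirrors the 3-byte rte_flow_item_vxlan vni */
        uint32_t wire = htonl((uint32_t) tun_id << 8);

        /* Keep the three most significant bytes, in network byte order. */
        memcpy(vni, &wire, sizeof vni);
        printf("vni bytes: %02x %02x %02x\n", vni[0], vni[1], vni[2]);

        /* Recover the VNI the way dump_flow_pattern() does, but through a
         * zero-padded 32-bit buffer instead of reading past the field. */
        uint32_t raw = 0;
        memcpy(&raw, vni, sizeof vni);
        printf("vni value: %"PRIu32"\n", ntohl(raw) >> 8);

        return 0;
    }

Compiled and run, this should print "vni bytes: 00 00 64" followed by
"vni value: 100" on both little- and big-endian hosts.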
Signed-off-by: Eli Britstein <el...@nvidia.com>
Reviewed-by: Gaetan Rivet <gaet...@nvidia.com>
---
 NEWS                      |   1 +
 lib/netdev-offload-dpdk.c | 155 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 155 insertions(+), 1 deletion(-)

diff --git a/NEWS b/NEWS
index 74eed0a6c..2fee43b9a 100644
--- a/NEWS
+++ b/NEWS
@@ -34,6 +34,7 @@ v2.15.0 - 15 Feb 2021
      * Removed support for vhost-user dequeue zero-copy.
      * Add support for DPDK 20.11.
      * Add hardware offload support for tunnel pop action (experimental).
+     * Add hardware offload support for VXLAN flows (experimental).
    - Userspace datapath:
      * Add the 'pmd' option to "ovs-appctl dpctl/dump-flows", which
        restricts a flow dump to a single PMD thread if set.
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 197deb96b..52a74a707 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -372,6 +372,20 @@ dump_flow_pattern(struct ds *s,
                               ipv6_mask->hdr.hop_limits);
         }
         ds_put_cstr(s, "/ ");
+    } else if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+        const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
+        const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
+
+        ds_put_cstr(s, "vxlan ");
+        if (vxlan_spec) {
+            if (!vxlan_mask) {
+                vxlan_mask = &rte_flow_item_vxlan_mask;
+            }
+            DUMP_PATTERN_ITEM(vxlan_mask->vni, "vni", "%"PRIu32,
+                              ntohl(*(ovs_be32 *) vxlan_spec->vni) >> 8,
+                              ntohl(*(ovs_be32 *) vxlan_mask->vni) >> 8);
+        }
+        ds_put_cstr(s, "/ ");
     } else {
         ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
     }
@@ -863,15 +877,154 @@ out:
     return ret;
 }
 
+static int
+parse_tnl_ip_match(struct flow_patterns *patterns,
+                   struct match *match,
+                   uint8_t proto)
+{
+    struct flow *consumed_masks;
+
+    consumed_masks = &match->wc.masks;
+    /* IP v4 */
+    if (match->wc.masks.tunnel.ip_src || match->wc.masks.tunnel.ip_dst) {
+        struct rte_flow_item_ipv4 *spec, *mask;
+
+        spec = xzalloc(sizeof *spec);
+        mask = xzalloc(sizeof *mask);
+
+        spec->hdr.type_of_service = match->flow.tunnel.ip_tos;
+        spec->hdr.time_to_live = match->flow.tunnel.ip_ttl;
+        spec->hdr.next_proto_id = proto;
+        spec->hdr.src_addr = match->flow.tunnel.ip_src;
+        spec->hdr.dst_addr = match->flow.tunnel.ip_dst;
+
+        mask->hdr.type_of_service = match->wc.masks.tunnel.ip_tos;
+        mask->hdr.time_to_live = match->wc.masks.tunnel.ip_ttl;
+        mask->hdr.next_proto_id = UINT8_MAX;
+        mask->hdr.src_addr = match->wc.masks.tunnel.ip_src;
+        mask->hdr.dst_addr = match->wc.masks.tunnel.ip_dst;
+
+        consumed_masks->tunnel.ip_tos = 0;
+        consumed_masks->tunnel.ip_ttl = 0;
+        consumed_masks->tunnel.ip_src = 0;
+        consumed_masks->tunnel.ip_dst = 0;
+
+        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV4, spec, mask);
+    } else if (!is_all_zeros(&match->wc.masks.tunnel.ipv6_src,
+                             sizeof(struct in6_addr)) ||
+               !is_all_zeros(&match->wc.masks.tunnel.ipv6_dst,
+                             sizeof(struct in6_addr))) {
+        /* IP v6 */
+        struct rte_flow_item_ipv6 *spec, *mask;
+
+        spec = xzalloc(sizeof *spec);
+        mask = xzalloc(sizeof *mask);
+
+        spec->hdr.proto = proto;
+        spec->hdr.hop_limits = match->flow.tunnel.ip_ttl;
+        spec->hdr.vtc_flow = htonl((uint32_t) match->flow.tunnel.ip_tos <<
+                                   RTE_IPV6_HDR_TC_SHIFT);
+        memcpy(spec->hdr.src_addr, &match->flow.tunnel.ipv6_src,
+               sizeof spec->hdr.src_addr);
+        memcpy(spec->hdr.dst_addr, &match->flow.tunnel.ipv6_dst,
+               sizeof spec->hdr.dst_addr);
+
+        mask->hdr.proto = UINT8_MAX;
+        mask->hdr.hop_limits = match->wc.masks.tunnel.ip_ttl;
+        mask->hdr.vtc_flow = htonl((uint32_t) match->wc.masks.tunnel.ip_tos <<
+                                   RTE_IPV6_HDR_TC_SHIFT);
+        memcpy(mask->hdr.src_addr, &match->wc.masks.tunnel.ipv6_src,
+               sizeof mask->hdr.src_addr);
+        memcpy(mask->hdr.dst_addr, &match->wc.masks.tunnel.ipv6_dst,
+               sizeof mask->hdr.dst_addr);
+
+        consumed_masks->tunnel.ip_tos = 0;
+        consumed_masks->tunnel.ip_ttl = 0;
+        memset(&consumed_masks->tunnel.ipv6_src, 0,
+               sizeof consumed_masks->tunnel.ipv6_src);
+        memset(&consumed_masks->tunnel.ipv6_dst, 0,
+               sizeof consumed_masks->tunnel.ipv6_dst);
+
+        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_IPV6, spec, mask);
+    } else {
+        VLOG_ERR_RL(&rl, "Tunnel L3 protocol is neither IPv4 nor IPv6");
+        return -1;
+    }
+
+    return 0;
+}
+
+static void
+parse_tnl_udp_match(struct flow_patterns *patterns,
+                    struct match *match)
+{
+    struct flow *consumed_masks;
+    struct rte_flow_item_udp *spec, *mask;
+
+    consumed_masks = &match->wc.masks;
+
+    spec = xzalloc(sizeof *spec);
+    mask = xzalloc(sizeof *mask);
+
+    spec->hdr.src_port = match->flow.tunnel.tp_src;
+    spec->hdr.dst_port = match->flow.tunnel.tp_dst;
+
+    mask->hdr.src_port = match->wc.masks.tunnel.tp_src;
+    mask->hdr.dst_port = match->wc.masks.tunnel.tp_dst;
+
+    consumed_masks->tunnel.tp_src = 0;
+    consumed_masks->tunnel.tp_dst = 0;
+
+    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_UDP, spec, mask);
+}
+
+static int
+parse_vxlan_match(struct flow_patterns *patterns,
+                  struct match *match)
+{
+    struct rte_flow_item_vxlan *vx_spec, *vx_mask;
+    struct flow *consumed_masks;
+    int ret;
+
+    ret = parse_tnl_ip_match(patterns, match, IPPROTO_UDP);
+    if (ret) {
+        return -1;
+    }
+    parse_tnl_udp_match(patterns, match);
+
+    consumed_masks = &match->wc.masks;
+    /* VXLAN */
+    vx_spec = xzalloc(sizeof *vx_spec);
+    vx_mask = xzalloc(sizeof *vx_mask);
+
+    put_unaligned_be32((ovs_be32 *) vx_spec->vni,
+                       htonl(ntohll(match->flow.tunnel.tun_id) << 8));
+    put_unaligned_be32((ovs_be32 *) vx_mask->vni,
+                       htonl(ntohll(match->wc.masks.tunnel.tun_id) << 8));
+
+    consumed_masks->tunnel.tun_id = 0;
+    consumed_masks->tunnel.flags = 0;
+
+    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_VXLAN, vx_spec, vx_mask);
+    return 0;
+}
+
 static int
 parse_flow_tnl_match(struct netdev *tnldev,
                      struct flow_patterns *patterns,
                      odp_port_t orig_in_port,
-                     struct match *match OVS_UNUSED)
+                     struct match *match)
 {
     int ret;
 
     ret = add_vport_match(patterns, orig_in_port, tnldev);
+    if (ret) {
+        return ret;
+    }
+
+    if (!strcmp(netdev_get_type(tnldev), "vxlan")) {
+        ret = parse_vxlan_match(patterns, match);
+    }
 
     return ret;
 }
-- 
2.28.0.2311.g225365fb51