When skip_wildcards is true there is no need to do any wildcard processing, so skip it entirely: defer initializing the wildcard masks until we know they are wanted, and make the tunnel and lookup helpers tolerate a NULL 'wc'.
Signed-off-by: Jarno Rajahalme <jrajaha...@nicira.com> --- ofproto/ofproto-dpif-xlate.c | 66 +++++++++++++++++++++++------------------- ofproto/ofproto-dpif.c | 11 +------ ofproto/tunnel.c | 35 ++++++++++++---------- 3 files changed, 57 insertions(+), 55 deletions(-) diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c index d2451a3..a1928cd 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -4376,7 +4376,7 @@ void xlate_actions(struct xlate_in *xin, struct xlate_out *xout) { struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp); - struct flow_wildcards *wc = &xout->wc; + struct flow_wildcards *wc = NULL; struct flow *flow = &xin->flow; struct rule_dpif *rule = NULL; @@ -4433,25 +4433,32 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) if (!ctx.xbridge) { return; } - ctx.rule = xin->rule; ctx.base_flow = *flow; memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel); ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst; - flow_wildcards_init_catchall(wc); - memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port); - memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type); - if (is_ip_any(flow)) { - wc->masks.nw_frag |= FLOW_NW_FRAG_MASK; + if (!xin->skip_wildcards) { + wc = &xout->wc; + flow_wildcards_init_catchall(wc); + memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port); + memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type); + if (is_ip_any(flow)) { + wc->masks.nw_frag |= FLOW_NW_FRAG_MASK; + } + if (ctx.xbridge->enable_recirc) { + /* Always exactly match recirc_id when datapath supports + * recirculation. 
*/ + wc->masks.recirc_id = UINT32_MAX; + } + if (ctx.xbridge->netflow) { + netflow_mask_wc(flow, wc); + } } is_icmp = is_icmpv4(flow) || is_icmpv6(flow); tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc); - if (ctx.xbridge->netflow) { - netflow_mask_wc(flow, wc); - } ctx.recurse = 0; ctx.resubmits = 0; @@ -4465,8 +4472,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) ctx.was_mpls = false; if (!xin->ofpacts && !ctx.rule) { - rule = rule_dpif_lookup(ctx.xbridge->ofproto, flow, - xin->skip_wildcards ? NULL : wc, + rule = rule_dpif_lookup(ctx.xbridge->ofproto, flow, wc, ctx.xin->xcache != NULL, ctx.xin->resubmit_stats, &ctx.table_id); if (ctx.xin->resubmit_stats) { @@ -4628,23 +4634,25 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) ofpbuf_uninit(&ctx.stack); ofpbuf_uninit(&ctx.action_set); - /* Clear the metadata and register wildcard masks, because we won't - * use non-header fields as part of the cache. */ - flow_wildcards_clear_non_packet_fields(wc); - - /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow uses - * the low 8 bits of the 16-bit tp_src and tp_dst members to represent - * these fields. The datapath interface, on the other hand, represents - * them with just 8 bits each. This means that if the high 8 bits of the - * masks for these fields somehow become set, then they will get chopped - * off by a round trip through the datapath, and revalidation will spot - * that as an inconsistency and delete the flow. Avoid the problem here by - * making sure that only the low 8 bits of either field can be unwildcarded - * for ICMP. - */ - if (is_icmp) { - wc->masks.tp_src &= htons(UINT8_MAX); - wc->masks.tp_dst &= htons(UINT8_MAX); + if (wc) { + /* Clear the metadata and register wildcard masks, because we won't + * use non-header fields as part of the cache. */ + flow_wildcards_clear_non_packet_fields(wc); + + /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. 
struct flow + * uses the low 8 bits of the 16-bit tp_src and tp_dst members to + * represent these fields. The datapath interface, on the other hand, + * represents them with just 8 bits each. This means that if the high + * 8 bits of the masks for these fields somehow become set, then they + * will get chopped off by a round trip through the datapath, and + * revalidation will spot that as an inconsistency and delete the flow. + * Avoid the problem here by making sure that only the low 8 bits of + * either field can be unwildcarded for ICMP. + */ + if (is_icmp) { + wc->masks.tp_src &= htons(UINT8_MAX); + wc->masks.tp_dst &= htons(UINT8_MAX); + } } } diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 9b67518..38ad6e2 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -3803,16 +3803,7 @@ rule_dpif_lookup(struct ofproto_dpif *ofproto, struct flow *flow, struct flow_wildcards *wc, bool take_ref, const struct dpif_flow_stats *stats, uint8_t *table_id) { - *table_id = 0; - - if (ofproto_dpif_get_enable_recirc(ofproto)) { - /* Always exactly match recirc_id since datapath supports - * recirculation. 
*/ - if (wc) { - wc->masks.recirc_id = UINT32_MAX; - } - *table_id = rule_dpif_lookup_get_init_table_id(flow); - } + *table_id = rule_dpif_lookup_get_init_table_id(flow); return rule_dpif_lookup_from_table(ofproto, flow, wc, take_ref, stats, table_id, flow->in_port.ofp_port, true, diff --git a/ofproto/tunnel.c b/ofproto/tunnel.c index d079a24..f621efb 100644 --- a/ofproto/tunnel.c +++ b/ofproto/tunnel.c @@ -333,7 +333,9 @@ tnl_ecn_ok(const struct flow *base_flow, struct flow *flow, { if (is_ip_any(base_flow)) { if ((flow->tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) { - wc->masks.nw_tos |= IP_ECN_MASK; + if (wc) { + wc->masks.nw_tos |= IP_ECN_MASK; + } if ((base_flow->nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) { VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE" " but is not ECN capable"); @@ -362,21 +364,22 @@ tnl_xlate_init(const struct flow *base_flow, struct flow *flow, * always unwildcard the 'in_port', we do not need to unwildcard * the 'tunnel.ip_dst' for non-tunneled packets. */ if (tnl_port_should_receive(flow)) { - wc->masks.tunnel.tun_id = OVS_BE64_MAX; - wc->masks.tunnel.ip_src = OVS_BE32_MAX; - wc->masks.tunnel.ip_dst = OVS_BE32_MAX; - wc->masks.tunnel.flags = (FLOW_TNL_F_DONT_FRAGMENT | - FLOW_TNL_F_CSUM | - FLOW_TNL_F_KEY); - wc->masks.tunnel.ip_tos = UINT8_MAX; - wc->masks.tunnel.ip_ttl = UINT8_MAX; - /* The tp_src and tp_dst members in flow_tnl are set to be always - * wildcarded, not to unwildcard them here. 
*/ - wc->masks.tunnel.tp_src = 0; - wc->masks.tunnel.tp_dst = 0; - - memset(&wc->masks.pkt_mark, 0xff, sizeof wc->masks.pkt_mark); - + if (wc) { + wc->masks.tunnel.tun_id = OVS_BE64_MAX; + wc->masks.tunnel.ip_src = OVS_BE32_MAX; + wc->masks.tunnel.ip_dst = OVS_BE32_MAX; + wc->masks.tunnel.flags = (FLOW_TNL_F_DONT_FRAGMENT | + FLOW_TNL_F_CSUM | + FLOW_TNL_F_KEY); + wc->masks.tunnel.ip_tos = UINT8_MAX; + wc->masks.tunnel.ip_ttl = UINT8_MAX; + /* The tp_src and tp_dst members in flow_tnl are set to be always + * wildcarded, not to unwildcard them here. */ + wc->masks.tunnel.tp_src = 0; + wc->masks.tunnel.tp_dst = 0; + + memset(&wc->masks.pkt_mark, 0xff, sizeof wc->masks.pkt_mark); + } if (!tnl_ecn_ok(base_flow, flow, wc)) { return false; } -- 1.7.10.4 _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev