This adds a new WITH_DP_GROUP() macro that can be used as an argument to ovn_lflow_add().
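For example (taken from the build_lrouter_defrag_flows_for_lb() hunk
below), a call using the new macro looks like:

    ovn_lflow_add(
        lflows, NULL, S_ROUTER_IN_DEFRAG, prio, ds_cstr(match),
        "ct_dnat;", lb_dps->lflow_ref,
        WITH_HINT(&lb_dps->lb->nlb->header_),
        WITH_DP_GROUP(lb_dps->nb_lr_map.map, ods_size(lr_datapaths)));

That is, the datapath group bitmap travels through WITH_DP_GROUP()
instead of through the dedicated DP_BITMAP/DP_BITMAP_LEN positional
arguments of ovn_lflow_add_with_dp_group().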
This commit converts all uses of ovn_lflow_add_with_dp_group() to
ovn_lflow_add().

Signed-off-by: Mark Michelson <[email protected]>
---
 northd/lflow-mgr.h | 10 ++---
 northd/northd.c    | 96 +++++++++++++++++++++++++++-------------------
 2 files changed, 60 insertions(+), 46 deletions(-)

diff --git a/northd/lflow-mgr.h b/northd/lflow-mgr.h
index 7c6aaa269..b0afa5349 100644
--- a/northd/lflow-mgr.h
+++ b/northd/lflow-mgr.h
@@ -106,6 +106,9 @@ void lflow_table_add_lflow(struct lflow_table *, const struct ovn_datapath *,
                            struct lflow_ref *);
 
 #define WITH_HINT(HINT) .stage_hint = HINT
+#define WITH_DP_GROUP(DP_BITMAP, DP_BITMAP_LEN) \
+    .dp_bitmap = DP_BITMAP, \
+    .dp_bitmap_len = DP_BITMAP_LEN
 /* The IN_OUT_PORT argument tells the lport name that appears in the MATCH,
  * which helps ovn-controller to bypass lflows parsing when the lport is
  * not local to the chassis. The critiera of the lport to be added using this
@@ -174,13 +177,6 @@ void lflow_table_add_lflow(struct lflow_table *, const struct ovn_datapath *,
         }, OVS_SOURCE_LOCATOR)
 
 /* Adds a row with the specified contents to the Logical_Flow table. */
-#define ovn_lflow_add_with_dp_group(LFLOW_TABLE, DP_BITMAP, DP_BITMAP_LEN, \
-                                    STAGE, PRIORITY, MATCH, ACTIONS, \
-                                    STAGE_HINT, LFLOW_REF) \
-    lflow_table_add_lflow(LFLOW_TABLE, NULL, DP_BITMAP, DP_BITMAP_LEN, STAGE, \
-                          PRIORITY, MATCH, ACTIONS, NULL, NULL, STAGE_HINT, \
-                          OVS_SOURCE_LOCATOR, NULL, LFLOW_REF)
-
 #define ovn_lflow_add_default_drop(LFLOW_TABLE, OD, STAGE, LFLOW_REF) \
     lflow_table_add_lflow(LFLOW_TABLE, OD, NULL, 0, STAGE, 0, "1", \
                           debug_drop_action(), NULL, NULL, NULL, \
diff --git a/northd/northd.c b/northd/northd.c
index 6c3c837d3..ecbad180c 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -8084,10 +8084,11 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
                           lb_vip->port_str);
         }
 
-        ovn_lflow_add_with_dp_group(
-            lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
-            S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match), ds_cstr(action),
-            &lb->nlb->header_, lb_dps->lflow_ref);
+        ovn_lflow_add(
+            lflows, NULL, S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match),
+            ds_cstr(action), lb_dps->lflow_ref,
+            WITH_HINT(&lb->nlb->header_),
+            WITH_DP_GROUP(lb_dps->nb_ls_map.map, ods_size(ls_datapaths)));
 
         struct lflow_ref *lflow_ref = lb_dps->lflow_ref;
         struct hmapx_node *hmapx_node;
@@ -8193,10 +8194,12 @@ build_lb_affinity_lr_flows(struct lflow_table *lflows,
     }
 
     /* Create affinity check flow. */
-    ovn_lflow_add_with_dp_group(
-        lflows, dp_bitmap, ods_size(lr_datapaths), S_ROUTER_IN_LB_AFF_CHECK,
+    ovn_lflow_add(
+        lflows, NULL, S_ROUTER_IN_LB_AFF_CHECK,
         100, new_lb_match, REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;",
-        &lb->nlb->header_, lflow_ref);
+        lflow_ref,
+        WITH_HINT(&lb->nlb->header_),
+        WITH_DP_GROUP(dp_bitmap, ods_size(lr_datapaths)));
 
     /* Prepare common part of affinity LB and affinity learn action. */
     ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
@@ -8274,17 +8277,20 @@ build_lb_affinity_lr_flows(struct lflow_table *lflows,
                       lb->affinity_timeout);
 
         /* Forward to OFTABLE_CHK_LB_AFFINITY table to store flow tuple. */
-        ovn_lflow_add_with_dp_group(
-            lflows, dp_bitmap, ods_size(lr_datapaths),
-            S_ROUTER_IN_LB_AFF_LEARN, 100, ds_cstr(&aff_match_learn),
-            ds_cstr(&aff_action_learn), &lb->nlb->header_,
-            lflow_ref);
+        ovn_lflow_add(
+            lflows, NULL, S_ROUTER_IN_LB_AFF_LEARN, 100,
+            ds_cstr(&aff_match_learn),
+            ds_cstr(&aff_action_learn),
+            lflow_ref,
+            WITH_HINT(&lb->nlb->header_),
+            WITH_DP_GROUP(dp_bitmap, ods_size(lr_datapaths)));
 
         /* Use already selected backend within affinity timeslot. */
-        ovn_lflow_add_with_dp_group(
-            lflows, dp_bitmap, ods_size(lr_datapaths), S_ROUTER_IN_DNAT, 150,
-            ds_cstr(&aff_match), ds_cstr(&aff_action), &lb->nlb->header_,
-            lflow_ref);
+        ovn_lflow_add(
+            lflows, NULL, S_ROUTER_IN_DNAT, 150,
+            ds_cstr(&aff_match), ds_cstr(&aff_action), lflow_ref,
+            WITH_HINT(&lb->nlb->header_),
+            WITH_DP_GROUP(dp_bitmap, ods_size(lr_datapaths)));
 
         ds_truncate(&aff_action, aff_action_len);
         ds_truncate(&aff_action_learn, aff_action_learn_len);
@@ -8364,10 +8370,12 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
 
     static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
 
-    ovn_lflow_add_with_dp_group(
-        lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
+    ovn_lflow_add(
+        lflows, NULL,
         S_SWITCH_IN_LB_AFF_CHECK, 100, ds_cstr(&new_lb_match), aff_check,
-        &lb_dps->lb->nlb->header_, lflow_ref);
+        lflow_ref,
+        WITH_HINT(&lb_dps->lb->nlb->header_),
+        WITH_DP_GROUP(lb_dps->nb_ls_map.map, ods_size(ls_datapaths)));
     ds_destroy(&new_lb_match);
 
     struct ds aff_action = DS_EMPTY_INITIALIZER;
@@ -8446,17 +8454,20 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
                       lb->affinity_timeout);
 
        /* Forward to OFTABLE_CHK_LB_AFFINITY table to store flow tuple. */
-        ovn_lflow_add_with_dp_group(
-            lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
+        ovn_lflow_add(
+            lflows, NULL,
             S_SWITCH_IN_LB_AFF_LEARN, 100, ds_cstr(&aff_match_learn),
-            ds_cstr(&aff_action_learn), &lb->nlb->header_,
-            lflow_ref);
+            ds_cstr(&aff_action_learn), lflow_ref,
+            WITH_HINT(&lb->nlb->header_),
+            WITH_DP_GROUP(lb_dps->nb_ls_map.map, ods_size(ls_datapaths)));
 
         /* Use already selected backend within affinity timeslot. */
-        ovn_lflow_add_with_dp_group(
-            lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
+        ovn_lflow_add(
+            lflows, NULL,
             S_SWITCH_IN_LB, 150, ds_cstr(&aff_match), ds_cstr(&aff_action),
-            &lb->nlb->header_, lflow_ref);
+            lflow_ref,
+            WITH_HINT(&lb->nlb->header_),
+            WITH_DP_GROUP(lb_dps->nb_ls_map.map, ods_size(ls_datapaths)));
 
         ds_truncate(&aff_action, aff_action_len);
         ds_truncate(&aff_action_learn, aff_action_learn_len);
@@ -8582,11 +8593,14 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
             }
         }
         if (!reject || build_non_meter) {
-            ovn_lflow_add_with_dp_group(
-                lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map.map,
-                ods_size(ls_datapaths), S_SWITCH_IN_LB, priority,
-                ds_cstr(match), ds_cstr(action), &lb->nlb->header_,
-                lb_dps->lflow_ref);
+            ovn_lflow_add(
+                lflows, NULL, S_SWITCH_IN_LB, priority,
+                ds_cstr(match), ds_cstr(action), lb_dps->lflow_ref,
+                WITH_HINT(&lb->nlb->header_),
+                WITH_DP_GROUP(dp_non_meter ?
+                              dp_non_meter :
+                              lb_dps->nb_ls_map.map,
+                              ods_size(ls_datapaths)));
         }
         bitmap_free(dp_non_meter);
     }
@@ -12798,10 +12812,13 @@ build_gw_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
         }
     }
     if (!ctx->reject || build_non_meter) {
-        ovn_lflow_add_with_dp_group(ctx->lflows,
-            dp_non_meter ? dp_non_meter : dp_bitmap, ods_size(lr_datapaths),
-            S_ROUTER_IN_DNAT, ctx->prio, ds_cstr(ctx->new_match),
-            ctx->new_action[type], &ctx->lb->nlb->header_, lflow_ref);
+        ovn_lflow_add(ctx->lflows, NULL,
+                      S_ROUTER_IN_DNAT, ctx->prio, ds_cstr(ctx->new_match),
+                      ctx->new_action[type], lflow_ref,
+                      WITH_HINT(&ctx->lb->nlb->header_),
+                      WITH_DP_GROUP(dp_non_meter ?
+                                    dp_non_meter :
+                                    dp_bitmap, ods_size(lr_datapaths)));
     }
     bitmap_free(dp_non_meter);
 }
@@ -13078,10 +13095,11 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
         ds_put_format(match, "ip && ip%c.dst == %s",
                       ipv6 ? '6' : '4', lb_vip->vip_str);
 
-        ovn_lflow_add_with_dp_group(
-            lflows, lb_dps->nb_lr_map.map, ods_size(lr_datapaths),
-            S_ROUTER_IN_DEFRAG, prio, ds_cstr(match), "ct_dnat;",
-            &lb_dps->lb->nlb->header_, lb_dps->lflow_ref);
+        ovn_lflow_add(
+            lflows, NULL, S_ROUTER_IN_DEFRAG, prio, ds_cstr(match),
+            "ct_dnat;", lb_dps->lflow_ref,
+            WITH_HINT(&lb_dps->lb->nlb->header_),
+            WITH_DP_GROUP(lb_dps->nb_lr_map.map, ods_size(lr_datapaths)));
     }
 }
-- 
2.51.1
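A note for reviewers who have not seen the WITH_*() style before: the
macros expand to designated initializers (see the lflow-mgr.h hunk
above), which ovn_lflow_add() apparently gathers into a single options
struct built as a compound literal. Below is a minimal standalone
sketch of that C pattern; the names flow_add, flow_add__ and struct
flow_opts are made up for illustration and are not the OVN API:

    /* Standalone sketch of the optional-argument pattern; not OVN code. */
    #include <stddef.h>
    #include <stdio.h>

    struct flow_opts {
        const char *stage_hint;         /* stand-in for the real hint type */
        const unsigned long *dp_bitmap;
        size_t dp_bitmap_len;
    };

    #define WITH_HINT(HINT) .stage_hint = HINT
    #define WITH_DP_GROUP(DP_BITMAP, DP_BITMAP_LEN) \
        .dp_bitmap = DP_BITMAP, \
        .dp_bitmap_len = DP_BITMAP_LEN

    /* The trailing WITH_*() arguments become initializers of one
     * compound-literal struct flow_opts. */
    #define flow_add(MATCH, ACTIONS, ...) \
        flow_add__(MATCH, ACTIONS, &(struct flow_opts) { __VA_ARGS__ })

    static void
    flow_add__(const char *match, const char *actions,
               const struct flow_opts *opts)
    {
        printf("match=%s actions=%s hint=%s dp_bitmap_len=%zu\n",
               match, actions,
               opts->stage_hint ? opts->stage_hint : "(none)",
               opts->dp_bitmap_len);
    }

    int
    main(void)
    {
        unsigned long bitmap[1] = { 0x5 };

        flow_add("ip4 && tcp", "next;", WITH_HINT("nb-row-uuid"));
        flow_add("ip6", "drop;", WITH_HINT("nb-row-uuid"),
                 WITH_DP_GROUP(bitmap, 3));
        return 0;
    }

The useful property is that call sites only spell out the options they
actually need, which is what allows this patch to drop the dedicated
_with_dp_group() variant.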
