northd: Reduce duplicated code between nb_ls_map and nb_lr_map.

Wrap each of the nb_ls_map and nb_lr_map bitmaps of struct
ovn_lb_datapaths together with its set-bit counter (the former n_nb_ls
and n_nb_lr fields) in a new struct dynamic_bitmap, so that a bitmap
and its element count are always allocated, updated and consulted
together.

Acked-by: Mark Michelson <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
---
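Note for reviewers (not part of the commit message): below is a
standalone, self-contained sketch of the dynamic_bitmap pattern this
patch introduces in lb.h -- the bitmap and its set-bit count travel
together, so callers no longer maintain separate n_nb_ls/n_nb_lr
counters by hand.  The dbitmap_*() helper names are made up for
illustration only; the patch itself keeps calling the existing OVS
bitmap_*() helpers directly on the .map member.

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dynamic_bitmap {
        unsigned long *map;  /* One bit per datapath index. */
        size_t n_elems;      /* Number of bits currently set. */
        size_t capacity;     /* Bits allocated; not yet used by this
                              * patch's logic. */
    };

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static void
    dbitmap_init(struct dynamic_bitmap *db, size_t n_bits)
    {
        size_t n_longs = (n_bits + BITS_PER_LONG - 1) / BITS_PER_LONG;

        db->map = calloc(n_longs ? n_longs : 1, sizeof *db->map);
        db->n_elems = 0;
        db->capacity = n_bits;
    }

    static bool
    dbitmap_is_set(const struct dynamic_bitmap *db, size_t idx)
    {
        return (db->map[idx / BITS_PER_LONG] >> (idx % BITS_PER_LONG)) & 1;
    }

    /* Mirrors the loop bodies of ovn_lb_datapaths_add_ls() and
     * ovn_lb_datapaths_add_lr(): set the bit and bump the count only
     * if the bit was not already set. */
    static void
    dbitmap_set1(struct dynamic_bitmap *db, size_t idx)
    {
        if (!dbitmap_is_set(db, idx)) {
            db->map[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
            db->n_elems++;
        }
    }

    static void
    dbitmap_destroy(struct dynamic_bitmap *db)
    {
        free(db->map);
        db->map = NULL;
        db->n_elems = db->capacity = 0;
    }

    int
    main(void)
    {
        struct dynamic_bitmap db;

        dbitmap_init(&db, 128);
        dbitmap_set1(&db, 3);
        dbitmap_set1(&db, 3);   /* Duplicate add: count stays at 1. */
        dbitmap_set1(&db, 70);
        printf("n_elems = %zu\n", db.n_elems);  /* Prints 2. */
        dbitmap_destroy(&db);

        return 0;
    }
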
 northd/en-lr-stateful.c |  2 +-
 northd/en-sync-sb.c     | 24 ++++++++++---------
 northd/lb.c             | 20 ++++++++--------
 northd/lb.h             | 13 +++++++----
 northd/northd.c         | 52 +++++++++++++++++++++++------------------
 5 files changed, 62 insertions(+), 49 deletions(-)
diff --git a/northd/en-lr-stateful.c b/northd/en-lr-stateful.c
index 56e93f3c4..58e90f2f2 100644
--- a/northd/en-lr-stateful.c
+++ b/northd/en-lr-stateful.c
@@ -252,7 +252,7 @@ lr_stateful_lb_data_handler(struct engine_node *node, void *data_)
size_t index;
BITMAP_FOR_EACH_1 (index, ods_size(input_data.lr_datapaths),
- lb_dps->nb_lr_map) {
+ lb_dps->nb_lr_map.map) {
const struct ovn_datapath *od =
ovn_datapaths_find_by_index(input_data.lr_datapaths, index);
diff --git a/northd/en-sync-sb.c b/northd/en-sync-sb.c
index a111f14fd..343c76b54 100644
--- a/northd/en-sync-sb.c
+++ b/northd/en-sync-sb.c
@@ -676,7 +676,7 @@ sb_lb_table_build_and_sync(
struct sb_lb_record *sb_lb;
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- if (!lb_dps->n_nb_ls && !lb_dps->n_nb_lr) {
+ if (!lb_dps->nb_ls_map.n_elems && !lb_dps->nb_lr_map.n_elems) {
continue;
}
@@ -754,10 +754,10 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
sbrec_lr_dp_group = sbrec_lb->lr_datapath_group;
}
- if (lb_dps->n_nb_ls) {
+ if (lb_dps->nb_ls_map.n_elems) {
sb_lb->ls_dpg = ovn_dp_group_get(&sb_lbs->ls_dp_groups,
- lb_dps->n_nb_ls,
- lb_dps->nb_ls_map,
+ lb_dps->nb_ls_map.n_elems,
+ lb_dps->nb_ls_map.map,
ods_size(ls_datapaths));
if (sb_lb->ls_dpg) {
/* Update the dpg's sb dp_group. */
@@ -787,7 +787,7 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
} else {
sb_lb->ls_dpg = ovn_dp_group_create(
ovnsb_txn, &sb_lbs->ls_dp_groups, sbrec_ls_dp_group,
- lb_dps->n_nb_ls, lb_dps->nb_ls_map,
+ lb_dps->nb_ls_map.n_elems, lb_dps->nb_ls_map.map,
ods_size(ls_datapaths), true,
ls_datapaths, lr_datapaths);
}
@@ -809,10 +809,10 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
}
- if (lb_dps->n_nb_lr) {
+ if (lb_dps->nb_lr_map.n_elems) {
sb_lb->lr_dpg = ovn_dp_group_get(&sb_lbs->lr_dp_groups,
- lb_dps->n_nb_lr,
- lb_dps->nb_lr_map,
+ lb_dps->nb_lr_map.n_elems,
+ lb_dps->nb_lr_map.map,
ods_size(lr_datapaths));
if (sb_lb->lr_dpg) {
/* Update the dpg's sb dp_group. */
@@ -842,7 +842,7 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
} else {
sb_lb->lr_dpg = ovn_dp_group_create(
ovnsb_txn, &sb_lbs->lr_dp_groups, sbrec_lr_dp_group,
- lb_dps->n_nb_lr, lb_dps->nb_lr_map,
+ lb_dps->nb_lr_map.n_elems, lb_dps->nb_lr_map.map,
ods_size(lr_datapaths), false,
ls_datapaths, lr_datapaths);
}
@@ -919,7 +919,8 @@ sync_changed_lbs(struct sb_lb_table *sb_lbs,
sb_lb = sb_lb_table_find(&sb_lbs->entries, nb_uuid);
- if (!sb_lb && !lb_dps->n_nb_ls && !lb_dps->n_nb_lr) {
+ if (!sb_lb && !lb_dps->nb_ls_map.n_elems &&
+ !lb_dps->nb_lr_map.n_elems) {
continue;
}
@@ -933,7 +934,8 @@ sync_changed_lbs(struct sb_lb_table *sb_lbs,
sbrec_load_balancer_table_get_for_uuid(sb_lb_table, nb_uuid);
}
- if (sb_lb && !lb_dps->n_nb_ls && !lb_dps->n_nb_lr) {
+ if (sb_lb && !lb_dps->nb_ls_map.n_elems &&
+ !lb_dps->nb_lr_map.n_elems) {
const struct sbrec_load_balancer *sbrec_lb =
sbrec_load_balancer_table_get_for_uuid(sb_lb_table, nb_uuid);
if (sbrec_lb) {
diff --git a/northd/lb.c b/northd/lb.c
index 30726cd27..9ce9f7bc9 100644
--- a/northd/lb.c
+++ b/northd/lb.c
@@ -631,8 +631,8 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
{
struct ovn_lb_datapaths *lb_dps = xzalloc(sizeof *lb_dps);
lb_dps->lb = lb;
- lb_dps->nb_ls_map = bitmap_allocate(n_ls_datapaths);
- lb_dps->nb_lr_map = bitmap_allocate(n_lr_datapaths);
+ lb_dps->nb_ls_map.map = bitmap_allocate(n_ls_datapaths);
+ lb_dps->nb_lr_map.map = bitmap_allocate(n_lr_datapaths);
lb_dps->lflow_ref = lflow_ref_create();
hmapx_init(&lb_dps->ls_lb_with_stateless_mode);
return lb_dps;
@@ -641,8 +641,8 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
void
ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps)
{
- bitmap_free(lb_dps->nb_lr_map);
- bitmap_free(lb_dps->nb_ls_map);
+ bitmap_free(lb_dps->nb_lr_map.map);
+ bitmap_free(lb_dps->nb_ls_map.map);
lflow_ref_destroy(lb_dps->lflow_ref);
hmapx_destroy(&lb_dps->ls_lb_with_stateless_mode);
free(lb_dps);
@@ -653,9 +653,9 @@ ovn_lb_datapaths_add_lr(struct ovn_lb_datapaths *lb_dps, size_t n,
struct ovn_datapath **ods)
{
for (size_t i = 0; i < n; i++) {
- if (!bitmap_is_set(lb_dps->nb_lr_map, ods[i]->index)) {
- bitmap_set1(lb_dps->nb_lr_map, ods[i]->index);
- lb_dps->n_nb_lr++;
+ if (!bitmap_is_set(lb_dps->nb_lr_map.map, ods[i]->index)) {
+ bitmap_set1(lb_dps->nb_lr_map.map, ods[i]->index);
+ lb_dps->nb_lr_map.n_elems++;
}
}
}
@@ -665,9 +665,9 @@ ovn_lb_datapaths_add_ls(struct ovn_lb_datapaths *lb_dps, size_t n,
struct ovn_datapath **ods)
{
for (size_t i = 0; i < n; i++) {
- if (!bitmap_is_set(lb_dps->nb_ls_map, ods[i]->index)) {
- bitmap_set1(lb_dps->nb_ls_map, ods[i]->index);
- lb_dps->n_nb_ls++;
+ if (!bitmap_is_set(lb_dps->nb_ls_map.map, ods[i]->index)) {
+ bitmap_set1(lb_dps->nb_ls_map.map, ods[i]->index);
+ lb_dps->nb_ls_map.n_elems++;
}
}
}
diff --git a/northd/lb.h b/northd/lb.h
index a0e560204..1c03c3763 100644
--- a/northd/lb.h
+++ b/northd/lb.h
@@ -138,15 +138,20 @@ void ovn_lb_group_reinit(
const struct hmap *lbs);
struct lflow_ref;
+
+struct dynamic_bitmap {
+ unsigned long *map;
+ size_t n_elems;
+ size_t capacity;
+};
+
struct ovn_lb_datapaths {
struct hmap_node hmap_node;
const struct ovn_northd_lb *lb;
- size_t n_nb_ls;
- unsigned long *nb_ls_map;
- size_t n_nb_lr;
- unsigned long *nb_lr_map;
+ struct dynamic_bitmap nb_ls_map;
+ struct dynamic_bitmap nb_lr_map;
struct hmapx ls_lb_with_stateless_mode;
diff --git a/northd/northd.c b/northd/northd.c
index 015f30a35..2e1cce6d0 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -3411,7 +3411,8 @@ build_lswitch_lbs_from_lrouter(struct ovn_datapaths *lr_datapaths,
size_t index;
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths), lb_dps->nb_lr_map) {
+ BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths),
+ lb_dps->nb_lr_map.map) {
struct ovn_datapath *od = lr_datapaths->array[index];
ovn_lb_datapaths_add_ls(lb_dps, vector_len(&od->ls_peers),
vector_get_array(&od->ls_peers));
@@ -3445,8 +3446,10 @@ build_lb_count_dps(struct hmap *lb_dps_map,
struct ovn_lb_datapaths *lb_dps;
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- lb_dps->n_nb_lr = bitmap_count1(lb_dps->nb_lr_map, n_lr_datapaths);
- lb_dps->n_nb_ls = bitmap_count1(lb_dps->nb_ls_map, n_ls_datapaths);
+ lb_dps->nb_lr_map.n_elems = bitmap_count1(lb_dps->nb_lr_map.map,
+ n_lr_datapaths);
+ lb_dps->nb_ls_map.n_elems = bitmap_count1(lb_dps->nb_ls_map.map,
+ n_ls_datapaths);
}
}
@@ -5007,7 +5010,7 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
size_t index;
BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
- lb_dps->nb_ls_map) {
+ lb_dps->nb_ls_map.map) {
od = ls_datapaths->array[index];
/* Add the ls datapath to the northd tracked data. */
@@ -5141,7 +5144,7 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
ovs_assert(lb_dps);
size_t index;
BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
- lb_dps->nb_ls_map) {
+ lb_dps->nb_ls_map.map) {
od = ls_datapaths->array[index];
/* Add the ls datapath to the northd tracked data. */
@@ -7589,7 +7592,7 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
const struct ovn_datapaths *ls_datapaths,
struct ds *match, struct ds *action)
{
- if (!lb_dps->n_nb_ls) {
+ if (!lb_dps->nb_ls_map.n_elems) {
return;
}
@@ -7630,7 +7633,7 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
}
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match), ds_cstr(action),
&lb->nlb->header_, lb_dps->lflow_ref);
@@ -7887,7 +7890,7 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
const struct ovn_datapaths *ls_datapaths,
struct lflow_ref *lflow_ref)
{
- if (!lb_dps->lb->affinity_timeout || !lb_dps->n_nb_ls) {
+ if (!lb_dps->lb->affinity_timeout || !lb_dps->nb_ls_map.n_elems) {
return;
}
@@ -7909,7 +7912,7 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_LB_AFF_CHECK, 100, ds_cstr(&new_lb_match), aff_check,
&lb_dps->lb->nlb->header_, lflow_ref);
ds_destroy(&new_lb_match);
@@ -7991,14 +7994,14 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
/* Forward to OFTABLE_CHK_LB_AFFINITY table to store flow tuple. */
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_LB_AFF_LEARN, 100, ds_cstr(&aff_match_learn),
ds_cstr(&aff_action_learn), &lb->nlb->header_,
lflow_ref);
/* Use already selected backend within affinity timeslot. */
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_LB, 150, ds_cstr(&aff_match), ds_cstr(&aff_action),
&lb->nlb->header_, lflow_ref);
@@ -8082,10 +8085,10 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
if (reject) {
size_t index;
- dp_non_meter = bitmap_clone(lb_dps->nb_ls_map,
+ dp_non_meter = bitmap_clone(lb_dps->nb_ls_map.map,
ods_size(ls_datapaths));
BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
- lb_dps->nb_ls_map) {
+ lb_dps->nb_ls_map.map) {
struct ovn_datapath *od = ls_datapaths->array[index];
meter = copp_meter_get(COPP_REJECT, od->nbs->copp,
@@ -8104,7 +8107,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
}
if (!reject || build_non_meter) {
ovn_lflow_add_with_dp_group(
- lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map,
+ lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map.map,
ods_size(ls_datapaths), S_SWITCH_IN_LB, priority,
ds_cstr(match), ds_cstr(action), &lb->nlb->header_,
lb_dps->lflow_ref);
@@ -12227,7 +12230,7 @@ build_lrouter_nat_flows_for_lb(
size_t index;
bool use_stateless_nat = smap_get_bool(&lb->nlb->options,
"use_stateless_nat", false);
- BITMAP_FOR_EACH_1 (index, bitmap_len, lb_dps->nb_lr_map) {
+ BITMAP_FOR_EACH_1 (index, bitmap_len, lb_dps->nb_lr_map.map) {
struct ovn_datapath *od = lr_datapaths->array[index];
enum lrouter_nat_lb_flow_type type;
@@ -12312,7 +12315,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
const struct svc_monitors_map_data *svc_mons_data,
struct ds *match, struct ds *action)
{
- if (!lb_dps->n_nb_ls) {
+ if (!lb_dps->nb_ls_map.n_elems) {
return;
}
@@ -12326,7 +12329,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
}
size_t index;
- BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths), lb_dps->nb_ls_map) {
+ BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
+ lb_dps->nb_ls_map.map) {
struct ovn_datapath *od = ls_datapaths->array[index];
ovn_lflow_add_with_hint__(lflows, od,
@@ -12370,7 +12374,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
const struct ovn_datapaths *lr_datapaths,
struct ds *match)
{
- if (!lb_dps->n_nb_lr) {
+ if (!lb_dps->nb_lr_map.n_elems) {
return;
}
@@ -12384,7 +12388,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
lb_vip->vip_str);
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_lr_map, ods_size(lr_datapaths),
+ lflows, lb_dps->nb_lr_map.map, ods_size(lr_datapaths),
S_ROUTER_IN_DEFRAG, prio, ds_cstr(match), "ct_dnat;",
&lb_dps->lb->nlb->header_, lb_dps->lflow_ref);
}
@@ -12404,7 +12408,7 @@ build_lrouter_allow_vip_traffic_template(struct lflow_table *lflows,
struct ds match = DS_EMPTY_INITIALIZER;
size_t index;
- BITMAP_FOR_EACH_1 (index, ods_size(lr_dps), lb_dps->nb_lr_map) {
+ BITMAP_FOR_EACH_1 (index, ods_size(lr_dps), lb_dps->nb_lr_map.map) {
struct ovn_datapath *od = lr_dps->array[index];
/* Do not drop ip traffic with destination the template VIP. */
ds_clear(&match);
@@ -12430,7 +12434,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
{
size_t index;
- if (!lb_dps->n_nb_lr) {
+ if (!lb_dps->nb_lr_map.n_elems) {
return;
}
@@ -12450,7 +12454,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
continue;
}
- BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths), lb_dps->nb_lr_map) {
+ BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths),
+ lb_dps->nb_lr_map.map) {
struct ovn_datapath *od = lr_datapaths->array[index];
ovn_lflow_add_with_hint__(lflows, od, S_ROUTER_IN_DNAT,
@@ -12464,7 +12469,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
}
if (lb->skip_snat) {
- BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths), lb_dps->nb_lr_map) {
+ BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths),
+ lb_dps->nb_lr_map.map) {
struct ovn_datapath *od = lr_datapaths->array[index];
ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120,
--
2.50.1