> Hi Lorenzo,
> 
> I have feedback inline.

Hi Mark,
thx for the review.

> 
> On 7/24/25 1:20 PM, Lorenzo Bianconi wrote:
> > From: Numan Siddique <num...@ovn.org>
> > 
> > This is required to add I-P for logical switch and logical router
> > inserts.
> > 
> > Signed-off-by: Numan Siddique <num...@ovn.org>
> > ---
> >  lib/ovn-util.h  | 20 +++++++++++++++++
> >  northd/lb.c     | 23 ++++++++++++++++++--
> >  northd/lb.h     | 22 +++++++++++++++----
> >  northd/northd.c | 58 ++++++++++++++++++++++++++++++++-----------------
> >  4 files changed, 97 insertions(+), 26 deletions(-)
> > 
> > diff --git a/lib/ovn-util.h b/lib/ovn-util.h
> > index 695058f90..a8ade6bb4 100644
> > --- a/lib/ovn-util.h
> > +++ b/lib/ovn-util.h
> > @@ -17,6 +17,7 @@
> >  #define OVN_UTIL_H 1
> >  
> >  #include "ovsdb-idl.h"
> > +#include "lib/bitmap.h"
> >  #include "lib/packets.h"
> >  #include "lib/sset.h"
> >  #include "lib/svec.h"
> > @@ -477,6 +478,25 @@ void sorted_array_apply_diff(const struct sorted_array *a1,
> >                                                     bool add),
> >                              const void *arg);
> >  
> > +static inline unsigned long *
> > +ovn_bitmap_realloc(unsigned long *bitmap, size_t n_bits_old,
> > +                   size_t n_bits_new)
> > +{
> > +    ovs_assert(n_bits_new >= n_bits_old);
> > +
> > +    if (bitmap_n_bytes(n_bits_old) == bitmap_n_bytes(n_bits_new)) {
> > +        return bitmap;
> > +    }
> > +
> > +    bitmap = xrealloc(bitmap, bitmap_n_bytes(n_bits_new));
> > +    /* Set the uninitialized bits to 0 as xrealloc doesn't initialize the
> > +     * added memory. */
> > +    size_t delta = BITMAP_N_LONGS(n_bits_new) - BITMAP_N_LONGS(n_bits_old);
> > +    memset(&bitmap[BITMAP_N_LONGS(n_bits_old)], 0, delta * sizeof *bitmap);
> > +
> > +    return bitmap;
> > +}
> > +
> >  /* Utilities around properly handling exit command. */
> >  struct ovn_exit_args {
> >      struct unixctl_conn **conns;
> > diff --git a/northd/lb.c b/northd/lb.c
> > index d8327c2fc..9615aa244 100644
> > --- a/northd/lb.c
> > +++ b/northd/lb.c
> > @@ -579,7 +579,9 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
> >      struct ovn_lb_datapaths *lb_dps = xzalloc(sizeof *lb_dps);
> >      lb_dps->lb = lb;
> >      lb_dps->nb_ls_map.map = bitmap_allocate(n_ls_datapaths);
> > +    lb_dps->nb_ls_map.capacity = n_ls_datapaths;
> >      lb_dps->nb_lr_map.map = bitmap_allocate(n_lr_datapaths);
> > +    lb_dps->nb_lr_map.capacity = n_lr_datapaths;
> >      lb_dps->lflow_ref = lflow_ref_create();
> >      hmapx_init(&lb_dps->ls_lb_with_stateless_mode);
> >      return lb_dps;
> > @@ -595,11 +597,23 @@ ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps)
> >      free(lb_dps);
> >  }
> >  
> > +static void
> > +dynamic_bitmap_realloc(struct dynamic_bitmap *db, size_t new_n_elems)
> > +{
> > +    if (new_n_elems > db->capacity) {
> > +        db->map = ovn_bitmap_realloc(db->map, db->capacity, new_n_elems);
> > +        db->capacity = bitmap_n_bytes(new_n_elems);
> 
> This is my fault because of the feedback I gave on v2.
> 
> Based on how db->capacity is used throughout the rest of this file, it
> should be set to new_n_elems here instead of bitmap_n_bytes(new_n_elems).
> In the rest of the file, db->capacity indicates the highest number of bits
> the bitmap can hold, but here, db->capacity is being set to the size of
> the bitmap allocation in bytes. This will make db->capacity lower than it
> should be. This likely will result in the bitmap being reallocated when it
> doesn't need to be, and even worse, we'll probably assert in
> ovn_lb_datapaths_add_lr() or ovn_lb_datapaths_add_ls() eventually.

ack, I will fix it in v4.
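
Something along these lines (untested sketch, keeping 'capacity' in bits as
you describe):

static void
dynamic_bitmap_realloc(struct dynamic_bitmap *db, size_t new_n_elems)
{
    if (new_n_elems > db->capacity) {
        db->map = ovn_bitmap_realloc(db->map, db->capacity, new_n_elems);
        /* 'capacity' tracks the number of bits the bitmap can hold, not
         * the allocation size in bytes. */
        db->capacity = new_n_elems;
    }
}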
> 
> > +    }
> > +}
> > +
> >  void
> >  ovn_lb_datapaths_add_lr(struct ovn_lb_datapaths *lb_dps, size_t n,
> > -                        struct ovn_datapath **ods)
> > +                        struct ovn_datapath **ods,
> > +                        size_t n_lr_datapaths)
> >  {
> > +    dynamic_bitmap_realloc(&lb_dps->nb_lr_map, n_lr_datapaths);
> >      for (size_t i = 0; i < n; i++) {
> > +        ovs_assert(ods[i]->index < lb_dps->nb_lr_map.capacity);
> >          if (!bitmap_is_set(lb_dps->nb_lr_map.map, ods[i]->index)) {
> >              bitmap_set1(lb_dps->nb_lr_map.map, ods[i]->index);
> >              lb_dps->nb_lr_map.n_elems++;
> > @@ -609,9 +623,12 @@ ovn_lb_datapaths_add_lr(struct ovn_lb_datapaths *lb_dps, size_t n,
> >  
> >  void
> >  ovn_lb_datapaths_add_ls(struct ovn_lb_datapaths *lb_dps, size_t n,
> > -                        struct ovn_datapath **ods)
> > +                        struct ovn_datapath **ods,
> > +                        size_t n_ls_datapaths)
> >  {
> > +    dynamic_bitmap_realloc(&lb_dps->nb_ls_map, n_ls_datapaths);
> >      for (size_t i = 0; i < n; i++) {
> > +        ovs_assert(ods[i]->index < lb_dps->nb_ls_map.capacity);
> >          if (!bitmap_is_set(lb_dps->nb_ls_map.map, ods[i]->index)) {
> >              bitmap_set1(lb_dps->nb_ls_map.map, ods[i]->index);
> >              lb_dps->nb_ls_map.n_elems++;
> > @@ -643,6 +660,8 @@ ovn_lb_group_datapaths_create(const struct ovn_lb_group *lb_group,
> >      lb_group_dps->lb_group = lb_group;
> >      lb_group_dps->ls = xmalloc(max_ls_datapaths * sizeof *lb_group_dps->ls);
> >      lb_group_dps->lr = xmalloc(max_lr_datapaths * sizeof *lb_group_dps->lr);
> > +    lb_group_dps->max_lr = max_ls_datapaths;
> > +    lb_group_dps->max_lr = max_lr_datapaths;
> >      return lb_group_dps;
> >  }
> >  
> > diff --git a/northd/lb.h b/northd/lb.h
> > index 1d6152a1d..be9f9a837 100644
> > --- a/northd/lb.h
> > +++ b/northd/lb.h
> > @@ -185,9 +185,11 @@ struct ovn_lb_datapaths *ovn_lb_datapaths_find(const struct hmap *,
> >  void ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *);
> >  
> >  void ovn_lb_datapaths_add_lr(struct ovn_lb_datapaths *, size_t n,
> > -                             struct ovn_datapath **);
> > +                             struct ovn_datapath **,
> > +                             size_t n_lr_datapaths);
> >  void ovn_lb_datapaths_add_ls(struct ovn_lb_datapaths *, size_t n,
> > -                             struct ovn_datapath **);
> > +                             struct ovn_datapath **,
> > +                             size_t n_ls_datapaths);
> >  
> >  struct ovn_lb_group_datapaths {
> >      struct hmap_node hmap_node;
> > @@ -196,8 +198,10 @@ struct ovn_lb_group_datapaths {
> >  
> >      /* Datapaths to which 'lb_group' is applied. */
> >      size_t n_ls;
> > +    size_t max_ls;
> >      struct ovn_datapath **ls;
> >  
> >      size_t n_lr;
> > +    size_t max_lr;
> >      struct ovn_datapath **lr;
> >  };
> >  
> > @@ -211,16 +215,26 @@ struct ovn_lb_group_datapaths *ovn_lb_group_datapaths_find(
> >  static inline void
> >  ovn_lb_group_datapaths_add_ls(struct ovn_lb_group_datapaths *lbg_dps, size_t n,
> > -                              struct ovn_datapath **ods)
> > +                              struct ovn_datapath **ods, size_t n_ls_datapaths)
> >  {
> > +    if (n_ls_datapaths > lbg_dps->max_ls) {
> > +        lbg_dps->ls = xrealloc(lbg_dps->ls,
> > +                               n_ls_datapaths * sizeof *lbg_dps->ls);
> > +        lbg_dps->max_ls = n_ls_datapaths;
> > +    }
> >      memcpy(&lbg_dps->ls[lbg_dps->n_ls], ods, n * sizeof *ods);
> >      lbg_dps->n_ls += n;
> 
> In the previous iteration of this patch, I suggested using a vector
> instead of coding the dynamic array manually.
> 
> If this were done, then the entire ovn_lb_group_datapaths_add_ls()
> function could be replaced with a call to vector_push_array().
> 
> All vector operations that insert data (vector_push(), vector_push_array(),
> and vector_insert()) will automatically reallocate the vector's dynamic
> array.

ack, I will fix it in v4.
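
I am thinking of something along these lines for v4 (rough sketch, not
compiled; it assumes 'ls' and 'lr' become struct vector fields initialized
with VECTOR_EMPTY_INITIALIZER(struct ovn_datapath *), and the exact
vector_push()/vector_push_array() signatures still need to be double-checked
against lib/vec.h):

static inline void
ovn_lb_group_datapaths_add_ls(struct ovn_lb_group_datapaths *lbg_dps, size_t n,
                              struct ovn_datapath **ods)
{
    /* The vector grows its backing array on demand, so no manual capacity
     * tracking or xrealloc() is needed here. */
    vector_push_array(&lbg_dps->ls, ods, n);
}

static inline void
ovn_lb_group_datapaths_add_lr(struct ovn_lb_group_datapaths *lbg_dps,
                              struct ovn_datapath *lr)
{
    vector_push(&lbg_dps->lr, &lr);
}

Callers that currently use n_ls/ls and n_lr/lr directly would then switch to
vector_len() and vector_get_array() accordingly.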
Regards,
Lorenzo

> 
> >  }
> >  
> >  static inline void
> >  ovn_lb_group_datapaths_add_lr(struct ovn_lb_group_datapaths *lbg_dps,
> > -                              struct ovn_datapath *lr)
> > +                              struct ovn_datapath *lr, size_t n_lr_datapaths)
> >  {
> > +    if (n_lr_datapaths > lbg_dps->max_lr) {
> > +        lbg_dps->lr = xrealloc(lbg_dps->lr,
> > +                               n_lr_datapaths * sizeof *lbg_dps->lr);
> > +        lbg_dps->max_lr = n_lr_datapaths;
> > +    }
> >      lbg_dps->lr[lbg_dps->n_lr++] = lr;
> >  }
> 
> And ovn_lb_group_datapaths_add_lr() could be replaced with a call to
> vector_push().
> 
> > diff --git a/northd/northd.c b/northd/northd.c
> > index 95cd67a5a..e4125606a 100644
> > --- a/northd/northd.c
> > +++ b/northd/northd.c
> > @@ -3669,7 +3669,7 @@ build_lb_datapaths(const struct hmap *lbs, const struct hmap *lb_groups,
> >                  &od->nbs->load_balancer[i]->header_.uuid;
> >              lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, lb_uuid);
> >              ovs_assert(lb_dps);
> > -            ovn_lb_datapaths_add_ls(lb_dps, 1, &od);
> > +            ovn_lb_datapaths_add_ls(lb_dps, 1, &od, ods_size(ls_datapaths));
> >              if (od->lb_with_stateless_mode) {
> >                  hmapx_add(&lb_dps->ls_lb_with_stateless_mode, od);
> >              }
> > @@ -3682,7 +3682,8 @@ build_lb_datapaths(const struct hmap *lbs, const struct hmap *lb_groups,
> >                  ovn_lb_group_datapaths_find(lb_group_datapaths_map,
> >                                              lb_group_uuid);
> >              ovs_assert(lb_group_dps);
> > -            ovn_lb_group_datapaths_add_ls(lb_group_dps, 1, &od);
> > +            ovn_lb_group_datapaths_add_ls(lb_group_dps, 1, &od,
> > +                                          ods_size(ls_datapaths));
> >          }
> >      }
> >  
> > @@ -3697,7 +3698,8 @@ build_lb_datapaths(const struct hmap *lbs, const struct hmap *lb_groups,
> >                  ovn_lb_group_datapaths_find(lb_group_datapaths_map,
> >                                              lb_group_uuid);
> >              ovs_assert(lb_group_dps);
> > -            ovn_lb_group_datapaths_add_lr(lb_group_dps, od);
> > +            ovn_lb_group_datapaths_add_lr(lb_group_dps, od,
> > +                                          ods_size(lr_datapaths));
> >          }
> >  
> >          for (size_t i = 0; i < od->nbr->n_load_balancer; i++) {
> > @@ -3705,7 +3707,7 @@ build_lb_datapaths(const struct hmap *lbs, const struct hmap *lb_groups,
> >                  &od->nbr->load_balancer[i]->header_.uuid;
> >              lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, lb_uuid);
> >              ovs_assert(lb_dps);
> > -            ovn_lb_datapaths_add_lr(lb_dps, 1, &od);
> > +            ovn_lb_datapaths_add_lr(lb_dps, 1, &od, ods_size(lr_datapaths));
> >          }
> >      }
> >  
> > @@ -3716,9 +3718,11 @@ build_lb_datapaths(const struct hmap *lbs, const struct hmap *lb_groups,
> >              lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, lb_uuid);
> >              ovs_assert(lb_dps);
> >              ovn_lb_datapaths_add_ls(lb_dps, lb_group_dps->n_ls,
> > -                                    lb_group_dps->ls);
> > +                                    lb_group_dps->ls,
> > +                                    ods_size(ls_datapaths));
> >              ovn_lb_datapaths_add_lr(lb_dps, lb_group_dps->n_lr,
> > -                                    lb_group_dps->lr);
> > +                                    lb_group_dps->lr,
> > +                                    ods_size(lr_datapaths));
> >          }
> >      }
> >  }
> > @@ -3764,6 +3768,7 @@ build_lb_svcs(
> >  
> >  static void
> >  build_lswitch_lbs_from_lrouter(struct ovn_datapaths *lr_datapaths,
> > +                               struct ovn_datapaths *ls_datapaths,
> >                                 struct hmap *lb_dps_map,
> >                                 struct hmap *lb_group_dps_map)
> >  {
> > @@ -3779,7 +3784,8 @@ build_lswitch_lbs_from_lrouter(struct ovn_datapaths *lr_datapaths,
> >                             lb_dps->nb_lr_map.map) {
> >              struct ovn_datapath *od = lr_datapaths->array[index];
> >              ovn_lb_datapaths_add_ls(lb_dps, vector_len(&od->ls_peers),
> > -                                    vector_get_array(&od->ls_peers));
> > +                                    vector_get_array(&od->ls_peers),
> > +                                    ods_size(ls_datapaths));
> >          }
> >      }
> >  
> > @@ -3789,14 +3795,15 @@ build_lswitch_lbs_from_lrouter(struct ovn_datapaths *lr_datapaths,
> >              struct ovn_datapath *od = lb_group_dps->lr[i];
> >              ovn_lb_group_datapaths_add_ls(lb_group_dps, vector_len(&od->ls_peers),
> > -                                          vector_get_array(&od->ls_peers));
> > +                                          vector_get_array(&od->ls_peers), 0);
> >              for (size_t j = 0; j < lb_group_dps->lb_group->n_lbs; j++) {
> >                  const struct uuid *lb_uuid =
> >                      &lb_group_dps->lb_group->lbs[j]->nlb->header_.uuid;
> >                  lb_dps = ovn_lb_datapaths_find(lb_dps_map, lb_uuid);
> >                  ovs_assert(lb_dps);
> >                  ovn_lb_datapaths_add_ls(lb_dps, vector_len(&od->ls_peers),
> > -                                        vector_get_array(&od->ls_peers));
> > +                                        vector_get_array(&od->ls_peers),
> > +                                        ods_size(ls_datapaths));
> >              }
> >          }
> >      }
> > @@ -3827,7 +3834,9 @@ build_lb_port_related_data(
> >      const struct sbrec_service_monitor_table *sbrec_service_monitor_table,
> >      const char *svc_monitor_mac,
> >      const struct eth_addr *svc_monitor_mac_ea,
> > -    struct ovn_datapaths *lr_datapaths, struct hmap *ls_ports,
> > +    struct ovn_datapaths *lr_datapaths,
> > +    struct ovn_datapaths *ls_datapaths,
> > +    struct hmap *ls_ports,
> >      struct hmap *lb_dps_map, struct hmap *lb_group_dps_map,
> >      struct sset *svc_monitor_lsps,
> >      struct hmap *svc_monitor_map)
> > @@ -3835,7 +3844,8 @@ build_lb_port_related_data(
> >      build_lb_svcs(ovnsb_txn, sbrec_service_monitor_table, svc_monitor_mac,
> >                    svc_monitor_mac_ea, ls_ports, lb_dps_map,
> >                    svc_monitor_lsps, svc_monitor_map);
> > -    build_lswitch_lbs_from_lrouter(lr_datapaths, lb_dps_map, lb_group_dps_map);
> > +    build_lswitch_lbs_from_lrouter(lr_datapaths, ls_datapaths, lb_dps_map,
> > +                                   lb_group_dps_map);
> >  }
> >  
> >  /* Returns true if the peer port IPs of op should be added in the nat_addresses
> > @@ -5324,7 +5334,7 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >          UUIDSET_FOR_EACH (uuidnode, &codlb->assoc_lbs) {
> >              lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, &uuidnode->uuid);
> >              ovs_assert(lb_dps);
> > -            ovn_lb_datapaths_add_ls(lb_dps, 1, &od);
> > +            ovn_lb_datapaths_add_ls(lb_dps, 1, &od, ods_size(ls_datapaths));
> >  
> >              if (od->lb_with_stateless_mode) {
> >                  hmapx_add(&lb_dps->ls_lb_with_stateless_mode, od);
> > @@ -5338,7 +5348,8 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >              lbgrp_dps = ovn_lb_group_datapaths_find(lbgrp_datapaths_map,
> >                                                      &uuidnode->uuid);
> >              ovs_assert(lbgrp_dps);
> > -            ovn_lb_group_datapaths_add_ls(lbgrp_dps, 1, &od);
> > +            ovn_lb_group_datapaths_add_ls(lbgrp_dps, 1, &od,
> > +                                          ods_size(ls_datapaths));
> >  
> >              /* Associate all the lbs of the lbgrp to the datapath 'od' */
> >              for (size_t j = 0; j < lbgrp_dps->lb_group->n_lbs; j++) {
> > @@ -5346,7 +5357,8 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >                      = &lbgrp_dps->lb_group->lbs[j]->nlb->header_.uuid;
> >                  lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, lb_uuid);
> >                  ovs_assert(lb_dps);
> > -                ovn_lb_datapaths_add_ls(lb_dps, 1, &od);
> > +                ovn_lb_datapaths_add_ls(lb_dps, 1, &od,
> > +                                        ods_size(ls_datapaths));
> >  
> >                  /* Add the lb to the northd tracked data. */
> >                  hmapx_add(&nd_changes->trk_lbs.crupdated, lb_dps);
> > @@ -5365,7 +5377,7 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >          UUIDSET_FOR_EACH (uuidnode, &codlb->assoc_lbs) {
> >              lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, &uuidnode->uuid);
> >              ovs_assert(lb_dps);
> > -            ovn_lb_datapaths_add_lr(lb_dps, 1, &od);
> > +            ovn_lb_datapaths_add_lr(lb_dps, 1, &od, ods_size(lr_datapaths));
> >  
> >              /* Add the lb to the northd tracked data. */
> >              hmapx_add(&nd_changes->trk_lbs.crupdated, lb_dps);
> > @@ -5375,7 +5387,8 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >              lbgrp_dps = ovn_lb_group_datapaths_find(lbgrp_datapaths_map,
> >                                                      &uuidnode->uuid);
> >              ovs_assert(lbgrp_dps);
> > -            ovn_lb_group_datapaths_add_lr(lbgrp_dps, od);
> > +            ovn_lb_group_datapaths_add_lr(lbgrp_dps, od,
> > +                                          ods_size(lr_datapaths));
> >  
> >              /* Associate all the lbs of the lbgrp to the datapath 'od' */
> >              for (size_t j = 0; j < lbgrp_dps->lb_group->n_lbs; j++) {
> > @@ -5383,7 +5396,8 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >                      = &lbgrp_dps->lb_group->lbs[j]->nlb->header_.uuid;
> >                  lb_dps = ovn_lb_datapaths_find(lb_datapaths_map, lb_uuid);
> >                  ovs_assert(lb_dps);
> > -                ovn_lb_datapaths_add_lr(lb_dps, 1, &od);
> > +                ovn_lb_datapaths_add_lr(lb_dps, 1, &od,
> > +                                        ods_size(lr_datapaths));
> >  
> >                  /* Add the lb to the northd tracked data. */
> >                  hmapx_add(&nd_changes->trk_lbs.crupdated, lb_dps);
> > @@ -5424,12 +5438,14 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
> >          ovs_assert(lb_dps);
> >          for (size_t i = 0; i < lbgrp_dps->n_lr; i++) {
> >              od = lbgrp_dps->lr[i];
> > -            ovn_lb_datapaths_add_lr(lb_dps, 1, &od);
> > +            ovn_lb_datapaths_add_lr(lb_dps, 1, &od,
> > +                                    ods_size(lr_datapaths));
> >          }
> >  
> >          for (size_t i = 0; i < lbgrp_dps->n_ls; i++) {
> >              od = lbgrp_dps->ls[i];
> > -            ovn_lb_datapaths_add_ls(lb_dps, 1, &od);
> > +            ovn_lb_datapaths_add_ls(lb_dps, 1, &od,
> > +                                    ods_size(ls_datapaths));
> >  
> >              /* Add the ls datapath to the northd tracked data. */
> >              hmapx_add(&nd_changes->ls_with_changed_lbs, od);
> > @@ -19215,7 +19231,9 @@ ovnnb_db_run(struct northd_input *input_data,
> >                                input_data->sbrec_service_monitor_table,
> >                                input_data->svc_monitor_mac,
> >                                &input_data->svc_monitor_mac_ea,
> > -                              &data->lr_datapaths, &data->ls_ports,
> > +                              &data->lr_datapaths,
> > +                              &data->ls_datapaths,
> > +                              &data->ls_ports,
> >                                &data->lb_datapaths_map,
> >                                &data->lb_group_datapaths_map,
> >                                &data->svc_monitor_lsps,
> 
_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev