Thanks Lucas and Numan.

I merged this to main.

On Thu, Sep 25, 2025 at 10:25 AM Numan Siddique <[email protected]> wrote:
>
> On Thu, Sep 25, 2025 at 8:16 AM Lucas Vargas Dias via dev
> <[email protected]> wrote:
> >
> > Change the logic to save the sbflow uuid and only update the existing
> > SB row when the lflow is reused; otherwise the sbflow is removed.
> > Also, reduce sbflow searching by using a uuidset instead of
> > searching through the whole lflow table.
> > Add lflow states:
> > LFLOW_STALE - Lflow is no longer relevant
> > LFLOW_TO_SYNC - Lflow needs to be synced with the SB DB
> > LFLOW_SYNCED - Lflow is synced with the SB DB
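> >
> > The lifecycle is effectively a mark-and-sweep over the lflow table.
> > A minimal standalone sketch of that cycle (illustrative, simplified
> > types and names only, not the actual northd structures):
> >
> >     #include <stdbool.h>
> >     #include <stdio.h>
> >
> >     enum lflow_state { LF_STALE, LF_TO_SYNC, LF_SYNCED };
> >
> >     struct flow {
> >         const char *match;      /* Key identifying the flow. */
> >         enum lflow_state state; /* Position in the sync cycle. */
> >         bool has_sb_row;        /* Stands in for a saved SB uuid. */
> >     };
> >
> >     /* Recompute start: keep the entries but mark them stale
> >      * (lflow_table_clear(table, false) in the patch below). */
> >     static void mark_all_stale(struct flow *flows, size_t n)
> >     {
> >         for (size_t i = 0; i < n; i++) {
> >             flows[i].state = LF_STALE;
> >         }
> >     }
> >
> >     /* A flow regenerated during recompute reuses the stale entry and
> >      * its saved SB row instead of starting from scratch
> >      * (do_ovn_lflow_add() in the patch below). */
> >     static void add_or_reuse(struct flow *f)
> >     {
> >         f->state = LF_TO_SYNC;   /* has_sb_row (sb_uuid) is kept. */
> >     }
> >
> >     /* Sync: TO_SYNC flows update or insert their SB row; flows still
> >      * stale have their SB row deleted.  The printfs stand in for the
> >      * actual SB operations (lflow_table_sync_to_sb() in the patch
> >      * below). */
> >     static void sync_and_sweep(struct flow *flows, size_t n)
> >     {
> >         for (size_t i = 0; i < n; i++) {
> >             if (flows[i].state == LF_STALE) {
> >                 printf("delete SB row for '%s'\n", flows[i].match);
> >             } else {
> >                 printf("%s SB row for '%s'\n",
> >                        flows[i].has_sb_row ? "update" : "insert",
> >                        flows[i].match);
> >                 flows[i].has_sb_row = true;
> >                 flows[i].state = LF_SYNCED;
> >             }
> >         }
> >     }
> >
> >     int main(void)
> >     {
> >         struct flow flows[] = {
> >             { "ip4.dst == 10.0.0.1", LF_SYNCED, true },
> >             { "ip4.dst == 10.0.0.2", LF_SYNCED, true },
> >         };
> >         mark_all_stale(flows, 2);
> >         add_or_reuse(&flows[0]);  /* Only the first flow is rebuilt. */
> >         sync_and_sweep(flows, 2); /* Updates one row, deletes the other. */
> >         return 0;
> >     }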
> >
> > It generates the following results in a scenario with:
> > - LSPs: 56548
> > - LRPs: 27304
> > - LRs: 7922
> > - LSs: 28602
> >
> > Without the commit:
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb lr-add lr9-1
> > Time spent on processing nb_cfg 275438:
> >         ovn-northd delay before processing:     16069ms
> >         ovn-northd completion:                  32828ms
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb ls-add bar9-1
> > Time spent on processing nb_cfg 275439:
> >         ovn-northd delay before processing:     15019ms
> >         ovn-northd completion:                  33207ms
> >
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb lrp-add lr9-1 rp9-1-bar 00:01:ff:22:00:01 192.168.10.1/24 2801:80:3eaf:4401::1/64
> > Time spent on processing nb_cfg 275440:
> >         ovn-northd delay before processing:     14784ms
> >         ovn-northd completion:                  33577ms
> >
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb lsp-add bar9-1 bar-rp9-1 -- set Logical_Switch_Port bar-rp9-1 type=router options:router-port=rp9-1-bar -- lsp-set-addresses bar-rp9-1 router
> > Time spent on processing nb_cfg 275441:
> >         ovn-northd delay before processing:     14598ms
> >         ovn-northd completion:                  31942ms
> >
> > With the commit:
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb lr-add lr9-1
> > Time spent on processing nb_cfg 275401:
> >         ovn-northd delay before processing:     12602ms
> >         ovn-northd completion:                  26103ms
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb ls-add bar9-1
> > Time spent on processing nb_cfg 275402:
> >         ovn-northd delay before processing:     12639ms
> >         ovn-northd completion:                  26759ms
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb lrp-add lr9-1 rp9-1-bar 00:01:ff:22:00:01 192.168.10.1/24 2801:80:3eaf:4401::1/64
> > Time spent on processing nb_cfg 275403:
> >         ovn-northd delay before processing:     11874ms
> >         ovn-northd completion:                  29733ms
> > ovn-nbctl --no-leader-only --print-wait-time --wait=sb lsp-add bar9-1 bar-rp9-1 -- set Logical_Switch_Port bar-rp9-1 type=router options:router-port=rp9-1-bar -- lsp-set-addresses bar-rp9-1 router
> > Time spent on processing nb_cfg 275404:
> >         ovn-northd delay before processing:     4058ms
> >         ovn-northd completion:                  17323ms
> >
> > Signed-off-by: Lucas Vargas Dias <[email protected]>
>
> Thanks for addressing the comments and for the great improvements.
>
> Acked-by: Numan Siddique <[email protected]>
>
> Numan
>
> > ---
> >  northd/en-lflow.c   |   2 +-
> >  northd/lflow-mgr.c  | 108 ++++++++++++++++++++++++--------------------
> >  northd/lflow-mgr.h  |  10 +++-
> >  northd/northd.c     |   8 ++--
> >  tests/ovn-northd.at |  32 +++++++++++++
> >  5 files changed, 106 insertions(+), 54 deletions(-)
> >
> > diff --git a/northd/en-lflow.c b/northd/en-lflow.c
> > index 50570b611..13c5e3119 100644
> > --- a/northd/en-lflow.c
> > +++ b/northd/en-lflow.c
> > @@ -122,7 +122,7 @@ en_lflow_run(struct engine_node *node, void *data)
> >      stopwatch_start(BUILD_LFLOWS_STOPWATCH_NAME, time_msec());
> >
> >      struct lflow_data *lflow_data = data;
> > -    lflow_table_clear(lflow_data->lflow_table);
> > +    lflow_table_clear(lflow_data->lflow_table, false);
> >      lflow_reset_northd_refs(&lflow_input);
> >      lflow_ref_clear(lflow_input.igmp_lflow_ref);
> >
> > diff --git a/northd/lflow-mgr.c b/northd/lflow-mgr.c
> > index 6a66a9718..5a989fe19 100644
> > --- a/northd/lflow-mgr.c
> > +++ b/northd/lflow-mgr.c
> > @@ -26,6 +26,7 @@
> >  #include "lflow-mgr.h"
> >  #include "lib/ovn-parallel-hmap.h"
> >  #include "lib/ovn-util.h"
> > +#include "lib/uuidset.h"
> >
> >  VLOG_DEFINE_THIS_MODULE(lflow_mgr);
> >
> > @@ -37,7 +38,8 @@ static void ovn_lflow_init(struct ovn_lflow *, struct ovn_datapath *od,
> >                             uint16_t priority, char *match,
> >                             char *actions, char *io_port,
> >                             char *ctrl_meter, char *stage_hint,
> > -                           const char *where, const char *flow_desc);
> > +                           const char *where, const char *flow_desc,
> > +                           struct uuid sbuuid);
> >  static struct ovn_lflow *ovn_lflow_find(const struct hmap *lflows,
> >                                          enum ovn_stage stage,
> >                                          uint16_t priority, const char *match,
> > @@ -147,6 +149,13 @@ static struct ovs_mutex lflow_hash_locks[LFLOW_HASH_LOCK_MASK + 1];
> >   */
> >  extern struct ovs_mutex fake_hash_mutex;
> >
> > +
> > +enum ovn_lflow_state {
> > +    LFLOW_STALE,
> > +    LFLOW_TO_SYNC,
> > +    LFLOW_SYNCED,
> > +};
> > +
> >  /* Represents a logical ovn flow (lflow).
> >   *
> >   * A logical flow with match 'M' and actions 'A' - L(M, A) is created
> > @@ -181,14 +190,7 @@ struct ovn_lflow {
> >      struct hmap dp_refcnts_map; /* Maintains the number of times this ovn_lflow
> >                                   * is referenced by a given datapath.
> >                                   * Contains 'struct dp_refcnt' in the map. */
> > -};
> > -
> > -/* Logical flow table. */
> > -struct lflow_table {
> > -    struct hmap entries; /* hmap of lflows. */
> > -    struct hmap ls_dp_groups; /* hmap of logical switch dp groups. */
> > -    struct hmap lr_dp_groups; /* hmap of logical router dp groups. */
> > -    ssize_t max_seen_lflow_size;
> > +    enum ovn_lflow_state sync_state;
> >  };
> >
> >  struct lflow_table *
> > @@ -210,10 +212,14 @@ lflow_table_init(struct lflow_table *lflow_table)
> >  }
> >
> >  void
> > -lflow_table_clear(struct lflow_table *lflow_table)
> > +lflow_table_clear(struct lflow_table *lflow_table, bool destroy_all)
> >  {
> >      struct ovn_lflow *lflow;
> >      HMAP_FOR_EACH_SAFE (lflow, hmap_node, &lflow_table->entries) {
> > +        if (!destroy_all) {
> > +            lflow->sync_state = LFLOW_STALE;
> > +            continue;
> > +        }
> >          ovn_lflow_destroy(lflow_table, lflow);
> >      }
> >
> > @@ -224,7 +230,7 @@ lflow_table_clear(struct lflow_table *lflow_table)
> >  void
> >  lflow_table_destroy(struct lflow_table *lflow_table)
> >  {
> > -    lflow_table_clear(lflow_table);
> > +    lflow_table_clear(lflow_table, true);
> >      hmap_destroy(&lflow_table->entries);
> >      ovn_dp_groups_destroy(&lflow_table->ls_dp_groups);
> >      ovn_dp_groups_destroy(&lflow_table->lr_dp_groups);
> > @@ -257,16 +263,42 @@ lflow_table_sync_to_sb(struct lflow_table *lflow_table,
> >                         const struct sbrec_logical_flow_table *sb_flow_table,
> >                         const struct sbrec_logical_dp_group_table *dpgrp_table)
> >  {
> > +    struct uuidset sb_uuid_set = UUIDSET_INITIALIZER(&sb_uuid_set);
> >      struct hmap lflows_temp = HMAP_INITIALIZER(&lflows_temp);
> >      struct hmap *lflows = &lflow_table->entries;
> >      struct ovn_lflow *lflow;
> > +    const struct sbrec_logical_flow *sbflow;
> >
> >      fast_hmap_size_for(&lflows_temp,
> >                         lflow_table->max_seen_lflow_size);
> >
> > +    HMAP_FOR_EACH_SAFE (lflow, hmap_node, lflows) {
> > +        if (lflow->sync_state == LFLOW_STALE) {
> > +            ovn_lflow_destroy(lflow_table, lflow);
> > +            continue;
> > +        }
> > +        sbflow = NULL;
> > +        if (!uuid_is_zero(&lflow->sb_uuid)) {
> > +            sbflow = sbrec_logical_flow_table_get_for_uuid(sb_flow_table,
> > +                                                           &lflow->sb_uuid);
> > +        }
> > +        sync_lflow_to_sb(lflow, ovnsb_txn, lflow_table, ls_datapaths,
> > +                         lr_datapaths, ovn_internal_version_changed,
> > +                         sbflow, dpgrp_table);
> > +        uuidset_insert(&sb_uuid_set, &lflow->sb_uuid);
> > +        hmap_remove(lflows, &lflow->hmap_node);
> > +        hmap_insert(&lflows_temp, &lflow->hmap_node,
> > +                    hmap_node_hash(&lflow->hmap_node));
> > +    }
> >      /* Push changes to the Logical_Flow table to database. */
> > -    const struct sbrec_logical_flow *sbflow;
> >      SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_SAFE (sbflow, sb_flow_table) {
> > +        struct uuidset_node *node = uuidset_find(&sb_uuid_set,
> > +                                                 &sbflow->header_.uuid);
> > +        if (!node) {
> > +            sbrec_logical_flow_delete(sbflow);
> > +            continue;
> > +        }
> > +        uuidset_delete(&sb_uuid_set, node);
> >          struct sbrec_logical_dp_group *dp_group = sbflow->logical_dp_group;
> >          struct ovn_datapath *logical_datapath_od = NULL;
> >          size_t i;
> > @@ -297,38 +329,8 @@ lflow_table_sync_to_sb(struct lflow_table *lflow_table,
> >              sbrec_logical_flow_delete(sbflow);
> >              continue;
> >          }
> > -
> > -        enum ovn_pipeline pipeline
> > -            = !strcmp(sbflow->pipeline, "ingress") ? P_IN : P_OUT;
> > -
> > -        lflow = ovn_lflow_find(
> > -            lflows,
> > -            ovn_stage_build(ovn_datapath_get_type(logical_datapath_od),
> > -                            pipeline, sbflow->table_id),
> > -            sbflow->priority, sbflow->match, sbflow->actions,
> > -            sbflow->controller_meter, sbflow->hash);
> > -        if (lflow) {
> > -            sync_lflow_to_sb(lflow, ovnsb_txn, lflow_table, ls_datapaths,
> > -                             lr_datapaths, ovn_internal_version_changed,
> > -                             sbflow, dpgrp_table);
> > -
> > -            hmap_remove(lflows, &lflow->hmap_node);
> > -            hmap_insert(&lflows_temp, &lflow->hmap_node,
> > -                        hmap_node_hash(&lflow->hmap_node));
> > -        } else {
> > -            sbrec_logical_flow_delete(sbflow);
> > -        }
> > -    }
> > -
> > -    HMAP_FOR_EACH_SAFE (lflow, hmap_node, lflows) {
> > -        sync_lflow_to_sb(lflow, ovnsb_txn, lflow_table, ls_datapaths,
> > -                         lr_datapaths, ovn_internal_version_changed,
> > -                         NULL, dpgrp_table);
> > -
> > -        hmap_remove(lflows, &lflow->hmap_node);
> > -        hmap_insert(&lflows_temp, &lflow->hmap_node,
> > -                    hmap_node_hash(&lflow->hmap_node));
> >      }
> > +    uuidset_destroy(&sb_uuid_set);
> >      hmap_swap(lflows, &lflows_temp);
> >      hmap_destroy(&lflows_temp);
> >  }
> > @@ -847,7 +849,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
> >                 size_t dp_bitmap_len, enum ovn_stage stage, uint16_t priority,
> >                 char *match, char *actions, char *io_port, char *ctrl_meter,
> >                 char *stage_hint, const char *where,
> > -               const char *flow_desc)
> > +               const char *flow_desc, struct uuid sbuuid)
> >  {
> >      lflow->dpg_bitmap = bitmap_allocate(dp_bitmap_len);
> >      lflow->od = od;
> > @@ -861,7 +863,8 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
> >      lflow->flow_desc = flow_desc;
> >      lflow->dpg = NULL;
> >      lflow->where = where;
> > -    lflow->sb_uuid = UUID_ZERO;
> > +    lflow->sb_uuid = sbuuid;
> > +    lflow->sync_state = LFLOW_TO_SYNC;
> >      hmap_init(&lflow->dp_refcnts_map);
> >      ovs_list_init(&lflow->referenced_by);
> >  }
> > @@ -957,13 +960,18 @@ do_ovn_lflow_add(struct lflow_table *lflow_table, size_t dp_bitmap_len,
> >  {
> >      struct ovn_lflow *old_lflow;
> >      struct ovn_lflow *lflow;
> > +    struct uuid sbuuid = UUID_ZERO;
> >
> >      ovs_assert(dp_bitmap_len);
> >
> >      old_lflow = ovn_lflow_find(&lflow_table->entries, stage,
> >                                 priority, match, actions, ctrl_meter, hash);
> >      if (old_lflow) {
> > -        return old_lflow;
> > +        if (old_lflow->sync_state != LFLOW_STALE) {
> > +            return old_lflow;
> > +        }
> > +        sbuuid = old_lflow->sb_uuid;
> > +        ovn_lflow_destroy(lflow_table, old_lflow);
> >      }
> >
> >      lflow = xzalloc(sizeof *lflow);
> > @@ -975,14 +983,16 @@ do_ovn_lflow_add(struct lflow_table *lflow_table, size_t dp_bitmap_len,
> >                     io_port ? xstrdup(io_port) : NULL,
> >                     nullable_xstrdup(ctrl_meter),
> >                     ovn_lflow_hint(stage_hint), where,
> > -                   flow_desc);
> > +                   flow_desc, sbuuid);
> >
> >      if (parallelization_state != STATE_USE_PARALLELIZATION) {
> >          hmap_insert(&lflow_table->entries, &lflow->hmap_node, hash);
> >      } else {
> >          hmap_insert_fast(&lflow_table->entries, &lflow->hmap_node,
> >                           hash);
> > -        thread_lflow_counter++;
> > +        if (uuid_is_zero(&lflow->sb_uuid)) {
> > +            thread_lflow_counter++;
> > +        }
> >      }
> >
> >      return lflow;
> > @@ -1150,6 +1160,7 @@ sync_lflow_to_sb(struct ovn_lflow *lflow,
> >                              "referencing the dp group ["UUID_FMT"]",
> >                              UUID_ARGS(&sbflow->header_.uuid),
> >                              UUID_ARGS(&lflow->dpg->dpg_uuid));
> > +                lflow->sync_state = LFLOW_STALE;
> >                  return false;
> >              }
> >          } else {
> > @@ -1169,6 +1180,7 @@ sync_lflow_to_sb(struct ovn_lflow *lflow,
> >          ovn_dp_group_release(dp_groups, pre_sync_dpg);
> >      }
> >
> > +    lflow->sync_state = LFLOW_SYNCED;
> >      return true;
> >  }
> >
> > diff --git a/northd/lflow-mgr.h b/northd/lflow-mgr.h
> > index 1521270d6..91708c8a9 100644
> > --- a/northd/lflow-mgr.h
> > +++ b/northd/lflow-mgr.h
> > @@ -26,10 +26,16 @@ struct ovn_datapath;
> >  struct ovsdb_idl_row;
> >
> >  /* lflow map which stores the logical flows. */
> > -struct lflow_table;
> > +struct lflow_table {
> > +    struct hmap entries; /* hmap of lflows. */
> > +    struct hmap ls_dp_groups; /* hmap of logical switch dp groups. */
> > +    struct hmap lr_dp_groups; /* hmap of logical router dp groups. */
> > +    ssize_t max_seen_lflow_size;
> > +};
> > +
> >  struct lflow_table *lflow_table_alloc(void);
> >  void lflow_table_init(struct lflow_table *);
> > -void lflow_table_clear(struct lflow_table *);
> > +void lflow_table_clear(struct lflow_table *, bool);
> >  void lflow_table_destroy(struct lflow_table *);
> >  void lflow_table_expand(struct lflow_table *);
> >  void lflow_table_set_size(struct lflow_table *, size_t);
> > diff --git a/northd/northd.c b/northd/northd.c
> > index fe5199a86..214a3bf6f 100644
> > --- a/northd/northd.c
> > +++ b/northd/northd.c
> > @@ -18068,9 +18068,9 @@ noop_callback(struct worker_pool *pool OVS_UNUSED,
> >  static void
> >  fix_flow_table_size(struct lflow_table *lflow_table,
> >                    struct lswitch_flow_build_info *lsiv,
> > -                  size_t n_lsiv)
> > +                  size_t n_lsiv, size_t start)
> >  {
> > -    size_t total = 0;
> > +    size_t total = start;
> >      for (size_t i = 0; i < n_lsiv; i++) {
> >          total += lsiv[i].thread_lflow_counter;
> >      }
> > @@ -18145,8 +18145,10 @@ build_lswitch_and_lrouter_flows(
> >          }
> >
> >          /* Run thread pool. */
> > +        size_t current_lflow_table_size = hmap_count(&lflows->entries);
> >          run_pool_callback(build_lflows_pool, NULL, NULL, noop_callback);
> > -        fix_flow_table_size(lflows, lsiv, build_lflows_pool->size);
> > +        fix_flow_table_size(lflows, lsiv, build_lflows_pool->size,
> > +                            current_lflow_table_size);
> >
> >          for (index = 0; index < build_lflows_pool->size; index++) {
> >              ds_destroy(&lsiv[index].match);
> > diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
> > index 83a142d20..b2df05256 100644
> > --- a/tests/ovn-northd.at
> > +++ b/tests/ovn-northd.at
> > @@ -12535,6 +12535,38 @@ AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [0], [ignore])
> >  AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [0], [ignore])
> >  AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [0], [ignore])
> >
> > +# Clear the SB:Logical_Flow.logical_dp_groups column of all the
> > +# logical flows and then modify the NB:Load_balancer.  ovn-northd
> > +# should resync the logical flows.
> > +for l in $(ovn-sbctl --bare --columns _uuid list logical_flow)
> > +do
> > +    check ovn-sbctl clear logical_flow $l logical_dp_group
> > +done
> > +
> > +check ovn-nbctl --wait=sb set load_balancer lb2 vips='{"10.0.0.10:80"="10.0.0.3:80,10.0.0.4:80"}'
> > +check_engine_stats lflow recompute compute
> > +CHECK_NO_CHANGE_AFTER_RECOMPUTE
> > +
> > +lb_lflow_uuid=$(fetch_column Logical_flow _uuid match='"ct.new && ip4.dst == 10.0.0.10 && reg1[[16..23]] == 6 && reg1[[0..15]] == 80"')
> > +AT_CHECK([test "$lb_lflow_uuid" != ""])
> > +
> > +lb_lflow_dp=$(ovn-sbctl --bare --columns logical_datapath list logical_flow $lb_lflow_uuid)
> > +AT_CHECK([test "$lb_lflow_dp" = ""])
> > +
> > +lb_lflow_dpgrp=$(ovn-sbctl --bare --columns logical_dp_group list logical_flow $lb_lflow_uuid)
> > +AT_CHECK([test "$lb_lflow_dpgrp" != ""])
> > +
> > +dpgrp_dps=$(ovn-sbctl --bare --columns datapaths list logical_dp_group $lb_lflow_dpgrp)
> > +
> > +echo "dpgrp dps - $dpgrp_dps"
> > +
> > +AT_CHECK([echo $dpgrp_dps | grep $sw0_uuid], [1], [ignore])
> > +AT_CHECK([echo $dpgrp_dps | grep $sw1_uuid], [1], [ignore])
> > +AT_CHECK([echo $dpgrp_dps | grep $sw2_uuid], [1], [ignore])
> > +AT_CHECK([echo $dpgrp_dps | grep $sw3_uuid], [0], [ignore])
> > +AT_CHECK([echo $dpgrp_dps | grep $sw4_uuid], [0], [ignore])
> > +AT_CHECK([echo $dpgrp_dps | grep $sw5_uuid], [0], [ignore])
> > +
> >  AT_CLEANUP
> >  ])
> >
> > --
> > 2.34.1
> >
> >

_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
