On Fri, Jan 19, 2024 at 6:13 AM Dumitru Ceara <dce...@redhat.com> wrote:
>
> On 1/11/24 16:32, num...@ovn.org wrote:
> > From: Numan Siddique <num...@ovn.org>
> >
> > Since northd tracked data has the changed lb data, northd
> > engine handler for lflow engine now handles the lb changes
> > incrementally.  All the lflows generated for each lb are
>
> The first sentence is a bit confusing.  What about re-phrasing it to:
>
> Since northd tracked data has the changed lb information, the lflow
> change handler for northd inputs can now handle lb updates incrementally.

Ack.


>
> > stored in the ovn_lb_datapaths->lflow_ref, which is used
> > similarly to how we handle ovn_port changes.
> >
> > Signed-off-by: Numan Siddique <num...@ovn.org>
> > ---
> >  northd/en-lflow.c   | 11 ++---
> >  northd/lb.c         |  3 ++
> >  northd/lb.h         | 26 ++++++++++++
> >  northd/lflow-mgr.c  | 47 +++++++++++++++++-----
> >  northd/northd.c     | 98 +++++++++++++++++++++++++++++++++++++++------
> >  northd/northd.h     |  4 ++
> >  tests/ovn-northd.at | 30 ++++++++++----
> >  7 files changed, 184 insertions(+), 35 deletions(-)
> >
> > diff --git a/northd/en-lflow.c b/northd/en-lflow.c
> > index fef9a1352d..205605578f 100644
> > --- a/northd/en-lflow.c
> > +++ b/northd/en-lflow.c
> > @@ -123,11 +123,6 @@ lflow_northd_handler(struct engine_node *node,
> >          return false;
> >      }
> >
> > -    /* Fall back to recompute if load balancers have changed. */
> > -    if (northd_has_lbs_in_tracked_data(&northd_data->trk_data)) {
> > -        return false;
> > -    }
> > -
> >      const struct engine_context *eng_ctx = engine_get_context();
> >      struct lflow_data *lflow_data = data;
> >
> > @@ -140,6 +135,12 @@ lflow_northd_handler(struct engine_node *node,
> >          return false;
> >      }
> >
> > +    if (!lflow_handle_northd_lb_changes(eng_ctx->ovnsb_idl_txn,
> > +                                &northd_data->trk_data.trk_lbs,
> > +                                &lflow_input, lflow_data->lflow_table)) {
>
> Nit: indentation

Ack.


>
> > +        return false;
> > +    }
> > +
> >      engine_set_node_state(node, EN_UPDATED);
> >      return true;
> >  }
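
For anyone following along: the handler itself lives in the northd.c
hunk of the patch (not quoted above).  Roughly, it mirrors the existing
ovn_port handling.  The sketch below is illustrative only; the
tracked-lb type/field names and the build/sync helper are placeholders,
not the exact code:

    /* Sketch only -- placeholder names, not the patch's exact code. */
    static bool
    lb_changes_handler_sketch(struct tracked_lbs *trk_lbs,
                              struct lflow_input *lflow_input,
                              struct lflow_table *lflow_table)
    {
        struct hmapx_node *hmapx_node;

        /* Walk the lbs reported as created/updated by en_northd. */
        HMAPX_FOR_EACH (hmapx_node, &trk_lbs->crupdated) {
            struct ovn_lb_datapaths *lb_dps = hmapx_node->data;

            /* Forget the lflows generated for this lb on the previous
             * run. */
            lflow_ref_unlink_lflows(lb_dps->lflow_ref);

            /* Re-generate this lb's lflows into lb_dps->lflow_ref and
             * sync only the resulting delta to the SB Logical_Flow
             * table. */
            build_and_sync_lb_lflows(lb_dps, lflow_input, lflow_table);
        }
        return true;
    }
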
> > diff --git a/northd/lb.c b/northd/lb.c
> > index e6c8a51911..5fca41e049 100644
> > --- a/northd/lb.c
> > +++ b/northd/lb.c
> > @@ -21,6 +21,7 @@
> >
> >  /* OVN includes */
> >  #include "lb.h"
> > +#include "lflow-mgr.h"
> >  #include "lib/lb.h"
> >  #include "northd.h"
> >
> > @@ -33,6 +34,7 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
> >      lb_dps->lb = lb;
> >      lb_dps->nb_ls_map = bitmap_allocate(n_ls_datapaths);
> >      lb_dps->nb_lr_map = bitmap_allocate(n_lr_datapaths);
> > +    lb_dps->lflow_ref = lflow_ref_create();
> >
> >      return lb_dps;
> >  }
> > @@ -42,6 +44,7 @@ ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps)
> >  {
> >      bitmap_free(lb_dps->nb_lr_map);
> >      bitmap_free(lb_dps->nb_ls_map);
> > +    lflow_ref_destroy(lb_dps->lflow_ref);
> >      free(lb_dps);
> >  }
> >
> > diff --git a/northd/lb.h b/northd/lb.h
> > index eeef2ea260..de677ca4ef 100644
> > --- a/northd/lb.h
> > +++ b/northd/lb.h
> > @@ -20,6 +20,8 @@
> >  #include "openvswitch/hmap.h"
> >  #include "uuid.h"
> >
> > +struct lflow_ref;
> > +
> >  struct ovn_lb_datapaths {
> >      struct hmap_node hmap_node;
> >
> > @@ -29,6 +31,30 @@ struct ovn_lb_datapaths {
> >
> >      size_t n_nb_lr;
> >      unsigned long *nb_lr_map;
> > +
> > +    /* Reference of lflows generated for this load balancer.
> > +     *
> > +     * This data is initialized and destroyed by the en_northd node, but
> > +     * populated and used only by the en_lflow node. Ideally this data should
> > +     * be maintained as part of en_lflow's data (struct lflow_data): a hash
> > +     * index from the lb key to lflows.  However, it would be less efficient
> > +     * and more complex:
> > +     *
> > +     * 1. It would require an extra search (using the index) to find the
> > +     * lflows.
> > +     *
> > +     * 2. Building the index needs to be thread-safe, using either a global
> > +     * lock which is obviously less efficient, or hash-based lock array which
> > +     * is more complex.
> > +     *
> > +     * Maintaining the lflow_ref here is more straightforward. The drawback is
> > +     * that we need to keep in mind that this data belongs to en_lflow node,
> > +     * so never access it from any other nodes.
> > +     *
> > +     * 'lflow_ref' is used to reference logical flows generated for this
> > +     *  load balancer.
> > +     */
> > +    struct lflow_ref *lflow_ref;
> >  };
> >
> >  struct ovn_lb_datapaths *ovn_lb_datapaths_create(const struct ovn_northd_lb *,
> > diff --git a/northd/lflow-mgr.c b/northd/lflow-mgr.c
> > index 3cf9696f6e..6cb2a367fe 100644
> > --- a/northd/lflow-mgr.c
> > +++ b/northd/lflow-mgr.c
> > @@ -375,7 +375,15 @@ struct lflow_ref_node {
> >      /* The lflow. */
> >      struct ovn_lflow *lflow;
> >
> > -    /* Index id of the datapath this lflow_ref_node belongs to. */
> > +    /* Indicates whether the lflow was added with a dp_group, i.e., using
> > +     * the ovn_lflow_add_with_dp_group() macro. */
> > +    bool dpgrp_lflow;
> > +    /* dpgrp bitmap and bitmap length.  Valid only if dpgrp_lflow is true. */
> > +    unsigned long *dpgrp_bitmap;
> > +    size_t dpgrp_bitmap_len;
>
> Instead of adding this new 'bool dpgrp_lflow' and checking it everywhere
> we need to update the lflow's dp_refcnts_map, can't we just always
> use dpgrp_bitmap?
>
> For the case when flows are not added with ovn_lflow_add_with_dp_group()
> we can just set a single bit in dpgrp_bitmap, the one corresponding to
> od->index.
>
> Wdyt?

It is a good suggestion.  But unfortunately, when
ovn_lflow_add_with_dp_group() is not used, we don't know the bitmap
size.  Although the bitmap length could be passed in, doing so would
require a lot of changes.
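
To illustrate: in lflow_table_add_lflow(), the single-datapath branch
would then have to look something like the sketch below (not part of
the patch; bitmap_allocate()/bitmap_set1() are the existing bitmap
helpers, and 'dp_bitmap_len' is exactly the value that is not available
there today without plumbing it through all the callers):

    if (od) {
        /* Hypothetical: always represent the datapath(s) as a bitmap.
         * Sizing it requires dp_bitmap_len, which callers that pass
         * only 'od' do not currently provide. */
        lrn->dpgrp_bitmap = bitmap_allocate(dp_bitmap_len);
        bitmap_set1(lrn->dpgrp_bitmap, od->index);
        lrn->dpgrp_bitmap_len = dp_bitmap_len;
        inc_dp_refcnt(&lflow->dp_refcnts_map, od->index);
    }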

Thanks
Numan

>
> > +
> > +    /* Index id of the datapath this lflow_ref_node belongs to.
> > +     * Valid only if dpgrp_lflow is false. */
> >      size_t dp_index;
> >
> >      /* Indicates if the lflow_ref_node for an lflow - L(M, A) is linked
> > @@ -429,9 +437,19 @@ lflow_ref_unlink_lflows(struct lflow_ref *lflow_ref)
> >      struct lflow_ref_node *lrn;
> >
> >      LIST_FOR_EACH (lrn, lflow_list_node, &lflow_ref->lflows_ref_list) {
> > -        if (dec_dp_refcnt(&lrn->lflow->dp_refcnts_map,
> > -                          lrn->dp_index)) {
> > -            bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
> > +        if (lrn->dpgrp_lflow) {
> > +            size_t index;
> > +            BITMAP_FOR_EACH_1 (index, lrn->dpgrp_bitmap_len,
> > +                               lrn->dpgrp_bitmap) {
> > +                if (dec_dp_refcnt(&lrn->lflow->dp_refcnts_map, index)) {
> > +                    bitmap_set0(lrn->lflow->dpg_bitmap, index);
> > +                }
> > +            }
> > +        } else {
> > +            if (dec_dp_refcnt(&lrn->lflow->dp_refcnts_map,
> > +                              lrn->dp_index)) {
> > +                bitmap_set0(lrn->lflow->dpg_bitmap, lrn->dp_index);
> > +            }
> >          }
> >
> >          lrn->linked = false;
> > @@ -502,18 +520,26 @@ lflow_table_add_lflow(struct lflow_table *lflow_table,
> >                           io_port, ctrl_meter, stage_hint, where);
> >
> >      if (lflow_ref) {
> > -        /* lflow referencing is only supported if 'od' is not NULL. */
> > -        ovs_assert(od);
> > -
> >          struct lflow_ref_node *lrn =
> >              lflow_ref_node_find(&lflow_ref->lflow_ref_nodes, lflow, hash);
> >          if (!lrn) {
> >              lrn = xzalloc(sizeof *lrn);
> >              lrn->lflow = lflow;
> > -            lrn->dp_index = od->index;
> > +            lrn->dpgrp_lflow = !od;
> > +            if (lrn->dpgrp_lflow) {
> > +                lrn->dpgrp_bitmap = bitmap_clone(dp_bitmap, dp_bitmap_len);
> > +                lrn->dpgrp_bitmap_len = dp_bitmap_len;
> > +
> > +                size_t index;
> > +                BITMAP_FOR_EACH_1 (index, dp_bitmap_len, dp_bitmap) {
> > +                    inc_dp_refcnt(&lflow->dp_refcnts_map, index);
> > +                }
> > +            } else {
> > +                lrn->dp_index = od->index;
> > +                inc_dp_refcnt(&lflow->dp_refcnts_map, lrn->dp_index);
> > +            }
> >              ovs_list_insert(&lflow_ref->lflows_ref_list,
> >                              &lrn->lflow_list_node);
> > -            inc_dp_refcnt(&lflow->dp_refcnts_map, lrn->dp_index);
> >              ovs_list_insert(&lflow->referenced_by, &lrn->ref_list_node);
> >
> >              hmap_insert(&lflow_ref->lflow_ref_nodes, &lrn->ref_node, hash);
> > @@ -1257,5 +1283,8 @@ lflow_ref_node_destroy(struct lflow_ref_node *lrn,
> >      }
> >      ovs_list_remove(&lrn->lflow_list_node);
> >      ovs_list_remove(&lrn->ref_list_node);
> > +    if (lrn->dpgrp_lflow) {
> > +        bitmap_free(lrn->dpgrp_bitmap);
> > +    }
> >      free(lrn);
> >  }
>
> Thanks,
> Dumitru
>
_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
