On Thu, Jun 29, 2023 at 9:19 AM Dumitru Ceara <dce...@redhat.com> wrote:
>
> On 6/27/23 10:23, Numan Siddique wrote:
> > On Mon, Jun 26, 2023 at 10:34 PM Han Zhou <hz...@ovn.org> wrote:
> >>
> >> On Mon, Jun 26, 2023 at 7:25 AM Numan Siddique <num...@ovn.org> wrote:
> >>>
> >>> On Sun, Jun 18, 2023 at 11:48 AM Han Zhou <hz...@ovn.org> wrote:
> >>>>
> >>>> For incremental processing, it is important to maintain the relationship
> >>>> between the inputs and the logical flows generated. This patch creates
> >>>> the links between ovn_port and logical flows. The same data structure
> >>>> may be expanded to maintain links between logical flows and other types
> >>>> of inputs.
> >>>>
> >>>> This patch also refactors the temp_lflow_list operations to
> >>>> collected_lflows with helper functions to start and end collecting. It
> >>>> still uses global variables just to avoid updating all the lflow_add_...
> >>>> related code all over the northd.c file.
> >>>>
> >>>> Signed-off-by: Han Zhou <hz...@ovn.org>
> >>>
> >>> Hi Han,
> >>>
> >>> Please see a few comments below.  I did review all the 3 patches in the series.
> >>> They LGTM overall.  I'd like to do some more testing before providing my Acks.
> >>>
> >>
> >> Thanks for your review!
> >>
> >>>
> >>>> ---
> >>>>  northd/northd.c | 271 +++++++++++++++++++++++++++++++-----------------
> >>>>  1 file changed, 178 insertions(+), 93 deletions(-)
> >>>>
> >>>> diff --git a/northd/northd.c b/northd/northd.c
> >>>> index 98f528f93cfc..aa0f853ce2db 100644
> >>>> --- a/northd/northd.c
> >>>> +++ b/northd/northd.c
> >>>> @@ -1457,6 +1457,19 @@ struct ovn_port_routable_addresses {
> >>>>      size_t n_addrs;
> >>>>  };
> >>>>
> >>>> +/* A node that maintains link between an object (such as an ovn_port) and
> >>>> + * a lflow. */
> >>>> +struct lflow_ref_node {
> >>>> +    /* This list follows different lflows referenced by the same object. List
> >>>> +     * head is, for example, ovn_port->lflows.  */
> >>>> +    struct ovs_list lflow_list_node;
> >>>> +    /* This list follows different objects that reference the same lflow. List
> >>>> +     * head is ovn_lflow->referenced_by. */
> >>>> +    struct ovs_list ref_list_node;
> >>>> +    /* The lflow. */
> >>>> +    struct ovn_lflow *lflow;
> >>>> +};
> >>>> +
> >>>>  /* A logical switch port or logical router port.
> >>>>   *
> >>>>   * In steady state, an ovn_port points to a northbound Logical_Switch_Port
> >>>> @@ -1548,6 +1561,28 @@ struct ovn_port {
> >>>>
> >>>>      /* Temporarily used for traversing a list (or hmap) of ports. */
> >>>>      bool visited;
> >>>> +
> >>>> +    /* List of struct lflow_ref_node that points to the lflows generated by
> >>>> +     * this ovn_port.
> >>>> +     *
> >>>> +     * This data is initialized and destroyed by the en_northd node, but
> >>>> +     * populated and used only by the en_lflow node. Ideally this data should
> >>>> +     * be maintained as part of en_lflow's data (struct lflow_data): a hash
> >>>> +     * index from ovn_port key to lflows.  However, it would be less efficient
> >>>> +     * and more complex:
> >>>> +     *
> >>>> +     * 1. It would require an extra search (using the index) to find the
> >>>> +     * lflows.
> >>>> +     *
> >>>> +     * 2. Building the index needs to be thread-safe, using either a global
> >>>> +     * lock which is obviously less efficient, or hash-based lock array which
> >>>> +     * is more complex.
> >>>> +     *
> >>>> +     * Adding the list here is more straightforward. The drawback is that we
> >>>> +     * need to keep in mind that this data belongs to en_lflow node, so never
> >>>> +     * access it from any other nodes.
> >>>> +     */
> >>>> +    struct ovs_list lflows;
> >>>>  };
> >>>
> >>>
> >>>>
> >>>>  static bool lsp_can_be_inc_processed(const struct nbrec_logical_switch_port *);
> >>>> @@ -1635,6 +1670,8 @@ ovn_port_create(struct hmap *ports, const char *key,
> >>>>      ovn_port_set_nb(op, nbsp, nbrp);
> >>>>      op->l3dgw_port = op->cr_port = NULL;
> >>>>      hmap_insert(ports, &op->key_node, hash_string(op->key, 0));
> >>>> +
> >>>> +    ovs_list_init(&op->lflows);
> >>>>      return op;
> >>>>  }
> >>>>
> >>>> @@ -1665,6 +1702,13 @@ ovn_port_destroy_orphan(struct ovn_port *port)
> >>>>      destroy_lport_addresses(&port->proxy_arp_addrs);
> >>>>      free(port->json_key);
> >>>>      free(port->key);
> >>>> +
> >>>> +    struct lflow_ref_node *l;
> >>>> +    LIST_FOR_EACH_SAFE (l, lflow_list_node, &port->lflows) {
> >>>> +        ovs_list_remove(&l->lflow_list_node);
> >>>> +        ovs_list_remove(&l->ref_list_node);
> >>>> +        free(l);
> >>>> +    }
> >>>>      free(port);
> >>>>  }
> >>>>
> >>>> @@ -4893,6 +4937,7 @@ static struct ovn_port *
> >>>>  ls_port_create(struct ovsdb_idl_txn *ovnsb_txn, struct hmap *ls_ports,
> >>>>                 const char *key, const struct nbrec_logical_switch_port *nbsp,
> >>>>                 struct ovn_datapath *od, const struct sbrec_port_binding *sb,
> >>>> +               struct ovs_list *lflows,
> >>>>                 const struct sbrec_mirror_table *sbrec_mirror_table,
> >>>>                 const struct sbrec_chassis_table *sbrec_chassis_table,
> >>>>                 struct ovsdb_idl_index *sbrec_chassis_by_name,
> >>>> @@ -4903,6 +4948,9 @@ ls_port_create(struct ovsdb_idl_txn *ovnsb_txn, struct hmap *ls_ports,
> >>>>      parse_lsp_addrs(op);
> >>>>      op->od = od;
> >>>>      hmap_insert(&od->ports, &op->dp_node, hmap_node_hash(&op->key_node));
> >>>> +    if (lflows) {
> >>>> +        ovs_list_splice(&op->lflows, lflows->next, lflows);
> >>>> +    }
> >>>>
> >>>>      /* Assign explicitly requested tunnel ids first. */
> >>>>      if (!ovn_port_assign_requested_tnl_id(sbrec_chassis_table, op)) {
> >>>> @@ -5082,7 +5130,7 @@ northd_handle_ls_changes(struct ovsdb_idl_txn *ovnsb_idl_txn,
> >>>>                      goto fail;
> >>>>                  }
> >>>>                  op = ls_port_create(ovnsb_idl_txn, &nd->ls_ports,
> >>>> -                                    new_nbsp->name, new_nbsp, od, NULL,
> >>>> +                                    new_nbsp->name, new_nbsp, od, NULL, NULL,
> >>>>                                      ni->sbrec_mirror_table,
> >>>>                                      ni->sbrec_chassis_table,
> >>>>                                      ni->sbrec_chassis_by_name,
> >>>> @@ -5114,13 +5162,16 @@ northd_handle_ls_changes(struct ovsdb_idl_txn *ovnsb_idl_txn,
> >>>>                      op->visited = true;
> >>>>                      continue;
> >>>>                  }
> >>>> +                struct ovs_list lflows = OVS_LIST_INITIALIZER(&lflows);
> >>>> +                ovs_list_splice(&lflows, op->lflows.next, &op->lflows);
> >>>>                  ovn_port_destroy(&nd->ls_ports, op);
> >>>>                  op = ls_port_create(ovnsb_idl_txn, &nd->ls_ports,
> >>>> -                                    new_nbsp->name, new_nbsp, od, sb,
> >>>> +                                    new_nbsp->name, new_nbsp, od, sb, &lflows,
> >>>>                                      ni->sbrec_mirror_table,
> >>>>                                      ni->sbrec_chassis_table,
> >>>>                                      ni->sbrec_chassis_by_name,
> >>>>                                      ni->sbrec_chassis_by_hostname);
> >>>> +                ovs_assert(ovs_list_is_empty(&lflows));
> >>>>                  if (!op) {
> >>>>                      goto fail;
> >>>>                  }
> >>>> @@ -5577,7 +5628,8 @@ ovn_igmp_group_destroy(struct hmap *igmp_groups,
> >>>>
> >>>>  struct ovn_lflow {
> >>>>      struct hmap_node hmap_node;
> >>>> -    struct ovs_list list_node;
> >>>> +    struct ovs_list list_node;   /* For temporary list of lflows. Don't remove
> >>>> +                                    at destroy. */
> >>>>
> >>>>      struct ovn_datapath *od;     /* 'logical_datapath' in SB schema.  */
> >>>>      unsigned long *dpg_bitmap;   /* Bitmap of all datapaths by their 'index'.*/
> >>>> @@ -5591,6 +5643,8 @@ struct ovn_lflow {
> >>>>      size_t n_ods;                /* Number of datapaths referenced by 'od' and
> >>>>                                    * 'dpg_bitmap'. */
> >>>>      struct ovn_dp_group *dpg;    /* Link to unique Sb datapath group. */
> >>>> +
> >>>> +    struct ovs_list referenced_by;  /* List of struct lflow_ref_node. */
> >>>>      const char *where;
> >>>>
> >>>>      struct uuid sb_uuid;            /* SB DB row uuid, specified by northd. */
> >>>> @@ -5640,6 +5694,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
> >>>>                 char *stage_hint, const char *where)
> >>>>  {
> >>>>      ovs_list_init(&lflow->list_node);
> >>>> +    ovs_list_init(&lflow->referenced_by);
> >>>>      lflow->dpg_bitmap = bitmap_allocate(dp_bitmap_len);
> >>>>      lflow->od = od;
> >>>>      lflow->stage = stage;
> >>>> @@ -5767,20 +5822,30 @@ ovn_dp_group_add_with_reference(struct ovn_lflow *lflow_ref,
> >>>>      }
> >>>>  }
> >>>>
> >>>> +/* This global variable collects the lflows generated by do_ovn_lflow_add().
> >>>> + * start_collecting_lflows() will enable the lflow collection and the calls to
> >>>> + * do_ovn_lflow_add (or the macros ovn_lflow_add_...) will add generated lflows
> >>>> + * to the list. end_collecting_lflows() will disable it. */
> >>>> +static thread_local struct ovs_list collected_lflows;
> >>>> +static thread_local bool collecting_lflows = false;
> >>>> +
> >>>> +static void
> >>>> +start_collecting_lflows(void)
> >>>> +{
> >>>> +    ovs_assert(!collecting_lflows);
> >>>> +    ovs_list_init(&collected_lflows);
> >>>> +    collecting_lflows = true;
> >>>> +}
> >>>> +
> >>>> +static void
> >>>> +end_collecting_lflows(void)
> >>>> +{
> >>>> +    ovs_assert(collecting_lflows);
> >>>> +    collecting_lflows = false;
> >>>> +}
> >>>> +
> >>>
> >>> I think we can avoid these functions and the thread local variable
> >>> "collected_lflows".
> >>>
> >>> I'd suggest the below
> >>>
> >>> ----------------------------
> >>>
> >>> static void
> >>> do_ovn_lflow_add(struct hmap *lflow_map, const struct ovn_datapath *od,
> >>>                  const unsigned long *dp_bitmap, size_t dp_bitmap_len,
> >>>                  uint32_t hash, enum ovn_stage stage, uint16_t priority,
> >>>                  const char *match, const char *actions, const char *io_port,
> >>>                  struct ovs_list *lflow_ref_list,
> >>>                  const struct ovsdb_idl_row *stage_hint,
> >>>                  const char *where, const char *ctrl_meter)
> >>>     OVS_REQUIRES(fake_hash_mutex)
> >>> {
> >>>     ...
> >>>     ...
> >>>     /* At the end. */
> >>>     if (lflow_ref_list) {
> >>>         struct lflow_ref_node *lfrn = xmalloc(sizeof *lfrn);
> >>>         lfrn->lflow = lflow;
> >>>         ovs_list_insert(lflow_ref_list, &lfrn->lflow_list_node);
> >>>         ovs_list_insert(&lflow->referenced_by, &lfrn->ref_list_node);
> >>>     }
> >>> }
> >>>
> >>>
> >>> #define ovn_lflow_add_with_lport_and_hint(LFLOW_MAP, OD, STAGE, PRIORITY, \
> >>>                                           MATCH, ACTIONS, IN_OUT_PORT, \
> >>>                                           LFLOW_REF_LIST, STAGE_HINT) \
> >>>     ovn_lflow_add_at(LFLOW_MAP, OD, NULL, 0, STAGE, PRIORITY, MATCH, ACTIONS, \
> >>>                      IN_OUT_PORT, LFLOW_REF_LIST, NULL, STAGE_HINT, \
> >>>                      OVS_SOURCE_LOCATOR)
> >>>
> >>> And pass &op->lflows in ovn_lflow_add_with_lport_and_hint()
> >>>
> >>> -----------------------------
> >>>
> >>> What do you think ?  Definitely this would result in a bit more work
> >>> as it would require a lot of (tedious) changes.
> >>> I think this is a better approach.
> >>>
> >> Firstly, I think it is not good to use "lflow_ref_list" directly in the
> >> parameter, because there can be more than one object contributing to the
> >> lflow generation. When we implement I-P for more inputs, a single lflow may
> >> be referenced by multiple objects. We can't pass multiple lflow_ref_list to
> >> the function either, because the number of such lists is unknown. For
> >> example, a lflow may be generated as a result of a LSP, a DP and a LB
> >> backend. If we want to implement I-P for LSP, DP and LB backend, we need to
> >> track the reference for all of them. So the current idea is just to collect
> >> a list of lflows generated by a higher level function, such as the
> >> build_lswitch_and_lrouter_iterate_by_lsp. When implementing I-P for more
> >> than one object of the same lflow, this needs to be more fine-grained.
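
(To illustrate the many-to-many case above with a rough, hypothetical
sketch of where this could go once more inputs get I-P: only op->lflows
exists in this patch; od->lflows and backend->lflows below are made-up
fields, purely for illustration.)

----------------------------

/* Same linking as link_ovn_port_to_lflows(), but taking the owner's list,
 * so any object type could hold references to the lflow. */
static void
link_object_to_lflow(struct ovs_list *owner_lflows, struct ovn_lflow *lflow)
{
    struct lflow_ref_node *lfrn = xmalloc(sizeof *lfrn);
    lfrn->lflow = lflow;
    ovs_list_insert(owner_lflows, &lfrn->lflow_list_node);
    ovs_list_insert(&lflow->referenced_by, &lfrn->ref_list_node);
}

/* One lflow referenced by an LSP, a DP and an LB backend. */
link_object_to_lflow(&op->lflows, lflow);
link_object_to_lflow(&od->lflows, lflow);       /* hypothetical field */
link_object_to_lflow(&backend->lflows, lflow);  /* hypothetical field */

----------------------------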
>
> I'm still reviewing this patch but my first impression was that we can
> probably try to use 'struct objdep_mgr' (defined in lib/objdep.h) to
> model all these (potentially many-to-many) relationships.  We do similar
> things in ovn-controller.
>
My first impression was similar, but I later figured out that it is
different (and in general more complex than in ovn-controller), and we
would need to make changes to objdep_mgr. However, until more input I-P is
implemented, I am not sure I can come up with an abstraction in objdep_mgr
that is general enough to handle all the use cases. On the other hand, the
references between an LSP and its related lflows are quite simple, so I
went ahead and implemented the lists (embedded into ovn_port and lflow,
without extra hash tables) with minimal effort, so that it works at least
for this scenario and should also work for similar ones. I think we can
refactor the reference implementation at any time, but it may be better to
first get more I-P implemented; during that process we can always revisit
and see whether objdep_mgr can be extended, or whether another common
dependency/reference library for ovn-northd should be created, etc. What
do you think?
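
To make the minimal-effort approach concrete, the reference handling in
this patch boils down to roughly the following (condensed from the hunks
quoted above, nothing new):

----------------------------

/* Link, while collecting the lflows generated for an ovn_port
 * (link_ovn_port_to_lflows()): */
struct lflow_ref_node *lfrn = xmalloc(sizeof *lfrn);
lfrn->lflow = lflow;
ovs_list_insert(&op->lflows, &lfrn->lflow_list_node);
ovs_list_insert(&lflow->referenced_by, &lfrn->ref_list_node);

/* Unlink, from either owner (ovn_port_destroy_orphan() or
 * ovn_lflow_destroy()); removing the node from both lists keeps the two
 * sides consistent without any extra hash index or locking: */
struct lflow_ref_node *l;
LIST_FOR_EACH_SAFE (l, lflow_list_node, &op->lflows) {
    ovs_list_remove(&l->lflow_list_node);
    ovs_list_remove(&l->ref_list_node);
    free(l);
}

----------------------------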

Regards,
Han

> >
> > Agree.  I'm working on the I-P for datapath changes in lflow engine and
> > as a flow can be referenced by multiple datapaths, I agree this needs to
> > be tracked properly.  And definitely that is out of scope of this patch series.
> >
> >
> >>
> >> Secondly, I agree with you that adding a new parameter to do_ovn_lflow_add
> >> is cleaner. For the collected list I mentioned, it can be a parameter
> >> instead of a thread local variable. However, as you mentioned, the change
> >> is going to be all over the northd.c code, not only for the
> >> ovn_lflow_add_xxx, but also the functions calling the ovn_lflow_add_xxx
> >> macros, and upper layer functions calling those functions, and so on.
> >> Unfortunately C doesn't support optional args. At this moment I am not sure
> >> if the interface is stable enough for the incremental-processing, so I am
> >> not sure if it is worth such a big change. If we need to modify them again
> >> later, all such changes are going to be wasted. On the other hand, although
> >> the thread local variable is not the best way, I think it is still clear
> >> and manageable, if we call the start_collecting_lflows and
> >> end_collecting_lflows in pairs. So, is it ok to leave it for this patch and
> >> in the future when this mechanism proves to work well for more I-P, we can
> >> have a separate patch to refactor (which will include all the tedious
> >> function call changes). What do you think?
> >
> > I agree.  It is definitely tedious to do all the changes.  I'm ok with
> > the approach taken
> > in this patch series.
> >
> > Also, please take a look at the load balancer I-P patch series:
> > http://patchwork.ozlabs.org/project/ovn/list/?series=361083&state=*
> >
> > Thanks
> > Numan
> >
> >>
> >> Thanks,
> >> Han
> >>
> >>> Also I'm planning to work on top of your patches to add I-P for load
> >>> balancers in lflow engine (or perhaps I-P for datapath changes)
> >>>
> >>> My idea is to have a lflow ref list stored in "struct ovn_datapath"
> >>> similar to the way you have done in this patch in "struct ovn_port"
> >>>
> >>> And while adding the flows using one of the macro variants
> >>> 'ovn_lflow_add*' pass &od->lflows.
> >>>
> >>> Please let me know your comments.
> >>>
> >>> The only concern I have with this patch is that "op->lflows" is modified
> >>> by the lflow engine node.
> >>> But I agree with your added comments, and I am also thinking of using the
> >>> same approach for datapath I-P handling.
> >>> And I don't have a better approach at the moment. So I'm fine with it.
> >>>
> >>> Thanks
> >>> Numan
> >>>
> >>>
> >>>>  /* Adds a row with the specified contents to the Logical_Flow table.
> >>>> - * Version to use when hash bucket locking is NOT required.
> >>>> - *
> >>>> - * Note: This function can add generated lflows to the global variable
> >>>> - * temp_lflow_list as its output, controlled by the global variable
> >>>> - * add_lflow_to_temp_list. The caller of the ovn_lflow_add_... marcros can get
> >>>> - * a list of lflows generated by setting add_lflow_to_temp_list to true. The
> >>>> - * caller is responsible for initializing the temp_lflow_list, and also
> >>>> - * reset the add_lflow_to_temp_list to false when it is no longer needed.
> >>>> - * XXX: this mechanism is temporary and will be replaced when we add hash index
> >>>> - * to lflow_data and refactor related functions.
> >>>> - */
> >>>> -static bool add_lflow_to_temp_list = false;
> >>>> -static struct ovs_list temp_lflow_list;
> >>>> + * Version to use when hash bucket locking is NOT required. */
> >>>>  static void
> >>>>  do_ovn_lflow_add(struct hmap *lflow_map, const struct ovn_datapath *od,
> >>>>                   const unsigned long *dp_bitmap, size_t dp_bitmap_len,
> >>>> @@ -5797,7 +5862,7 @@ do_ovn_lflow_add(struct hmap *lflow_map, const struct ovn_datapath *od,
> >>>>      size_t bitmap_len = od ? ods_size(od->datapaths) : dp_bitmap_len;
> >>>>      ovs_assert(bitmap_len);
> >>>>
> >>>> -    if (add_lflow_to_temp_list) {
> >>>> +    if (collecting_lflows) {
> >>>>          ovs_assert(od);
> >>>>          ovs_assert(!dp_bitmap);
> >>>>      } else {
> >>>> @@ -5829,8 +5894,8 @@ do_ovn_lflow_add(struct hmap *lflow_map, const struct ovn_datapath *od,
> >>>>          thread_lflow_counter++;
> >>>>      }
> >>>>
> >>>> -    if (add_lflow_to_temp_list) {
> >>>> -        ovs_list_insert(&temp_lflow_list, &lflow->list_node);
> >>>> +    if (collecting_lflows) {
> >>>> +        ovs_list_insert(&collected_lflows, &lflow->list_node);
> >>>>      }
> >>>>  }
> >>>>
> >>>> @@ -5950,10 +6015,28 @@ ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow)
> >>>>          free(lflow->io_port);
> >>>>          free(lflow->stage_hint);
> >>>>          free(lflow->ctrl_meter);
> >>>> +        struct lflow_ref_node *l;
> >>>> +        LIST_FOR_EACH_SAFE (l, ref_list_node, &lflow->referenced_by) {
> >>>> +            ovs_list_remove(&l->lflow_list_node);
> >>>> +            ovs_list_remove(&l->ref_list_node);
> >>>> +            free(l);
> >>>> +        }
> >>>>          free(lflow);
> >>>>      }
> >>>>  }
> >>>>
> >>>> +static void
> >>>> +link_ovn_port_to_lflows(struct ovn_port *op, struct ovs_list *lflows)
> >>>> +{
> >>>> +    struct ovn_lflow *f;
> >>>> +    LIST_FOR_EACH (f, list_node, lflows) {
> >>>> +        struct lflow_ref_node *lfrn = xmalloc(sizeof *lfrn);
> >>>> +        lfrn->lflow = f;
> >>>> +        ovs_list_insert(&op->lflows, &lfrn->lflow_list_node);
> >>>> +        ovs_list_insert(&f->referenced_by, &lfrn->ref_list_node);
> >>>> +    }
> >>>> +}
> >>>> +
> >>>>  static bool
> >>>>  build_dhcpv4_action(struct ovn_port *op, ovs_be32 offer_ip,
> >>>>                      struct ds *options_action, struct ds *response_action,
> >>>> @@ -15483,6 +15566,7 @@ build_lswitch_and_lrouter_iterate_by_lsp(struct ovn_port *op,
> >>>>                                           struct hmap *lflows)
> >>>>  {
> >>>>      ovs_assert(op->nbsp);
> >>>> +    start_collecting_lflows();
> >>>>
> >>>>      /* Build Logical Switch Flows. */
> >>>>      build_lswitch_port_sec_op(op, lflows, actions, match);
> >>>> @@ -15497,6 +15581,9 @@ build_lswitch_and_lrouter_iterate_by_lsp(struct ovn_port *op,
> >>>>      /* Build Logical Router Flows. */
> >>>>      build_ip_routing_flows_for_router_type_lsp(op, lr_ports, lflows);
> >>>>      build_arp_resolve_flows_for_lsp(op, lflows, lr_ports, match, actions);
> >>>> +
> >>>> +    link_ovn_port_to_lflows(op, &collected_lflows);
> >>>> +    end_collecting_lflows();
> >>>>  }
> >>>>
> >>>>  /* Helper function to combine all lflow generation which is iterated by logical
> >>>> @@ -16223,14 +16310,10 @@ bool lflow_handle_northd_ls_changes(struct ovsdb_idl_txn *ovnsb_txn,
> >>>>  {
> >>>>      struct ls_change *ls_change;
> >>>>      LIST_FOR_EACH (ls_change, list_node, &ls_changes->updated) {
> >>>> -        ovs_list_init(&temp_lflow_list);
> >>>> -        add_lflow_to_temp_list = true;
> >>>>          if (!ovs_list_is_empty(&ls_change->updated_ports) ||
> >>>>              !ovs_list_is_empty(&ls_change->deleted_ports)) {
> >>>>              /* XXX: implement lflow index so that we can handle updated and
> >>>>               * deleted LSPs incrementally. */
> >>>> -            ovs_list_init(&temp_lflow_list);
> >>>> -            add_lflow_to_temp_list = false;
> >>>>              return false;
> >>>>          }
> >>>>
> >>>> @@ -16277,83 +16360,85 @@ bool lflow_handle_northd_ls_changes(struct ovsdb_idl_txn *ovnsb_txn,
> >>>>                  sbrec_multicast_group_update_ports_addvalue(sbmc_unknown,
> >>>>                                                              op->sb);
> >>>>              }
> >>>> -        }
> >>>> -        /* Sync the newly added flows to SB. */
> >>>> -        struct ovn_lflow *lflow;
> >>>> -        LIST_FOR_EACH (lflow, list_node, &temp_lflow_list) {
> >>>> -            size_t n_datapaths;
> >>>> -            struct ovn_datapath **datapaths_array;
> >>>> -            if (ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH) {
> >>>> -                n_datapaths = ods_size(lflow_input->ls_datapaths);
> >>>> -                datapaths_array = lflow_input->ls_datapaths->array;
> >>>> -            } else {
> >>>> -                n_datapaths = ods_size(lflow_input->lr_datapaths);
> >>>> -                datapaths_array = lflow_input->lr_datapaths->array;
> >>>> -            }
> >>>> -            uint32_t n_ods = bitmap_count1(lflow->dpg_bitmap, n_datapaths);
> >>>> -            ovs_assert(n_ods == 1);
> >>>> -            /* There is only one datapath, so it should be moved out of the
> >>>> -             * group to a single 'od'. */
> >>>> -            size_t index = bitmap_scan(lflow->dpg_bitmap, true, 0,
> >>>> -                                       n_datapaths);
> >>>>
> >>>> -            bitmap_set0(lflow->dpg_bitmap, index);
> >>>> -            lflow->od = datapaths_array[index];
> >>>> -
> >>>> -            /* Logical flow should be re-hashed to allow lookups. */
> >>>> -            uint32_t hash = hmap_node_hash(&lflow->hmap_node);
> >>>> -            /* Remove from lflows. */
> >>>> -            hmap_remove(lflows, &lflow->hmap_node);
> >>>> -            hash = ovn_logical_flow_hash_datapath(&lflow->od->sb->header_.uuid,
> >>>> -                                                  hash);
> >>>> -            /* Add back. */
> >>>> -            hmap_insert(lflows, &lflow->hmap_node, hash);
> >>>> -
> >>>> -            /* Sync to SB. */
> >>>> -            const struct sbrec_logical_flow *sbflow;
> >>>> -            lflow->sb_uuid = uuid_random();
> >>>> -            sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> >>>> -                                                            &lflow->sb_uuid);
> >>>> -            const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
> >>>> -            uint8_t table = ovn_stage_get_table(lflow->stage);
> >>>> -            sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
> >>>> -            sbrec_logical_flow_set_logical_dp_group(sbflow, NULL);
> >>>> -            sbrec_logical_flow_set_pipeline(sbflow, pipeline);
> >>>> -            sbrec_logical_flow_set_table_id(sbflow, table);
> >>>> -            sbrec_logical_flow_set_priority(sbflow, lflow->priority);
> >>>> -            sbrec_logical_flow_set_match(sbflow, lflow->match);
> >>>> -            sbrec_logical_flow_set_actions(sbflow, lflow->actions);
> >>>> -            if (lflow->io_port) {
> >>>> -                struct smap tags = SMAP_INITIALIZER(&tags);
> >>>> -                smap_add(&tags, "in_out_port", lflow->io_port);
> >>>> -                sbrec_logical_flow_set_tags(sbflow, &tags);
> >>>> -                smap_destroy(&tags);
> >>>> -            }
> >>>> -            sbrec_logical_flow_set_controller_meter(sbflow, lflow->ctrl_meter);
> >>>> -            /* Trim the source locator lflow->where, which looks something like
> >>>> -             * "ovn/northd/northd.c:1234", down to just the part following the
> >>>> -             * last slash, e.g. "northd.c:1234". */
> >>>> -            const char *slash = strrchr(lflow->where, '/');
> >>>> +            /* Sync the newly added flows to SB. */
> >>>> +            struct lflow_ref_node *lfrn;
> >>>> +            LIST_FOR_EACH (lfrn, lflow_list_node, &op->lflows) {
> >>>> +                struct ovn_lflow *lflow = lfrn->lflow;
> >>>> +                size_t n_datapaths;
> >>>> +                struct ovn_datapath **datapaths_array;
> >>>> +                if (ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH) {
> >>>> +                    n_datapaths = ods_size(lflow_input->ls_datapaths);
> >>>> +                    datapaths_array = lflow_input->ls_datapaths->array;
> >>>> +                } else {
> >>>> +                    n_datapaths = ods_size(lflow_input->lr_datapaths);
> >>>> +                    datapaths_array = lflow_input->lr_datapaths->array;
> >>>> +                }
> >>>> +                uint32_t n_ods = bitmap_count1(lflow->dpg_bitmap, n_datapaths);
> >>>> +                ovs_assert(n_ods == 1);
> >>>> +                /* There is only one datapath, so it should be moved out of the
> >>>> +                 * group to a single 'od'. */
> >>>> +                size_t index = bitmap_scan(lflow->dpg_bitmap, true, 0,
> >>>> +                                           n_datapaths);
> >>>> +
> >>>> +                bitmap_set0(lflow->dpg_bitmap, index);
> >>>> +                lflow->od = datapaths_array[index];
> >>>> +
> >>>> +                /* Logical flow should be re-hashed to allow lookups. */
> >>>> +                uint32_t hash = hmap_node_hash(&lflow->hmap_node);
> >>>> +                /* Remove from lflows. */
> >>>> +                hmap_remove(lflows, &lflow->hmap_node);
> >>>> +                hash = ovn_logical_flow_hash_datapath(
> >>>> +                                        &lflow->od->sb->header_.uuid, hash);
> >>>> +                /* Add back. */
> >>>> +                hmap_insert(lflows, &lflow->hmap_node, hash);
> >>>> +
> >>>> +                /* Sync to SB. */
> >>>> +                const struct sbrec_logical_flow *sbflow;
> >>>> +                lflow->sb_uuid = uuid_random();
> >>>> +                sbflow = sbrec_logical_flow_insert_persist_uuid(
> >>>> +                                                ovnsb_txn, &lflow->sb_uuid);
> >>>> +                const char *pipeline = ovn_stage_get_pipeline_name(
> >>>> +                                                        lflow->stage);
> >>>> +                uint8_t table = ovn_stage_get_table(lflow->stage);
> >>>> +                sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
> >>>> +                sbrec_logical_flow_set_logical_dp_group(sbflow, NULL);
> >>>> +                sbrec_logical_flow_set_pipeline(sbflow, pipeline);
> >>>> +                sbrec_logical_flow_set_table_id(sbflow, table);
> >>>> +                sbrec_logical_flow_set_priority(sbflow, lflow->priority);
> >>>> +                sbrec_logical_flow_set_match(sbflow, lflow->match);
> >>>> +                sbrec_logical_flow_set_actions(sbflow, lflow->actions);
> >>>> +                if (lflow->io_port) {
> >>>> +                    struct smap tags = SMAP_INITIALIZER(&tags);
> >>>> +                    smap_add(&tags, "in_out_port", lflow->io_port);
> >>>> +                    sbrec_logical_flow_set_tags(sbflow, &tags);
> >>>> +                    smap_destroy(&tags);
> >>>> +                }
> >>>> +                sbrec_logical_flow_set_controller_meter(sbflow,
> >>>> +                                                        lflow->ctrl_meter);
> >>>> +                /* Trim the source locator lflow->where, which looks something
> >>>> +                 * like "ovn/northd/northd.c:1234", down to just the part
> >>>> +                 * following the last slash, e.g. "northd.c:1234". */
> >>>> +                const char *slash = strrchr(lflow->where, '/');
> >>>>  #if _WIN32
> >>>> -            const char *backslash = strrchr(lflow->where, '\\');
> >>>> -            if (!slash || backslash > slash) {
> >>>> -                slash = backslash;
> >>>> -            }
> >>>> +                const char *backslash = strrchr(lflow->where, '\\');
> >>>> +                if (!slash || backslash > slash) {
> >>>> +                    slash = backslash;
> >>>> +                }
> >>>>  #endif
> >>>> -            const char *where = slash ? slash + 1 : lflow->where;
> >>>> +                const char *where = slash ? slash + 1 : lflow->where;
> >>>>
> >>>> -            struct smap ids = SMAP_INITIALIZER(&ids);
> >>>> -            smap_add(&ids, "stage-name", ovn_stage_to_str(lflow->stage));
> >>>> -            smap_add(&ids, "source", where);
> >>>> -            if (lflow->stage_hint) {
> >>>> -                smap_add(&ids, "stage-hint", lflow->stage_hint);
> >>>> +                struct smap ids = SMAP_INITIALIZER(&ids);
> >>>> +                smap_add(&ids, "stage-name", ovn_stage_to_str(lflow->stage));
> >>>> +                smap_add(&ids, "source", where);
> >>>> +                if (lflow->stage_hint) {
> >>>> +                    smap_add(&ids, "stage-hint", lflow->stage_hint);
> >>>> +                }
> >>>> +                sbrec_logical_flow_set_external_ids(sbflow, &ids);
> >>>> +                smap_destroy(&ids);
> >>>>              }
> >>>> -            sbrec_logical_flow_set_external_ids(sbflow, &ids);
> >>>> -            smap_destroy(&ids);
> >>>>          }
> >>>>      }
> >>>> -    ovs_list_init(&temp_lflow_list);
> >>>> -    add_lflow_to_temp_list = false;
> >>>>      return true;
> >>>>
> >>>>  }
> >>>> --
> >>>> 2.30.2
> >>>>
>
_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
