On Sun, May 24, 2020 at 11:12 AM Han Zhou <hz...@ovn.org> wrote:

> On Sat, May 23, 2020 at 3:26 AM Numan Siddique <num...@ovn.org> wrote:
> >
> >
> >
> > On Thu, May 21, 2020 at 12:28 PM Han Zhou <hz...@ovn.org> wrote:
> >>
> >> Please see my comments below.
> >>
> >> On Wed, May 20, 2020 at 12:41 PM <num...@ovn.org> wrote:
> >> >
> >> > From: Numan Siddique <num...@ovn.org>
> >> >
> >> > In order to handle runtime data changes incrementally, the flow output
> >> > runtime data handler should know the changed runtime data.
> >> > Runtime data now tracks the changed data for any OVS interface
> >> > and SB port binding changes. The tracked data contains an hmap
> >> > of tracked datapaths (which changed during runtime data processing).
> >> >
> >> > The flow output runtime_data handler in this patch doesn't do much
> >> > with the tracked data. It returns false if there is tracked data available
> >> > so that flow_output run is called. If no tracked data is available
> >> > then there is no need for flow computation and the handler returns true.
> >> >
> >> > The next patch in the series processes the tracked data incrementally.
> >> >
> >> > Acked-by: Mark Michelson <mmich...@redhat.com>
> >> > Co-Authored-by: Venkata Anil <anilvenk...@redhat.com>
> >> > Signed-off-by: Venkata Anil <anilvenk...@redhat.com>
> >> > Signed-off-by: Numan Siddique <num...@ovn.org>
> >> > ---
> >> >  controller/binding.c        | 159 ++++++++++++++++++++++++++++++++----
> >> >  controller/binding.h        |  21 +++++
> >> >  controller/ovn-controller.c |  62 +++++++++++++-
> >> >  tests/ovn-performance.at    |  28 +++----
> >> >  4 files changed, 240 insertions(+), 30 deletions(-)
> >> >
> >> > diff --git a/controller/binding.c b/controller/binding.c
> >> > index f00c975ce..9b3d46b23 100644
> >> > --- a/controller/binding.c
> >> > +++ b/controller/binding.c
> >> > @@ -69,13 +69,20 @@ binding_register_ovs_idl(struct ovsdb_idl *ovs_idl)
> >> >      ovsdb_idl_add_column(ovs_idl, &ovsrec_qos_col_type);
> >> >  }
> >> >
> >> > +static struct tracked_binding_datapath *tracked_binding_datapath_create(
> >> > +    const struct sbrec_datapath_binding *,
> >> > +    bool is_new, struct hmap *tracked_dps);
> >> > +static struct tracked_binding_datapath *tracked_binding_datapath_find(
> >> > +    struct hmap *, const struct sbrec_datapath_binding *);
> >> > +
> >> >  static void
> >> >  add_local_datapath__(struct ovsdb_idl_index
> >> *sbrec_datapath_binding_by_key,
> >> >                       struct ovsdb_idl_index
> >> *sbrec_port_binding_by_datapath,
> >> >                       struct ovsdb_idl_index
> *sbrec_port_binding_by_name,
> >> >                       const struct sbrec_datapath_binding *datapath,
> >> >                       bool has_local_l3gateway, int depth,
> >> > -                     struct hmap *local_datapaths)
> >> > +                     struct hmap *local_datapaths,
> >> > +                     struct hmap *updated_dp_bindings)
> >> >  {
> >> >      uint32_t dp_key = datapath->tunnel_key;
> >> >      struct local_datapath *ld = get_local_datapath(local_datapaths,
> >> dp_key);
> >> > @@ -92,6 +99,11 @@ add_local_datapath__(struct ovsdb_idl_index
> >> *sbrec_datapath_binding_by_key,
> >> >      ld->localnet_port = NULL;
> >> >      ld->has_local_l3gateway = has_local_l3gateway;
> >> >
> >> > +    if (updated_dp_bindings &&
> >> > +        !tracked_binding_datapath_find(updated_dp_bindings,
> datapath)) {
> >> > +        tracked_binding_datapath_create(datapath, true,
> >> updated_dp_bindings);
> >> > +    }
> >> > +
> >> >      if (depth >= 100) {
> >> >          static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1,
> 1);
> >> >          VLOG_WARN_RL(&rl, "datapaths nested too deep");
> >> > @@ -124,7 +136,8 @@ add_local_datapath__(struct ovsdb_idl_index
> >> *sbrec_datapath_binding_by_key,
> >> >
> >> sbrec_port_binding_by_datapath,
> >> >
> sbrec_port_binding_by_name,
> >> >                                               peer->datapath, false,
> >> > -                                             depth + 1,
> local_datapaths);
> >> > +                                             depth + 1,
> local_datapaths,
> >> > +                                             updated_dp_bindings);
> >> >                      }
> >> >                      ld->n_peer_ports++;
> >> >                      if (ld->n_peer_ports >
> ld->n_allocated_peer_ports) {
> >> > @@ -147,12 +160,14 @@ add_local_datapath(struct ovsdb_idl_index
> >> *sbrec_datapath_binding_by_key,
> >> >                     struct ovsdb_idl_index
> >> *sbrec_port_binding_by_datapath,
> >> >                     struct ovsdb_idl_index
> *sbrec_port_binding_by_name,
> >> >                     const struct sbrec_datapath_binding *datapath,
> >> > -                   bool has_local_l3gateway, struct hmap
> >> *local_datapaths)
> >> > +                   bool has_local_l3gateway, struct hmap
> >> *local_datapaths,
> >> > +                   struct hmap *updated_dp_bindings)
> >> >  {
> >> >      add_local_datapath__(sbrec_datapath_binding_by_key,
> >> >                           sbrec_port_binding_by_datapath,
> >> >                           sbrec_port_binding_by_name,
> >> > -                         datapath, has_local_l3gateway, 0,
> >> local_datapaths);
> >> > +                         datapath, has_local_l3gateway, 0,
> >> local_datapaths,
> >> > +                         updated_dp_bindings);
> >> >  }
> >> >
> >> >  static void
> >> > @@ -623,6 +638,71 @@ is_lport_container(const struct
> sbrec_port_binding
> >> *pb)
> >> >      return !pb->type[0] && pb->parent_port && pb->parent_port[0];
> >> >  }
> >> >
> >> > +static struct tracked_binding_datapath *
> >> > +tracked_binding_datapath_create(const struct sbrec_datapath_binding *dp,
> >> > +                                bool is_new,
> >> > +                                struct hmap *tracked_datapaths)
> >> > +{
> >> > +    struct tracked_binding_datapath *t_dp = xzalloc(sizeof *t_dp);
> >> > +    t_dp->dp = dp;
> >> > +    t_dp->is_new = is_new;
> >> > +    ovs_list_init(&t_dp->lports_head);
> >> > +    hmap_insert(tracked_datapaths, &t_dp->node, uuid_hash(&dp->header_.uuid));
> >> > +    return t_dp;
> >> > +}
> >> > +
> >> > +static struct tracked_binding_datapath *
> >> > +tracked_binding_datapath_find(struct hmap *tracked_datapaths,
> >> > +                              const struct sbrec_datapath_binding *dp)
> >> > +{
> >> > +    struct tracked_binding_datapath *t_dp;
> >> > +    size_t hash = uuid_hash(&dp->header_.uuid);
> >> > +    HMAP_FOR_EACH_WITH_HASH (t_dp, node, hash, tracked_datapaths) {
> >> > +        if (uuid_equals(&t_dp->dp->header_.uuid, &dp->header_.uuid)) {
> >> > +            return t_dp;
> >> > +        }
> >> > +    }
> >> > +
> >> > +    return NULL;
> >> > +}
> >> > +
> >> > +static void
> >> > +tracked_binding_datapath_lport_add(const struct sbrec_port_binding *pb,
> >> > +                                   bool deleted,
> >> > +                                   struct hmap *tracked_datapaths)
> >> > +{
> >> > +    if (!tracked_datapaths) {
> >> > +        return;
> >> > +    }
> >> > +
> >> > +    struct tracked_binding_datapath *tracked_dp =
> >> > +        tracked_binding_datapath_find(tracked_datapaths, pb->datapath);
> >> > +    if (!tracked_dp) {
> >> > +        tracked_dp = tracked_binding_datapath_create(pb->datapath, false,
> >> > +                                                     tracked_datapaths);
> >> > +    }
> >> > +    struct tracked_binding_lport *lport = xmalloc(sizeof *lport);
> >> > +    lport->pb = pb;
> >> > +    lport->deleted = deleted;
> >> > +    ovs_list_push_back(&tracked_dp->lports_head, &lport->list_node);
> >> > +}
> >> > +
> >> > +void
> >> > +binding_tracked_dp_destroy(struct hmap *tracked_datapaths)
> >> > +{
> >> > +    struct tracked_binding_datapath *t_dp;
> >> > +    HMAP_FOR_EACH_POP (t_dp, node, tracked_datapaths) {
> >> > +        struct tracked_binding_lport *lport, *next;
> >> > +        LIST_FOR_EACH_SAFE (lport, next, list_node, &t_dp->lports_head) {
> >> > +            ovs_list_remove(&lport->list_node);
> >> > +            free(lport);
> >> > +        }
> >> > +        free(t_dp);
> >> > +    }
> >> > +
> >> > +    hmap_destroy(tracked_datapaths);
> >> > +}
> >> > +
> >> >  /* Corresponds to each Port_Binding.type. */
> >> >  enum en_lport_type {
> >> >      LP_UNKNOWN,
> >> > @@ -770,7 +850,8 @@ can_bind_on_this_chassis(const struct
> sbrec_chassis
> >> *chassis_rec,
> >> >  static bool
> >> >  release_local_binding_children(const struct sbrec_chassis
> *chassis_rec,
> >> >                                 struct local_binding *lbinding,
> >> > -                               bool sb_readonly)
> >> > +                               bool sb_readonly,
> >> > +                               struct hmap *tracked_dp_bindings)
> >> >  {
> >> >      struct shash_node *node;
> >> >      SHASH_FOR_EACH (node, &lbinding->children) {
> >> > @@ -780,6 +861,10 @@ release_local_binding_children(const struct
> >> sbrec_chassis *chassis_rec,
> >> >                  return false;
> >> >              }
> >> >          }
> >> > +        if (tracked_dp_bindings) {
> >> > +            tracked_binding_datapath_lport_add(l->pb, true,
> >> > +                                               tracked_dp_bindings);
> >> > +        }
> >> >      }
> >> >
> >> >      return true;
> >> > @@ -787,10 +872,11 @@ release_local_binding_children(const struct
> >> sbrec_chassis *chassis_rec,
> >> >
> >> >  static bool
> >> >  release_local_binding(const struct sbrec_chassis *chassis_rec,
> >> > -                      struct local_binding *lbinding, bool
> sb_readonly)
> >> > +                      struct local_binding *lbinding, bool
> sb_readonly,
> >> > +                      struct hmap *tracked_dp_bindings)
> >> >  {
> >> >      if (!release_local_binding_children(chassis_rec, lbinding,
> >> > -                                        sb_readonly)) {
> >> > +                                        sb_readonly,
> >> tracked_dp_bindings)) {
> >> >          return false;
> >> >      }
> >> >
> >> > @@ -798,6 +884,10 @@ release_local_binding(const struct sbrec_chassis
> >> *chassis_rec,
> >> >          return release_lport(lbinding->pb, sb_readonly);
> >> >      }
> >> >
> >> > +    if (tracked_dp_bindings) {
> >> > +        tracked_binding_datapath_lport_add(lbinding->pb, true,
> >> > +                                           tracked_dp_bindings);
> >> > +    }
> >> >      return true;
> >> >  }
> >> >
> >> > @@ -821,7 +911,8 @@ consider_vif_lport_(const struct
> sbrec_port_binding
> >> *pb,
> >> >
>  add_local_datapath(b_ctx_in->sbrec_datapath_binding_by_key,
> >> >                              b_ctx_in->sbrec_port_binding_by_datapath,
> >> >                              b_ctx_in->sbrec_port_binding_by_name,
> >> > -                            pb->datapath, false,
> >> b_ctx_out->local_datapaths);
> >> > +                            pb->datapath, false,
> >> b_ctx_out->local_datapaths,
> >> > +                            b_ctx_out->tracked_dp_bindings);
> >> >              update_local_lport_ids(b_ctx_out->local_lport_ids, pb);
> >> >              if (lbinding->iface && qos_map && b_ctx_in->ovs_idl_txn)
> {
> >> >                  get_qos_params(pb, qos_map);
> >> > @@ -1003,7 +1094,8 @@ consider_nonvif_lport_(const struct
> >> sbrec_port_binding *pb,
> >> >                             b_ctx_in->sbrec_port_binding_by_datapath,
> >> >                             b_ctx_in->sbrec_port_binding_by_name,
> >> >                             pb->datapath, has_local_l3gateway,
> >> > -                           b_ctx_out->local_datapaths);
> >> > +                           b_ctx_out->local_datapaths,
> >> > +                           b_ctx_out->tracked_dp_bindings);
> >> >
> >> >          update_local_lport_ids(b_ctx_out->local_lport_ids, pb);
> >> >          return claim_lport(pb, b_ctx_in->chassis_rec, NULL,
> >> > @@ -1080,7 +1172,8 @@ consider_ha_lport(const struct
> sbrec_port_binding
> >> *pb,
> >> >                             b_ctx_in->sbrec_port_binding_by_datapath,
> >> >                             b_ctx_in->sbrec_port_binding_by_name,
> >> >                             pb->datapath, false,
> >> > -                           b_ctx_out->local_datapaths);
> >> > +                           b_ctx_out->local_datapaths,
> >> > +                           b_ctx_out->tracked_dp_bindings);
> >> >      }
> >> >
> >> >      return consider_nonvif_lport_(pb, our_chassis, false, b_ctx_in,
> >> b_ctx_out);
> >> > @@ -1367,7 +1460,8 @@ add_local_datapath_peer_port(const struct
> >> sbrec_port_binding *pb,
> >> >
>  b_ctx_in->sbrec_port_binding_by_datapath,
> >> >                               b_ctx_in->sbrec_port_binding_by_name,
> >> >                               peer->datapath, false,
> >> > -                             1, b_ctx_out->local_datapaths);
> >> > +                             1, b_ctx_out->local_datapaths,
> >> > +                             b_ctx_out->tracked_dp_bindings);
> >> >          return;
> >> >      }
> >> >
> >> > @@ -1444,6 +1538,18 @@ remove_pb_from_local_datapath(const struct
> >> sbrec_port_binding *pb,
> >> >      }
> >> >  }
> >> >
> >> > +static void
> >> > +update_lport_tracking(const struct sbrec_port_binding *pb,
> >> > +                      bool old_claim, bool new_claim,
> >> > +                      struct hmap *tracked_dp_bindings)
> >> > +{
> >> > +    if (!tracked_dp_bindings || !pb || (old_claim == new_claim)) {
> >> > +        return;
> >> > +    }
> >> > +
> >> > +    tracked_binding_datapath_lport_add(pb, old_claim, tracked_dp_bindings);
> >> > +}
> >> > +
> >> >  static bool
> >> >  handle_iface_add(const struct ovsrec_interface *iface_rec,
> >> >                   const char *iface_id,
> >> > @@ -1472,6 +1578,7 @@ handle_iface_add(const struct ovsrec_interface
> >> *iface_rec,
> >> >          }
> >> >      }
> >> >
> >> > +    bool claimed = is_lbinding_this_chassis(lbinding,
> >> b_ctx_in->chassis_rec);
> >> >      if (lbinding->pb) {
> >> >          if (!consider_vif_lport(lbinding->pb, b_ctx_in, b_ctx_out,
> >> >                                  lbinding, qos_map)) {
> >> > @@ -1479,6 +1586,10 @@ handle_iface_add(const struct ovsrec_interface
> >> *iface_rec,
> >> >          }
> >> >      }
> >> >
> >> > +    bool now_claimed =
> >> > +        is_lbinding_this_chassis(lbinding, b_ctx_in->chassis_rec);
> >> > +    update_lport_tracking(lbinding->pb, claimed, now_claimed,
> >> > +                          b_ctx_out->tracked_dp_bindings);
> >> >      /* Update the child local_binding's iface (if any children) and
> try
> >> to
> >> >       *  claim the container lbindings. */
> >> >      struct shash_node *node;
> >> > @@ -1486,10 +1597,15 @@ handle_iface_add(const struct ovsrec_interface
> >> *iface_rec,
> >> >          struct local_binding *child = node->data;
> >> >          child->iface = iface_rec;
> >> >          if (child->type == BT_CONTAINER) {
> >> > +            claimed = is_lbinding_this_chassis(child,
> >> b_ctx_in->chassis_rec);
> >> >              if (!consider_container_lport(child->pb, b_ctx_in,
> b_ctx_out,
> >> >                                            qos_map)) {
> >> >                  return false;
> >> >              }
> >> > +            now_claimed =
> >> > +                is_lbinding_this_chassis(child,
> b_ctx_in->chassis_rec);
> >> > +            update_lport_tracking(child->pb, claimed, now_claimed,
> >> > +                                  b_ctx_out->tracked_dp_bindings);
> >> >          }
> >> >      }
> >> >
> >> > @@ -1508,7 +1624,8 @@ handle_iface_delete(const char *lport_name,
> const
> >> char *iface_name,
> >> >          lbinding->pb->chassis == b_ctx_in->chassis_rec) {
> >> >
> >> >          if (!release_local_binding(b_ctx_in->chassis_rec, lbinding,
> >> > -                                   !b_ctx_in->ovnsb_idl_txn)) {
> >> > +                                   !b_ctx_in->ovnsb_idl_txn,
> >> > +                                   b_ctx_out->tracked_dp_bindings)) {
> >> >              return false;
> >> >          }
> >> >
> >> > @@ -1556,6 +1673,8 @@ binding_handle_ovs_interface_changes(struct
> >> binding_ctx_in *b_ctx_in,
> >> >      bool handled = true;
> >> >      *changed = false;
> >> >
> >> > +    *b_ctx_out->local_lports_changed = false;
> >> > +
> >> >      /* Run the tracked interfaces loop twice. One to handle deleted
> >> >       * changes. And another to handle add/update changes.
> >> >       * This will ensure correctness.
> >> > @@ -1698,9 +1817,10 @@ handle_deleted_vif_lport(const struct
> >> sbrec_port_binding *pb,
> >> >           * clear the 'chassis' column of 'pb'. But we need to do
> >> >           * for the local_binding's children. */
> >> >          if (lbinding->type == BT_VIF &&
> >> > -
> !release_local_binding_children(b_ctx_in->chassis_rec,
> >> > -                                                lbinding,
> >> > -
> >>  !b_ctx_in->ovnsb_idl_txn)) {
> >> > +                !release_local_binding_children(
> >> > +                    b_ctx_in->chassis_rec, lbinding,
> >> > +                    !b_ctx_in->ovnsb_idl_txn,
> >> > +                    b_ctx_out->tracked_dp_bindings)) {
> >> >              return false;
> >> >          }
> >> >
> >> > @@ -1746,6 +1866,9 @@ handle_updated_vif_lport(const struct
> >> sbrec_port_binding *pb,
> >> >          return true;
> >> >      }
> >> >
> >> > +    update_lport_tracking(pb, claimed, now_claimed,
> >> > +                          b_ctx_out->tracked_dp_bindings);
> >> > +
> >> >      struct local_binding *lbinding =
> >> >          local_binding_find(b_ctx_out->local_bindings,
> pb->logical_port);
> >> >
> >> > @@ -1755,11 +1878,17 @@ handle_updated_vif_lport(const struct
> >> sbrec_port_binding *pb,
> >> >      SHASH_FOR_EACH (node, &lbinding->children) {
> >> >          struct local_binding *child = node->data;
> >> >          if (child->type == BT_CONTAINER) {
> >> > +            claimed = is_lbinding_this_chassis(child,
> >> b_ctx_in->chassis_rec);
> >> >              handled = consider_container_lport(child->pb, b_ctx_in,
> >> b_ctx_out,
> >> >                                                 qos_map);
> >> >              if (!handled) {
> >> >                  return false;
> >> >              }
> >> > +
> >> > +            now_claimed = is_lbinding_this_chassis(child,
> >> > +
> >> b_ctx_in->chassis_rec);
> >> > +            update_lport_tracking(child->pb, claimed, now_claimed,
> >> > +                                  b_ctx_out->tracked_dp_bindings);
> >> >          }
> >> >      }
> >> >
> >> > diff --git a/controller/binding.h b/controller/binding.h
> >> > index 21118ecd4..fc2a673e5 100644
> >> > --- a/controller/binding.h
> >> > +++ b/controller/binding.h
> >> > @@ -19,6 +19,9 @@
> >> >
> >> >  #include <stdbool.h>
> >> >  #include "openvswitch/shash.h"
> >> > +#include "openvswitch/hmap.h"
> >> > +#include "openvswitch/uuid.h"
> >> > +#include "openvswitch/list.h"
> >> >
> >> >  struct hmap;
> >> >  struct ovsdb_idl;
> >> > @@ -58,6 +61,8 @@ struct binding_ctx_out {
> >> >      struct sset *local_lport_ids;
> >> >      struct sset *egress_ifaces;
> >> >      struct smap *local_iface_ids;
> >> > +    struct hmap *tracked_dp_bindings;
> >> > +    bool *local_lports_changed;
> >> >  };
> >> >
> >> >  enum local_binding_type {
> >> > @@ -82,6 +87,21 @@ local_binding_find(struct shash *local_bindings, const char *name)
> >> >      return shash_find_data(local_bindings, name);
> >> >  }
> >> >
> >> > +/* Represents a tracked binding logical port. */
> >> > +struct tracked_binding_lport {
> >> > +    const struct sbrec_port_binding *pb;
> >> > +    struct ovs_list list_node;
> >> > +    bool deleted;
> >> > +};
> >> > +
> >> > +/* Represent a tracked binding datapath. */
> >> > +struct tracked_binding_datapath {
> >> > +    struct hmap_node node;
> >> > +    const struct sbrec_datapath_binding *dp;
> >> > +    bool is_new;
> >> > +    struct ovs_list lports_head; /* List of struct tracked_binding_lport. */
> >> > +};
> >> > +
> >> >  void binding_register_ovs_idl(struct ovsdb_idl *);
> >> >  void binding_run(struct binding_ctx_in *, struct binding_ctx_out *);
> >> >  bool binding_cleanup(struct ovsdb_idl_txn *ovnsb_idl_txn,
> >> > @@ -96,4 +116,5 @@ bool binding_handle_ovs_interface_changes(struct binding_ctx_in *,
> >> >  bool binding_handle_port_binding_changes(struct binding_ctx_in *,
> >> >                                           struct binding_ctx_out *,
> >> >                                           bool *changed);
> >> > +void binding_tracked_dp_destroy(struct hmap *tracked_datapaths);
> >> >  #endif /* controller/binding.h */
> >> > diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
> >> > index 8f95fff1f..dc790f0f7 100644
> >> > --- a/controller/ovn-controller.c
> >> > +++ b/controller/ovn-controller.c
> >> > @@ -978,6 +978,23 @@ struct ed_type_runtime_data {
> >> >      struct smap local_iface_ids;
> >> >  };
> >> >
> >> > +struct ed_type_runtime_tracked_data {
> >> > +    struct hmap tracked_dp_bindings;
> >> > +    bool local_lports_changed;
> >> > +    bool tracked;
> >> > +};
> >> > +
> >> > +static void
> >> > +en_runtime_clear_tracked_data(void *tracked_data)
> >> > +{
> >> > +    struct ed_type_runtime_tracked_data *data = tracked_data;
> >> > +
> >> > +    binding_tracked_dp_destroy(&data->tracked_dp_bindings);
> >> > +    hmap_init(&data->tracked_dp_bindings);
> >> > +    data->local_lports_changed = false;
> >> > +    data->tracked = false;
> >> > +}
> >> > +
> >> >  static void *
> >> >  en_runtime_data_init(struct engine_node *node OVS_UNUSED,
> >> >                       struct engine_arg *arg OVS_UNUSED)
> >> > @@ -991,6 +1008,13 @@ en_runtime_data_init(struct engine_node *node
> >> OVS_UNUSED,
> >> >      sset_init(&data->egress_ifaces);
> >> >      smap_init(&data->local_iface_ids);
> >> >      local_bindings_init(&data->local_bindings);
> >> > +
> >> > +    struct ed_type_runtime_tracked_data *tracked_data =
> >> > +        xzalloc(sizeof *tracked_data);
> >> > +    hmap_init(&tracked_data->tracked_dp_bindings);
> >> > +    node->tracked_data = tracked_data;
> >> > +    node->clear_tracked_data = en_runtime_clear_tracked_data;
> >> > +
> >> >      return data;
> >> >  }
> >> >
> >> > @@ -1093,6 +1117,8 @@ init_binding_ctx(struct engine_node *node,
> >> >      b_ctx_out->egress_ifaces = &rt_data->egress_ifaces;
> >> >      b_ctx_out->local_bindings = &rt_data->local_bindings;
> >> >      b_ctx_out->local_iface_ids = &rt_data->local_iface_ids;
> >> > +    b_ctx_out->tracked_dp_bindings = NULL;
> >> > +    b_ctx_out->local_lports_changed = NULL;
> >> >  }
> >> >
> >> >  static void
> >> > @@ -1104,6 +1130,8 @@ en_runtime_data_run(struct engine_node *node,
> void
> >> *data)
> >> >      struct sset *local_lport_ids = &rt_data->local_lport_ids;
> >> >      struct sset *active_tunnels = &rt_data->active_tunnels;
> >> >
> >> > +    en_runtime_clear_tracked_data(node->tracked_data);
> >> > +
> >> >      static bool first_run = true;
> >> >      if (first_run) {
> >> >          /* don't cleanup since there is no data yet */
> >> > @@ -1156,9 +1184,13 @@ static bool
> >> >  runtime_data_ovs_interface_handler(struct engine_node *node, void *data)
> >> >  {
> >> >      struct ed_type_runtime_data *rt_data = data;
> >> > +    struct ed_type_runtime_tracked_data *tracked_data = node->tracked_data;
> >> >      struct binding_ctx_in b_ctx_in;
> >> >      struct binding_ctx_out b_ctx_out;
> >> >      init_binding_ctx(node, rt_data, &b_ctx_in, &b_ctx_out);
> >> > +    tracked_data->tracked = true;
> >> > +    b_ctx_out.tracked_dp_bindings = &tracked_data->tracked_dp_bindings;
> >> > +    b_ctx_out.local_lports_changed = &tracked_data->local_lports_changed;
> >> >
> >> >      bool changed = false;
> >> >      if (!binding_handle_ovs_interface_changes(&b_ctx_in, &b_ctx_out,
> >> > @@ -1191,6 +1223,10 @@ runtime_data_sb_port_binding_handler(struct engine_node *node, void *data)
> >> >          return false;
> >> >      }
> >> >
> >> > +    struct ed_type_runtime_tracked_data *tracked_data = node->tracked_data;
> >> > +    tracked_data->tracked = true;
> >> > +    b_ctx_out.tracked_dp_bindings = &tracked_data->tracked_dp_bindings;
> >> > +
> >> >      bool changed = false;
> >> >      if (!binding_handle_port_binding_changes(&b_ctx_in, &b_ctx_out,
> >> >                                               &changed)) {
> >> > @@ -1862,6 +1898,29 @@ physical_flow_changes_ovs_iface_handler(struct engine_node *node OVS_UNUSED,
> >> >      return true;
> >> >  }
> >> >
> >> > +static bool
> >> > +flow_output_runtime_data_handler(struct engine_node *node,
> >> > +                                 void *data OVS_UNUSED)
> >> > +{
> >> > +    struct ed_type_runtime_tracked_data *tracked_data =
> >> > +        engine_get_input_tracked_data("runtime_data", node);
> >> > +
> >> > +    if (!tracked_data || !tracked_data->tracked) {
> >> > +        return false;
> >> > +    }
> >> > +
> >> > +    if (!hmap_is_empty(&tracked_data->tracked_dp_bindings)) {
> >> > +        /* We are not yet handling the tracked datapath binding
> >> > +         * changes. Return false to trigger full recompute. */
> >> > +        return false;
> >> > +    }
> >> > +
> >> > +    if (tracked_data->local_lports_changed) {
> >> > +        engine_set_node_state(node, EN_UPDATED);
> >> > +    }
> >> > +    return true;
> >>
> >> Once the change handler is added and it returns true, it means it has
> >> handled the changes incrementally with full confidence. However, I didn't
> >> see handling of the runtime_data changes for local_bindings,
> >> local_lport_ids, active_tunnels, egress_ifaces, local_iface_ids, etc.
> >> (they are not involved in the tracked data, if I read the code correctly).
> >> Even for local_lports, why do we return true when there are changes
> >> tracked for it?
> >
> >
> > I really don't understand the need for adding all the information in the
> > tracked data. And I'm not sure how the flow_output handlers would use all
> > of it.
> >
> > The tracked data will be used by the flow_output_runtime_data_handler().
> >
> > If we take each piece of the runtime data:
> >
> >  - active_tunnels: This is not updated during the I-P of OVS interface
> >                    and Port binding changes. active_tunnels is built in
> >                    en_runtime_data_run() when it calls
> >                    bfd_calculate_active_tunnels() and the tunnel OVS
> >                    interfaces are considered. If any non-VIF OVS
> >                    interface changes, we presently fall back to a full
> >                    recompute of runtime data, so I don't think there is
> >                    a need to have this as part of the tracked data.
> >
> Thanks for the explanation. This reason is important but it is implicit. From
> the dependency graph point of view, one can't easily tell why it is ignored,
> so I think it deserves an XXX comment, so that code readers can understand it
> and track it for future improvement when we do incremental processing for
> non-VIF interface changes.
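>
> Something along these lines (just a sketch of the kind of note meant here,
> not exact wording):
>
>     /* XXX: active_tunnels is only rebuilt by a full recompute of
>      * runtime_data (bfd_calculate_active_tunnels() in
>      * en_runtime_data_run()).  Any non-VIF OVS interface change already
>      * falls back to a full recompute, so it doesn't need separate
>      * tracking here. */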
>
> >  - egress_ifaces:  This is used only by binding.c and is not used during
> >                    flow computation, so I don't see why it should be
> >                    part of the tracked data.
> >
> >  - local_iface_ids: This is used only by binding.c and is not used during
> >                     flow computation, so I don't see why it should be
> >                     part of the tracked data.
> >
>
> This reason is more obvious. I'd suggest adding a comment in the struct
> definition of ed_type_runtime_data to note these members as "engine private
> data, not depended on by other nodes". The I-P engine framework doesn't
> express this kind of encapsulation well, but I think some comments would be
> helpful.
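>
> For example (only a sketch of where such a note could go, assuming the
> existing ed_type_runtime_data members):
>
>     struct ed_type_runtime_data {
>         ...
>         /* Engine-private data: used only by binding.c while computing
>          * runtime_data; no other engine node depends on these members,
>          * so changes to them don't need to be tracked for flow_output. */
>         struct sset egress_ifaces;
>         struct smap local_iface_ids;
>     };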
>
> >  - local_lport_ids: When an OVS interface is added/updated with
> >                     external_ids:iface-id set, we add the id to this
> >                     sset. It is used in
> >                     1. ovn-controller.c, for monitoring any Port Bindings
> >                        with this name.
> >                     2. lflow.c, to check if we need to skip the logical
> >                        flow.
> >
> >    Let's say an OVS interface was added with external_ids:iface-id=foo
> >    and the 'foo' logical port is not yet present. In this case
> >    local_lport_ids is updated with 'foo'. This doesn't result in any port
> >    binding changes, so all we need to do is call update_sb_monitors(),
> >    and that's why flow_output_runtime_data_handler() sets the engine node
> >    to updated and returns true, as there is no need to do any flow
> >    calculations. To indicate this, the 'local_lports_changed' bool is
> >    added in 'struct ed_type_runtime_tracked_data'.
> >
> > Let's say that during the I-P handling of an OVS interface change, a port
> > 'foo' is bound and it is the first port of its logical switch (say sw0)
> > to be bound.
> >
> > With this patch series we add the following tracked data - tracked dp
> > bindings = [ls = sw0, is_new = true, tracked ports = [lport = foo,
> > deleted = false]] - and it also would have updated local_lport_ids,
> > local_bindings, local_datapaths, etc.
> >
>
> I see, so it is handled indirectly by handling the tracked_binding_lport in
> tracked_data->tracked_dp_bindings->lports_head. I think it should be ok,
> but it deserves a comment to help readers understand why we don't need to
> track and handle local_lport_ids changes directly.
>
> > Let's say we also put tracked local_lport_ids, tracked local_iface_ids,
> > tracked local_bindings, etc. into the tracked data. My question is how
> > they would be used by the flow output handlers?
> >
> > Something like ?
> >
> > for each tracked dp bindings
> >    calculate flows for these dp bindings
> > done
> >
> > for each tracked local_lport_ids
> >     calculate flows for these lport ids
> > done
> >
> > for each tracked local_bindings
> >     calculate flows for these local bindings
> > done
> >
> > Can you please explain how the flow output handlers would use this data,
> > as I'm not clear on that?
> >
>
> Usually the straightforward way to implement change handlers (my
> understanding) is to check how the full computation uses each of the
> input data. Consider each input as a DB table (for runtime_data, since it
> has multiple members, it is like multiple sub-tables combined, ignoring
> the private members that are not used as inputs). The full compute is then
> a join between the tables, but the "join" is not a simple relational DB
> join but highly customized functions. Now, for an I-P change handler, to
> handle each tracked change, ideally it is a probe to the other related
> tables, depending on how the "join" was implemented in the full
> computation. However, such a probe is hard to implement, so the general
> approach is to link each table item to the key of the output table (for
> lflow_output, the key is the uuid of the lflow for logical flows, or the
> uuid of the PB for physical flows) by building an index for efficient
> lookup, such as the lflow_resource_ref index, and to handle changes of any
> input table by finding the key in the output and recomputing the items
> related to that key. This is like always probing in the same order of
> table traversal. With this approach it is easier to reason about
> correctness. It is essentially no different from the DDlog approach, just
> with all the logic implemented manually.
>
> Take local_lport_ids as an example: in the full computation it is used to
> see if an lflow should be bypassed on the current chassis. So, if we
> tracked the changes of this "sub-table", we could do:
>
> for each tracked local_lport_ids:
>     find the lflow uuid related to this local_lport_id // this requires
>         building the index, as you have already done, which is perfect
>     delete all ovn-flows that are generated by this lflow
>     call consider_lflow() again
> done
>
> This ensures the impacted lflows get bypassed/unbypassed properly because
> the related lflows are reconsidered.
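>
> In rough C terms (only a sketch; the tracked_local_lport_ids sset, the
> lflow_refs index and the lflow_ref_lookup()/remove_flows_for_lflow()/
> reconsider_lflow() helpers below are hypothetical stand-ins for the index
> lookup and the consider_lflow() machinery, not the actual lflow.c API):
>
>     const char *lport_id;
>     SSET_FOR_EACH (lport_id, tracked_local_lport_ids) {
>         /* Hypothetical index lookup: every lflow that referenced this
>          * local_lport_id the last time it was computed. */
>         struct lflow_ref *ref;
>         LIST_FOR_EACH (ref, list_node,
>                        lflow_ref_lookup(lflow_refs, lport_id)) {
>             /* Drop the flows previously generated from this lflow and
>              * re-run consider_lflow() so the bypass decision is
>              * re-evaluated for the changed lport id. */
>             remove_flows_for_lflow(&ref->lflow_uuid);
>             reconsider_lflow(&ref->lflow_uuid);
>         }
>     }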
>
> >
> >>
> >> So how could we be confident that those changes can be ignored by the
> >> flow_output node? If they can be ignored, why would they be needed in
> >> flow_output_run(), which passes them to flow_run()/physical_run()? Is
> >> there something missing?
> >
> >
> > In my opinion, the runtime data handlers need to indicate to flow_output
> > (or its parent engine) which port bindings were added/deleted and which
> > datapaths were added/deleted, so that flow computation can be done for
> > these tracked/changed entities. Any change to the runtime data is
> > captured in the hmap of 'struct tracked_binding_datapath'.
> >
> It is ok if you have evaluated all the inputs regarding how they are used in
> the full compute and ensured that the way you tracked and handled the changes
> guarantees exactly the same result. For maintainability, I'd suggest at least
> documenting clearly, for each member of the input data, how it is linked to
> the original full recompute.
>

I've added the comments in v8. Please take a look.

Thanks
Numan


>
> > Thanks
> > Numan
> >
> >>
> >> > +}
> >> > +
> >> >  struct ovn_controller_exit_args {
> >> >      bool *exiting;
> >> >      bool *restart;
> >> > @@ -2014,7 +2073,8 @@ main(int argc, char *argv[])
> >> >                       flow_output_addr_sets_handler);
> >> >      engine_add_input(&en_flow_output, &en_port_groups,
> >> >                       flow_output_port_groups_handler);
> >> > -    engine_add_input(&en_flow_output, &en_runtime_data, NULL);
> >> > +    engine_add_input(&en_flow_output, &en_runtime_data,
> >> > +                     flow_output_runtime_data_handler);
> >> >      engine_add_input(&en_flow_output, &en_mff_ovn_geneve, NULL);
> >> >      engine_add_input(&en_flow_output, &en_physical_flow_changes,
> >> >                       flow_output_physical_flow_changes_handler);
> >> > diff --git a/tests/ovn-performance.at b/tests/ovn-performance.at
> >> > index 5cc1960b6..6e064e64f 100644
> >> > --- a/tests/ovn-performance.at
> >> > +++ b/tests/ovn-performance.at
> >> > @@ -274,7 +274,7 @@ for i in 1 2; do
> >> >      )
> >> >
> >> >      # Add router port to $ls
> >> > -    OVN_CONTROLLER_EXPECT_HIT(
> >> > +    OVN_CONTROLLER_EXPECT_NO_HIT(
> >> >          [hv1 hv2], [lflow_run],
> >> >         [ovn-nbctl --wait=hv lrp-add lr1 $lrp 02:00:00:00:0$i:01 10.0.$i.1/24]
> >> >      )
> >> > @@ -282,15 +282,15 @@ for i in 1 2; do
> >> >          [hv1 hv2], [lflow_run],
> >> >          [ovn-nbctl --wait=hv lsp-add $ls $lsp]
> >> >      )
> >> > -    OVN_CONTROLLER_EXPECT_HIT(
> >> > +    OVN_CONTROLLER_EXPECT_NO_HIT(
> >> >          [hv1 hv2], [lflow_run],
> >> >          [ovn-nbctl --wait=hv lsp-set-type $lsp router]
> >> >      )
> >> > -    OVN_CONTROLLER_EXPECT_HIT(
> >> > +    OVN_CONTROLLER_EXPECT_NO_HIT(
> >> >          [hv1 hv2], [lflow_run],
> >> >          [ovn-nbctl --wait=hv lsp-set-options $lsp router-port=$lrp]
> >> >      )
> >> > -    OVN_CONTROLLER_EXPECT_HIT(
> >> > +    OVN_CONTROLLER_EXPECT_NO_HIT(
> >> >          [hv1 hv2], [lflow_run],
> >> >          [ovn-nbctl --wait=hv lsp-set-addresses $lsp router]
> >> >      )
> >> > @@ -404,8 +404,8 @@ for i in 1 2; do
> >> >      lp=lp$i
> >> >
> >> >      # Delete port $lp
> >> > -    OVN_CONTROLLER_EXPECT_HIT_COND(
> >> > -        [hv$i hv$j], [lflow_run], [>0 =0],
> >> > +    OVN_CONTROLLER_EXPECT_NO_HIT(
> >> > +        [hv$i hv$j], [lflow_run],
> >> >          [ovn-nbctl --wait=hv lsp-del $lp]
> >> >      )
> >> >  done
> >> > @@ -416,15 +416,15 @@ OVN_CONTROLLER_EXPECT_NO_HIT(
> >> >      [ovn-nbctl --wait=hv destroy Port_Group pg1]
> >> >  )
> >> >
> >> > -for i in 1 2; do
> >> > -    ls=ls$i
> >> > +OVN_CONTROLLER_EXPECT_HIT(
> >> > +    [hv1 hv2], [lflow_run],
> >> > +    [ovn-nbctl --wait=hv ls-del ls1]
> >> > +)
> >> >
> >> > -    # Delete switch $ls
> >> > -    OVN_CONTROLLER_EXPECT_HIT(
> >> > -        [hv1 hv2], [lflow_run],
> >> > -        [ovn-nbctl --wait=hv ls-del $ls]
> >> > -    )
> >> > -done
> >> > +OVN_CONTROLLER_EXPECT_NO_HIT(
> >> > +    [hv1 hv2], [lflow_run],
> >> > +    [ovn-nbctl --wait=hv ls-del ls2]
> >> > +)
> >> >
> >> >  # Delete router lr1
> >> >  OVN_CONTROLLER_EXPECT_NO_HIT(
> >> > --
> >> > 2.26.2
> >> >
_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
