Reduce duplicated code between nb_ls_map and nb_lr_map by wrapping both bitmaps in a common dynamic_bitmap helper.
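The new 'struct dynamic_bitmap' added to lib/ovn-util.h bundles the bitmap
pointer, its capacity and the number of bits currently set, so callers no
longer need to carry a separate n_nb_ls/n_nb_lr counter next to each map.
A rough usage sketch of the helper (illustrative only; 'n_datapaths' and
'od' below are placeholders, not part of this patch):

    /* Illustrative sketch, not part of the patch. */
    struct dynamic_bitmap db = {0};      /* n_elems must start at zero. */

    dynamic_bitmap_alloc(&db, n_datapaths);
    dynamic_bitmap_set1(&db, od->index); /* Keeps n_elems in sync. */

    if (!dynamic_bitmap_is_empty(&db)) {
        size_t index;
        DYNAMIC_BITMAP_FOR_EACH_1 (index, &db) {
            /* Process the datapath at 'index'. */
        }
    }
    dynamic_bitmap_free(&db);
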
Acked-by: Mairtin O'Loingsigh <[email protected]>
Acked-by: Mark Michelson <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
---
lib/ovn-util.h | 95 +++++++++++++++++++++++++++++++++++++++++
northd/en-lr-stateful.c | 3 +-
northd/en-sync-sb.c | 27 +++++++-----
northd/lb.c | 18 +++-----
northd/lb.h | 6 +--
northd/northd.c | 60 ++++++++++++--------------
6 files changed, 148 insertions(+), 61 deletions(-)
diff --git a/lib/ovn-util.h b/lib/ovn-util.h
index 63beae3e5..94b3f87ce 100644
--- a/lib/ovn-util.h
+++ b/lib/ovn-util.h
@@ -18,6 +18,7 @@
#include "openvswitch/meta-flow.h"
#include "ovsdb-idl.h"
+#include "lib/bitmap.h"
#include "lib/packets.h"
#include "lib/sset.h"
#include "lib/svec.h"
@@ -482,6 +483,100 @@ void sorted_array_apply_diff(const struct sorted_array *a1,
bool add),
const void *arg);
+struct dynamic_bitmap {
+ unsigned long *map;
+ size_t n_elems;
+ size_t capacity;
+};
+
+static inline unsigned long *
+ovn_bitmap_realloc(unsigned long *bitmap, size_t n_bits_old,
+ size_t n_bits_new)
+{
+ ovs_assert(n_bits_new >= n_bits_old);
+
+ if (bitmap_n_bytes(n_bits_old) == bitmap_n_bytes(n_bits_new)) {
+ return bitmap;
+ }
+
+ bitmap = xrealloc(bitmap, bitmap_n_bytes(n_bits_new));
+ /* Set the uninitialized bits to 0 as xrealloc doesn't initialize the
+ * added memory. */
+ size_t delta = BITMAP_N_LONGS(n_bits_new) - BITMAP_N_LONGS(n_bits_old);
+ memset(&bitmap[BITMAP_N_LONGS(n_bits_old)], 0, delta * sizeof *bitmap);
+
+ return bitmap;
+}
+
+static inline void
+dynamic_bitmap_alloc(struct dynamic_bitmap *db, size_t n_elems)
+{
+ db->map = bitmap_allocate(n_elems);
+ db->capacity = n_elems;
+}
+
+static inline void
+dynamic_bitmap_free(struct dynamic_bitmap *db)
+{
+ bitmap_free(db->map);
+}
+
+static inline void
+dynamic_bitmap_realloc(struct dynamic_bitmap *db, size_t new_n_elems)
+{
+ if (new_n_elems > db->capacity) {
+ db->map = ovn_bitmap_realloc(db->map, db->capacity, new_n_elems);
+ db->capacity = new_n_elems;
+ }
+}
+
+static inline bool
+dynamic_bitmap_is_empty(const struct dynamic_bitmap *db)
+{
+ return !db->n_elems;
+}
+
+static inline int
+dynamic_bitmap_get_n_elems(const struct dynamic_bitmap *db)
+{
+ return db->n_elems;
+}
+
+static inline void
+dynamic_bitmap_set1(struct dynamic_bitmap *db, int index)
+{
+ ovs_assert(index < db->capacity);
+ if (!bitmap_is_set(db->map, index)) {
+ bitmap_set1(db->map, index);
+ db->n_elems++;
+ }
+}
+
+static inline void
+dynamic_bitmap_set0(struct dynamic_bitmap *db, int index)
+{
+ ovs_assert(index < db->capacity);
+ if (bitmap_is_set(db->map, index)) {
+ bitmap_set0(db->map, index);
+ db->n_elems--;
+ }
+}
+
+static inline unsigned long *
+dynamic_bitmap_clone_map(struct dynamic_bitmap *db)
+{
+ return bitmap_clone(db->map, db->capacity);
+}
+
+static inline size_t
+dynamic_bitmap_scan(struct dynamic_bitmap *db, bool target, size_t start)
+{
+ return bitmap_scan(db->map, target, start, db->capacity);
+}
+
+#define DYNAMIC_BITMAP_FOR_EACH_1(IDX, MAP) \
+ BITMAP_FOR_EACH_1(IDX, (MAP)->capacity, (MAP)->map)
+
/* Utilities around properly handling exit command. */
struct ovn_exit_args {
struct unixctl_conn **conns;
diff --git a/northd/en-lr-stateful.c b/northd/en-lr-stateful.c
index 56e93f3c4..4ad161c33 100644
--- a/northd/en-lr-stateful.c
+++ b/northd/en-lr-stateful.c
@@ -251,8 +251,7 @@ lr_stateful_lb_data_handler(struct engine_node *node, void *data_)
ovs_assert(lb_dps);
size_t index;
- BITMAP_FOR_EACH_1 (index, ods_size(input_data.lr_datapaths),
- lb_dps->nb_lr_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_lr_map) {
const struct ovn_datapath *od =
ovn_datapaths_find_by_index(input_data.lr_datapaths, index);
diff --git a/northd/en-sync-sb.c b/northd/en-sync-sb.c
index a111f14fd..a92eb9612 100644
--- a/northd/en-sync-sb.c
+++ b/northd/en-sync-sb.c
@@ -676,7 +676,8 @@ sb_lb_table_build_and_sync(
struct sb_lb_record *sb_lb;
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- if (!lb_dps->n_nb_ls && !lb_dps->n_nb_lr) {
+ if (dynamic_bitmap_is_empty(&lb_dps->nb_ls_map) &&
+ dynamic_bitmap_is_empty(&lb_dps->nb_lr_map)) {
continue;
}
@@ -754,10 +755,10 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
sbrec_lr_dp_group = sbrec_lb->lr_datapath_group;
}
- if (lb_dps->n_nb_ls) {
+ if (!dynamic_bitmap_is_empty(&lb_dps->nb_ls_map)) {
sb_lb->ls_dpg = ovn_dp_group_get(&sb_lbs->ls_dp_groups,
- lb_dps->n_nb_ls,
- lb_dps->nb_ls_map,
+ lb_dps->nb_ls_map.n_elems,
+ lb_dps->nb_ls_map.map,
ods_size(ls_datapaths));
if (sb_lb->ls_dpg) {
/* Update the dpg's sb dp_group. */
@@ -787,7 +788,7 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
} else {
sb_lb->ls_dpg = ovn_dp_group_create(
ovnsb_txn, &sb_lbs->ls_dp_groups, sbrec_ls_dp_group,
- lb_dps->n_nb_ls, lb_dps->nb_ls_map,
+ lb_dps->nb_ls_map.n_elems, lb_dps->nb_ls_map.map,
ods_size(ls_datapaths), true,
ls_datapaths, lr_datapaths);
}
@@ -809,10 +810,10 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
}
- if (lb_dps->n_nb_lr) {
+ if (!dynamic_bitmap_is_empty(&lb_dps->nb_lr_map)) {
sb_lb->lr_dpg = ovn_dp_group_get(&sb_lbs->lr_dp_groups,
- lb_dps->n_nb_lr,
- lb_dps->nb_lr_map,
+ lb_dps->nb_lr_map.n_elems,
+ lb_dps->nb_lr_map.map,
ods_size(lr_datapaths));
if (sb_lb->lr_dpg) {
/* Update the dpg's sb dp_group. */
@@ -842,7 +843,7 @@ sync_sb_lb_record(struct sb_lb_record *sb_lb,
} else {
sb_lb->lr_dpg = ovn_dp_group_create(
ovnsb_txn, &sb_lbs->lr_dp_groups, sbrec_lr_dp_group,
- lb_dps->n_nb_lr, lb_dps->nb_lr_map,
+ lb_dps->nb_lr_map.n_elems, lb_dps->nb_lr_map.map,
ods_size(lr_datapaths), false,
ls_datapaths, lr_datapaths);
}
@@ -919,7 +920,9 @@ sync_changed_lbs(struct sb_lb_table *sb_lbs,
sb_lb = sb_lb_table_find(&sb_lbs->entries, nb_uuid);
- if (!sb_lb && !lb_dps->n_nb_ls && !lb_dps->n_nb_lr) {
+ if (!sb_lb &&
+ dynamic_bitmap_is_empty(&lb_dps->nb_ls_map) &&
+ dynamic_bitmap_is_empty(&lb_dps->nb_lr_map)) {
continue;
}
@@ -933,7 +936,9 @@ sync_changed_lbs(struct sb_lb_table *sb_lbs,
sbrec_load_balancer_table_get_for_uuid(sb_lb_table, nb_uuid);
}
- if (sb_lb && !lb_dps->n_nb_ls && !lb_dps->n_nb_lr) {
+ if (sb_lb &&
+ dynamic_bitmap_is_empty(&lb_dps->nb_ls_map) &&
+ dynamic_bitmap_is_empty(&lb_dps->nb_lr_map)) {
const struct sbrec_load_balancer *sbrec_lb =
sbrec_load_balancer_table_get_for_uuid(sb_lb_table, nb_uuid);
if (sbrec_lb) {
diff --git a/northd/lb.c b/northd/lb.c
index 495a5cdd8..536652fd6 100644
--- a/northd/lb.c
+++ b/northd/lb.c
@@ -631,8 +631,8 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
{
struct ovn_lb_datapaths *lb_dps = xzalloc(sizeof *lb_dps);
lb_dps->lb = lb;
- lb_dps->nb_ls_map = bitmap_allocate(n_ls_datapaths);
- lb_dps->nb_lr_map = bitmap_allocate(n_lr_datapaths);
+ dynamic_bitmap_alloc(&lb_dps->nb_ls_map, n_ls_datapaths);
+ dynamic_bitmap_alloc(&lb_dps->nb_lr_map, n_lr_datapaths);
lb_dps->lflow_ref = lflow_ref_create();
hmapx_init(&lb_dps->ls_lb_with_stateless_mode);
return lb_dps;
@@ -641,8 +641,8 @@ ovn_lb_datapaths_create(const struct ovn_northd_lb *lb, size_t n_ls_datapaths,
void
ovn_lb_datapaths_destroy(struct ovn_lb_datapaths *lb_dps)
{
- bitmap_free(lb_dps->nb_lr_map);
- bitmap_free(lb_dps->nb_ls_map);
+ dynamic_bitmap_free(&lb_dps->nb_lr_map);
+ dynamic_bitmap_free(&lb_dps->nb_ls_map);
lflow_ref_destroy(lb_dps->lflow_ref);
hmapx_destroy(&lb_dps->ls_lb_with_stateless_mode);
free(lb_dps);
@@ -653,10 +653,7 @@ ovn_lb_datapaths_add_lr(struct ovn_lb_datapaths *lb_dps, size_t n,
struct ovn_datapath **ods)
{
for (size_t i = 0; i < n; i++) {
- if (!bitmap_is_set(lb_dps->nb_lr_map, ods[i]->index)) {
- bitmap_set1(lb_dps->nb_lr_map, ods[i]->index);
- lb_dps->n_nb_lr++;
- }
+ dynamic_bitmap_set1(&lb_dps->nb_lr_map, ods[i]->index);
}
}
@@ -665,10 +662,7 @@ ovn_lb_datapaths_add_ls(struct ovn_lb_datapaths *lb_dps, size_t n,
struct ovn_datapath **ods)
{
for (size_t i = 0; i < n; i++) {
- if (!bitmap_is_set(lb_dps->nb_ls_map, ods[i]->index)) {
- bitmap_set1(lb_dps->nb_ls_map, ods[i]->index);
- lb_dps->n_nb_ls++;
- }
+ dynamic_bitmap_set1(&lb_dps->nb_ls_map, ods[i]->index);
}
}
diff --git a/northd/lb.h b/northd/lb.h
index 4c45e0283..3702819aa 100644
--- a/northd/lb.h
+++ b/northd/lb.h
@@ -142,11 +142,9 @@ struct ovn_lb_datapaths {
struct hmap_node hmap_node;
const struct ovn_northd_lb *lb;
- size_t n_nb_ls;
- unsigned long *nb_ls_map;
- size_t n_nb_lr;
- unsigned long *nb_lr_map;
+ struct dynamic_bitmap nb_ls_map;
+ struct dynamic_bitmap nb_lr_map;
struct hmapx ls_lb_with_stateless_mode;
diff --git a/northd/northd.c b/northd/northd.c
index 9a9d70b7a..239d121d2 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -3396,7 +3396,7 @@ build_lswitch_lbs_from_lrouter(struct ovn_datapaths *lr_datapaths,
size_t index;
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths), lb_dps->nb_lr_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_lr_map) {
struct ovn_datapath *od = lr_datapaths->array[index];
ovn_lb_datapaths_add_ls(lb_dps, vector_len(&od->ls_peers),
vector_get_array(&od->ls_peers));
@@ -3423,15 +3423,15 @@ build_lswitch_lbs_from_lrouter(struct ovn_datapaths *lr_datapaths,
}
static void
-build_lb_count_dps(struct hmap *lb_dps_map,
- size_t n_ls_datapaths,
- size_t n_lr_datapaths)
+build_lb_count_dps(struct hmap *lb_dps_map)
{
struct ovn_lb_datapaths *lb_dps;
HMAP_FOR_EACH (lb_dps, hmap_node, lb_dps_map) {
- lb_dps->n_nb_lr = bitmap_count1(lb_dps->nb_lr_map, n_lr_datapaths);
- lb_dps->n_nb_ls = bitmap_count1(lb_dps->nb_ls_map, n_ls_datapaths);
+ lb_dps->nb_lr_map.n_elems = dynamic_bitmap_get_n_elems(
+ &lb_dps->nb_lr_map);
+ lb_dps->nb_ls_map.n_elems = dynamic_bitmap_get_n_elems(
+ &lb_dps->nb_ls_map);
}
}
@@ -4991,8 +4991,7 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
ovs_assert(lb_dps);
size_t index;
- BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
- lb_dps->nb_ls_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_ls_map) {
od = ls_datapaths->array[index];
/* Add the ls datapath to the northd tracked data. */
@@ -5126,7 +5125,7 @@ northd_handle_lb_data_changes(struct tracked_lb_data *trk_lb_data,
ovs_assert(lb_dps);
size_t index;
BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
- lb_dps->nb_ls_map) {
+ lb_dps->nb_ls_map.map) {
od = ls_datapaths->array[index];
/* Add the ls datapath to the northd tracked data. */
@@ -7574,7 +7573,7 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
const struct ovn_datapaths *ls_datapaths,
struct ds *match, struct ds *action)
{
- if (!lb_dps->n_nb_ls) {
+ if (dynamic_bitmap_is_empty(&lb_dps->nb_ls_map)) {
return;
}
@@ -7615,7 +7614,7 @@ build_lb_rules_pre_stateful(struct lflow_table *lflows,
}
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_PRE_STATEFUL, 120, ds_cstr(match), ds_cstr(action),
&lb->nlb->header_, lb_dps->lflow_ref);
@@ -7872,7 +7871,8 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
const struct ovn_datapaths *ls_datapaths,
struct lflow_ref *lflow_ref)
{
- if (!lb_dps->lb->affinity_timeout || !lb_dps->n_nb_ls) {
+ if (!lb_dps->lb->affinity_timeout ||
+ dynamic_bitmap_is_empty(&lb_dps->nb_ls_map)) {
return;
}
@@ -7894,7 +7894,7 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_LB_AFF_CHECK, 100, ds_cstr(&new_lb_match), aff_check,
&lb_dps->lb->nlb->header_, lflow_ref);
ds_destroy(&new_lb_match);
@@ -7976,14 +7976,14 @@ build_lb_affinity_ls_flows(struct lflow_table *lflows,
/* Forward to OFTABLE_CHK_LB_AFFINITY table to store flow tuple. */
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_LB_AFF_LEARN, 100, ds_cstr(&aff_match_learn),
ds_cstr(&aff_action_learn), &lb->nlb->header_,
lflow_ref);
/* Use already selected backend within affinity timeslot. */
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_ls_map, ods_size(ls_datapaths),
+ lflows, lb_dps->nb_ls_map.map, ods_size(ls_datapaths),
S_SWITCH_IN_LB, 150, ds_cstr(&aff_match), ds_cstr(&aff_action),
&lb->nlb->header_, lflow_ref);
@@ -8067,10 +8067,8 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
if (reject) {
size_t index;
- dp_non_meter = bitmap_clone(lb_dps->nb_ls_map,
- ods_size(ls_datapaths));
- BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths),
- lb_dps->nb_ls_map) {
+ dp_non_meter = dynamic_bitmap_clone_map(&lb_dps->nb_ls_map);
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_ls_map) {
struct ovn_datapath *od = ls_datapaths->array[index];
meter = copp_meter_get(COPP_REJECT, od->nbs->copp,
@@ -8089,7 +8087,7 @@ build_lb_rules(struct lflow_table *lflows, struct ovn_lb_datapaths *lb_dps,
}
if (!reject || build_non_meter) {
ovn_lflow_add_with_dp_group(
- lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map,
+ lflows, dp_non_meter ? dp_non_meter : lb_dps->nb_ls_map.map,
ods_size(ls_datapaths), S_SWITCH_IN_LB, priority,
ds_cstr(match), ds_cstr(action), &lb->nlb->header_,
lb_dps->lflow_ref);
@@ -12208,7 +12206,7 @@ build_lrouter_nat_flows_for_lb(
size_t index;
bool use_stateless_nat = smap_get_bool(&lb->nlb->options,
"use_stateless_nat", false);
- BITMAP_FOR_EACH_1 (index, bitmap_len, lb_dps->nb_lr_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_lr_map) {
struct ovn_datapath *od = lr_datapaths->array[index];
enum lrouter_nat_lb_flow_type type;
@@ -12293,7 +12291,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
const struct svc_monitors_map_data *svc_mons_data,
struct ds *match, struct ds *action)
{
- if (!lb_dps->n_nb_ls) {
+ if (dynamic_bitmap_is_empty(&lb_dps->nb_ls_map)) {
return;
}
@@ -12307,7 +12305,7 @@ build_lswitch_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
}
size_t index;
- BITMAP_FOR_EACH_1 (index, ods_size(ls_datapaths), lb_dps->nb_ls_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_ls_map) {
struct ovn_datapath *od = ls_datapaths->array[index];
ovn_lflow_add_with_hint__(lflows, od,
@@ -12351,7 +12349,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
const struct ovn_datapaths *lr_datapaths,
struct ds *match)
{
- if (!lb_dps->n_nb_lr) {
+ if (dynamic_bitmap_is_empty(&lb_dps->nb_lr_map)) {
return;
}
@@ -12365,7 +12363,7 @@ build_lrouter_defrag_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
lb_vip->vip_str);
ovn_lflow_add_with_dp_group(
- lflows, lb_dps->nb_lr_map, ods_size(lr_datapaths),
+ lflows, lb_dps->nb_lr_map.map, ods_size(lr_datapaths),
S_ROUTER_IN_DEFRAG, prio, ds_cstr(match), "ct_dnat;",
&lb_dps->lb->nlb->header_, lb_dps->lflow_ref);
}
@@ -12385,7 +12383,7 @@ build_lrouter_allow_vip_traffic_template(struct lflow_table *lflows,
struct ds match = DS_EMPTY_INITIALIZER;
size_t index;
- BITMAP_FOR_EACH_1 (index, ods_size(lr_dps), lb_dps->nb_lr_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_lr_map) {
struct ovn_datapath *od = lr_dps->array[index];
/* Do not drop ip traffic with destination the template VIP. */
ds_clear(&match);
@@ -12411,7 +12409,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
{
size_t index;
- if (!lb_dps->n_nb_lr) {
+ if (dynamic_bitmap_is_empty(&lb_dps->nb_lr_map)) {
return;
}
@@ -12431,7 +12429,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
continue;
}
- BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths), lb_dps->nb_lr_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_lr_map) {
struct ovn_datapath *od = lr_datapaths->array[index];
ovn_lflow_add_with_hint__(lflows, od, S_ROUTER_IN_DNAT,
@@ -12445,7 +12443,7 @@ build_lrouter_flows_for_lb(struct ovn_lb_datapaths *lb_dps,
}
if (lb->skip_snat) {
- BITMAP_FOR_EACH_1 (index, ods_size(lr_datapaths), lb_dps->nb_lr_map) {
+ DYNAMIC_BITMAP_FOR_EACH_1 (index, &lb_dps->nb_lr_map) {
struct ovn_datapath *od = lr_datapaths->array[index];
ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120,
@@ -19167,9 +19165,7 @@ ovnnb_db_run(struct northd_input *input_data,
&data->lb_group_datapaths_map, &data->svc_monitor_lsps,
&data->local_svc_monitors_map,
input_data->ic_learned_svc_monitors_map);
- build_lb_count_dps(&data->lb_datapaths_map,
- ods_size(&data->ls_datapaths),
- ods_size(&data->lr_datapaths));
+ build_lb_count_dps(&data->lb_datapaths_map);
build_ipam(&data->ls_datapaths.datapaths);
build_lrouter_groups(&data->lr_ports, &data->lr_datapaths);
build_ip_mcast(ovnsb_txn, input_data->sbrec_ip_multicast_table,
--
2.50.1