When multiple chassis are set in requested-chassis, the port binding is
configured in multiple cluster locations. In a live migration scenario,
only one of the locations runs the workload at any particular point in
time. Yet the workload may switch to running at an additional chassis
at any moment during live migration (depending on libvirt / QEMU
migration progress). To make the switch near instant, do the following:

When a port sends a packet to another port that has multiple chassis,
then in addition to sending the packet to the main chassis, also send
it to all additional chassis. When the sending port is bound on either
the main or an additional chassis, handle the packet locally and also
send it to all the other chassis.

This is achieved with additional flows in tables 37 and 38.
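
For illustration, with the migrator port from the tests below bound to
hv1 (main) and hv2 (additional), the installed flows look roughly like
this (a sketch, not actual ovs-ofctl output):

  # On a chassis where the port is not bound (e.g. hv3), table 37:
  priority=110, outport=migrator, MLF_LOCAL_ONLY set
      actions: encap, output to the tunnel to hv1 only
  priority=100, outport=migrator
      actions: encap, output to the tunnels to both hv1 and hv2

  # On a chassis where the port is bound (e.g. hv1), table 38:
  priority=110, outport=migrator, MLF_LOCAL_ONLY set
      actions: set zones, resubmit to table 39 (deliver locally only)
  priority=100, outport=migrator
      actions: set zones, resubmit to table 39, encap, output to hv2

The priority-110 flows match on MLF_LOCAL_ONLY, which is set for packets
received over a tunnel; this way a packet is cloned only by the chassis
where it entered the pipeline and is never bounced back through a tunnel
to the chassis it came from.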

Signed-off-by: Ihar Hrachyshka <ihrac...@redhat.com>
---
 controller/binding.c  |   2 +-
 controller/binding.h  |   3 +
 controller/physical.c | 233 ++++++++++++--
 tests/ovn.at          | 693 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 900 insertions(+), 31 deletions(-)

diff --git a/controller/binding.c b/controller/binding.c
index 346e4cb76..f55c07001 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -988,7 +988,7 @@ update_port_additional_encap_if_needed(
     return true;
 }
 
-static bool
+bool
 is_additional_chassis(const struct sbrec_port_binding *pb,
                       const struct sbrec_chassis *chassis_rec)
 {
diff --git a/controller/binding.h b/controller/binding.h
index 46f88aff7..80f1f66a1 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -194,4 +194,7 @@ enum en_lport_type {
 
 enum en_lport_type get_lport_type(const struct sbrec_port_binding *);
 
+bool is_additional_chassis(const struct sbrec_port_binding *pb,
+                           const struct sbrec_chassis *chassis_rec);
+
 #endif /* controller/binding.h */
diff --git a/controller/physical.c b/controller/physical.c
index 43b5687ae..e9d138780 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -60,6 +60,11 @@ struct zone_ids {
     int snat;                   /* MFF_LOG_SNAT_ZONE. */
 };
 
+struct additional_tunnel {
+    struct ovs_list list_node;
+    const struct chassis_tunnel *tun;
+};
+
 static void
 load_logical_ingress_metadata(const struct sbrec_port_binding *binding,
                               const struct zone_ids *zone_ids,
@@ -287,12 +292,13 @@ match_outport_dp_and_port_keys(struct match *match,
 }
 
 static void
-put_remote_port_redirect_overlay(const struct
-                                 sbrec_port_binding *binding,
+put_remote_port_redirect_overlay(const struct sbrec_port_binding *binding,
                                  bool is_ha_remote,
                                  struct ha_chassis_ordered *ha_ch_ordered,
                                  enum mf_field_id mff_ovn_geneve,
                                  const struct chassis_tunnel *tun,
+                                 const struct ovs_list *additional_tuns,
+                                 uint32_t dp_key,
                                  uint32_t port_key,
                                  struct match *match,
                                  struct ofpbuf *ofpacts_p,
@@ -301,14 +307,48 @@ put_remote_port_redirect_overlay(const struct
 {
     if (!is_ha_remote) {
         /* Setup encapsulation */
-        if (!tun) {
-            return;
+        bool is_vtep = !strcmp(binding->type, "vtep");
+        if (!additional_tuns) {
+            /* Output to main chassis tunnel. */
+            put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
+                              is_vtep, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
+
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                            binding->header_.uuid.parts[0],
+                            match, ofpacts_p, &binding->header_.uuid);
+        } else {
+            /* For packets arriving from tunnels, don't clone to avoid sending
+             * packets received from another chassis back to it. */
+            match_outport_dp_and_port_keys(match, dp_key, port_key);
+            match_set_reg_masked(match, MFF_LOG_FLAGS - MFF_REG0,
+                                 MLF_LOCAL_ONLY, MLF_LOCAL_ONLY);
+
+            /* Output to main chassis tunnel. */
+            put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
+                              is_vtep, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
+
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 110,
+                            binding->header_.uuid.parts[0], match, ofpacts_p,
+                            &binding->header_.uuid);
+
+            /* For packets originating from this chassis, clone to all
+             * corresponding tunnels. */
+            match_outport_dp_and_port_keys(match, dp_key, port_key);
+            ofpbuf_clear(ofpacts_p);
+
+            struct additional_tunnel *addnl_tun;
+            LIST_FOR_EACH (addnl_tun, list_node, additional_tuns) {
+                put_encapsulation(mff_ovn_geneve, addnl_tun->tun,
+                                  binding->datapath, port_key, is_vtep,
+                                  ofpacts_p);
+                ofpact_put_OUTPUT(ofpacts_p)->port = addnl_tun->tun->ofport;
+            }
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                            binding->header_.uuid.parts[0], match, ofpacts_p,
+                            &binding->header_.uuid);
         }
-        put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
-                          !strcmp(binding->type, "vtep"),
-                          ofpacts_p);
-        /* Output to tunnel. */
-        ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
     } else {
         /* Make sure all tunnel endpoints use the same encapsulation,
          * and set it up */
@@ -376,10 +416,11 @@ put_remote_port_redirect_overlay(const struct
         bundle->basis = 0;
         bundle->fields = NX_HASH_FIELDS_ETH_SRC;
         ofpact_finish_BUNDLE(ofpacts_p, &bundle);
+
+        ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                        binding->header_.uuid.parts[0],
+                        match, ofpacts_p, &binding->header_.uuid);
     }
-    ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
-                    binding->header_.uuid.parts[0],
-                    match, ofpacts_p, &binding->header_.uuid);
 }
 
 
@@ -728,6 +769,8 @@ put_local_common_flows(uint32_t dp_key,
                        const struct sbrec_port_binding *pb,
                        const struct sbrec_port_binding *parent_pb,
                        const struct zone_ids *zone_ids,
+                       const struct ovs_list *additional_tuns,
+                       enum mf_field_id mff_ovn_geneve,
                        struct ofpbuf *ofpacts_p,
                        struct ovn_desired_flow_table *flow_table)
 {
@@ -745,16 +788,45 @@ put_local_common_flows(uint32_t dp_key,
 
     ofpbuf_clear(ofpacts_p);
 
-    /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-    match_outport_dp_and_port_keys(&match, dp_key, port_key);
+    if (!additional_tuns) {
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
 
-    put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
+                        pb->header_.uuid.parts[0], &match, ofpacts_p,
+                        &pb->header_.uuid);
+    } else {
+        /* For packets arriving from tunnels, don't clone again. */
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
+        match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
+                             MLF_LOCAL_ONLY, MLF_LOCAL_ONLY);
 
-    /* Resubmit to table 39. */
-    put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
-    ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
-                    pb->header_.uuid.parts[0], &match, ofpacts_p,
-                    &pb->header_.uuid);
+        put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 110,
+                        pb->header_.uuid.parts[0], &match, ofpacts_p,
+                        &pb->header_.uuid);
+
+        /* For packets originating from this chassis, clone in addition to
+         * handling it locally. */
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
+
+        ofpbuf_clear(ofpacts_p);
+        put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
+
+        struct additional_tunnel *addnl_tun;
+        LIST_FOR_EACH (addnl_tun, list_node, additional_tuns) {
+            put_encapsulation(mff_ovn_geneve, addnl_tun->tun, pb->datapath,
+                              port_key, false, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = addnl_tun->tun->ofport;
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
+                        pb->header_.uuid.parts[0], &match, ofpacts_p,
+                        &pb->header_.uuid);
+    }
 
     /* Table 39, Priority 100.
      * =======================
@@ -877,6 +949,77 @@ get_binding_peer(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     return peer;
 }
 
+static struct sbrec_encap *
+find_additional_encap_for_chassis(const struct sbrec_port_binding *pb,
+                                  const struct sbrec_chassis *chassis_rec)
+{
+    for (int i = 0; i < pb->n_additional_encap; i++) {
+        if (!strcmp(pb->additional_encap[i]->chassis_name,
+                    chassis_rec->name)) {
+            return pb->additional_encap[i];
+        }
+    }
+    return NULL;
+}
+
+static struct ovs_list *
+get_additional_tunnels(const struct sbrec_port_binding *binding,
+                       const struct sbrec_chassis *chassis,
+                       const struct hmap *chassis_tunnels)
+{
+    const struct chassis_tunnel *tun;
+
+    if (!binding->additional_chassis) {
+        return NULL;
+    }
+
+    struct ovs_list *additional_tunnels = xmalloc(sizeof *additional_tunnels);
+    ovs_list_init(additional_tunnels);
+
+    if (binding->chassis && binding->chassis != chassis) {
+        tun = get_port_binding_tun(binding->encap, binding->chassis,
+                                   chassis_tunnels);
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(
+                &rl, "Failed to locate tunnel to reach main chassis %s "
+                     "for port %s. Cloning packets disabled for the chassis.",
+                binding->chassis->name, binding->logical_port);
+        } else {
+            struct additional_tunnel *addnl_tun = xmalloc(sizeof *addnl_tun);
+            addnl_tun->tun = tun;
+            ovs_list_push_back(additional_tunnels, &addnl_tun->list_node);
+        }
+    }
+
+    for (int i = 0; i < binding->n_additional_chassis; i++) {
+        if (binding->additional_chassis[i] == chassis) {
+            continue;
+        }
+        const struct sbrec_encap *additional_encap;
+        additional_encap = find_additional_encap_for_chassis(
+            binding, binding->additional_chassis[i]);
+        tun = get_port_binding_tun(
+            additional_encap, binding->additional_chassis[i], chassis_tunnels);
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(
+                &rl, "Failed to locate tunnel to reach additional chassis %s "
+                     "for port %s. Cloning packets disabled for the chassis.",
+                binding->additional_chassis[i]->name, binding->logical_port);
+            continue;
+        }
+        struct additional_tunnel *addnl_tun = xmalloc(sizeof *addnl_tun);
+        addnl_tun->tun = tun;
+        ovs_list_push_back(additional_tunnels, &addnl_tun->list_node);
+    }
+    if (ovs_list_is_empty(additional_tunnels)) {
+        free(additional_tunnels);
+        return NULL;
+    }
+    return additional_tunnels;
+}
+
 static void
 consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                       enum mf_field_id mff_ovn_geneve,
@@ -898,6 +1041,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         return;
     }
 
+    struct ovs_list *additional_tuns = NULL;
     struct match match;
     if (!strcmp(binding->type, "patch")
         || (!strcmp(binding->type, "l3gateway")
@@ -911,6 +1055,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 
         struct zone_ids binding_zones = get_zone_ids(binding, ct_zones);
         put_local_common_flows(dp_key, binding, NULL, &binding_zones,
+                               NULL, mff_ovn_geneve,
                                ofpacts_p, flow_table);
 
         ofpbuf_clear(ofpacts_p);
@@ -1051,7 +1196,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                                                 binding->logical_port);
         if (ofport && !lport_can_bind_on_this_chassis(chassis, binding)) {
             /* Even though there is an ofport for this port_binding, it is
-             * requested on a different chassis. So ignore this ofport.
+             * requested on different chassis. So ignore this ofport.
              */
             ofport = 0;
         }
@@ -1064,7 +1209,9 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     if (!ofport) {
         /* It is remote port, may be reached by tunnel or localnet port */
         is_remote = true;
-        if (localnet_port) {
+        /* Enforce tunneling while we clone packets to additional chassis;
+         * otherwise the upstream switch won't flood the packet to both. */
+        if (localnet_port && !binding->additional_chassis) {
             ofport = u16_to_ofp(simap_get(patch_ofports,
                                           localnet_port->logical_port));
             if (!ofport) {
@@ -1090,6 +1237,10 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         }
     }
 
+    /* Clone packets to additional chassis if needed. */
+    additional_tuns = get_additional_tunnels(binding, chassis,
+                                             chassis_tunnels);
+
     if (!is_remote) {
         /* Packets that arrive from a vif can belong to a VM or
          * to a container located inside that VM. Packets that
@@ -1100,6 +1251,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         /* Pass the parent port binding if the port is a nested
          * container. */
         put_local_common_flows(dp_key, binding, parent_port, &zone_ids,
+                               additional_tuns, mff_ovn_geneve,
                                ofpacts_p, flow_table);
 
         /* Table 0, Priority 150 and 100.
@@ -1328,11 +1480,20 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         } else {
             put_remote_port_redirect_overlay(binding, is_ha_remote,
                                              ha_ch_ordered, mff_ovn_geneve,
-                                             tun, port_key, &match, ofpacts_p,
+                                             tun, additional_tuns,
+                                             dp_key, port_key,
+                                             &match, ofpacts_p,
                                              chassis_tunnels, flow_table);
         }
     }
 out:
+    if (additional_tuns) {
+        struct additional_tunnel *addnl_tun;
+        LIST_FOR_EACH_POP (addnl_tun, list_node, additional_tuns) {
+            free(addnl_tun);
+        }
+        free(additional_tuns);
+    }
     if (ha_ch_ordered) {
         ha_chassis_destroy_ordered(ha_ch_ordered);
     }
@@ -1490,7 +1651,8 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
             put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32,
                      &remote_ofpacts);
             put_resubmit(OFTABLE_CHECK_LOOPBACK, &remote_ofpacts);
-        } else if (port->chassis == chassis
+        } else if ((port->chassis == chassis
+                    || is_additional_chassis(port, chassis))
                    && (local_binding_get_primary_pb(local_bindings, lport_name)
                        || !strcmp(port->type, "l3gateway"))) {
             put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
@@ -1513,15 +1675,26 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                     put_resubmit(OFTABLE_CHECK_LOOPBACK, &ofpacts);
                 }
             }
-        } else if (port->chassis && !get_localnet_port(
-                local_datapaths, mc->datapath->tunnel_key)) {
+        } else if (!get_localnet_port(local_datapaths,
+                                      mc->datapath->tunnel_key)) {
             /* Add remote chassis only when localnet port not exist,
              * otherwise multicast will reach remote ports through localnet
              * port. */
-            if (chassis_is_vtep(port->chassis)) {
-                sset_add(&vtep_chassis, port->chassis->name);
-            } else {
-                sset_add(&remote_chassis, port->chassis->name);
+            if (port->chassis) {
+                if (chassis_is_vtep(port->chassis)) {
+                    sset_add(&vtep_chassis, port->chassis->name);
+                } else {
+                    sset_add(&remote_chassis, port->chassis->name);
+                }
+            }
+            for (int j = 0; j < port->n_additional_chassis; j++) {
+                if (chassis_is_vtep(port->additional_chassis[j])) {
+                    sset_add(&vtep_chassis,
+                             port->additional_chassis[j]->name);
+                } else {
+                    sset_add(&remote_chassis,
+                             port->additional_chassis[j]->name);
+                }
             }
         }
     }
diff --git a/tests/ovn.at b/tests/ovn.at
index 0c683fe3b..517da92bc 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -13873,6 +13873,699 @@ OVN_CLEANUP([hv1],[hv2])
 AT_CLEANUP
 ])
 
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([basic connectivity with multiple requested-chassis])
+ovn_start
+
+net_add n1
+for i in 1 2 3; do
+    sim_add hv$i
+    as hv$i
+    check ovs-vsctl add-br br-phys
+    ovn_attach n1 br-phys 192.168.0.$i
+done
+
+# Disable local ARP responder to pass ARP requests through tunnels
+check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
+
+check ovn-nbctl lsp-add ls0 first
+check ovn-nbctl lsp-add ls0 second
+check ovn-nbctl lsp-add ls0 third
+check ovn-nbctl lsp-add ls0 migrator
+check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
+check ovn-nbctl lsp-set-addresses second "00:00:00:00:00:02 10.0.0.2"
+check ovn-nbctl lsp-set-addresses third "00:00:00:00:00:03 10.0.0.3"
+check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:ff 10.0.0.100"
+
+# The test scenario will migrate Migrator port between hv1 and hv2 and check
+# that connectivity to and from the port is functioning properly for both
+# chassis locations. Connectivity will be checked for resources located at hv1
+# (First) and hv2 (Second) as well as for hv3 (Third) that does not take part
+# in port migration.
+check ovn-nbctl lsp-set-options first requested-chassis=hv1
+check ovn-nbctl lsp-set-options second requested-chassis=hv2
+check ovn-nbctl lsp-set-options third requested-chassis=hv3
+
+as hv1 check ovs-vsctl -- add-port br-int first -- \
+    set Interface first external-ids:iface-id=first \
+    options:tx_pcap=hv1/first-tx.pcap \
+    options:rxq_pcap=hv1/first-rx.pcap
+as hv2 check ovs-vsctl -- add-port br-int second -- \
+    set Interface second external-ids:iface-id=second \
+    options:tx_pcap=hv2/second-tx.pcap \
+    options:rxq_pcap=hv2/second-rx.pcap
+as hv3 check ovs-vsctl -- add-port br-int third -- \
+    set Interface third external-ids:iface-id=third \
+    options:tx_pcap=hv3/third-tx.pcap \
+    options:rxq_pcap=hv3/third-rx.pcap
+
+# Create Migrator interfaces on both hv1 and hv2
+for hv in hv1 hv2; do
+    as $hv check ovs-vsctl -- add-port br-int migrator -- \
+        set Interface migrator external-ids:iface-id=migrator \
+        options:tx_pcap=$hv/migrator-tx.pcap \
+        options:rxq_pcap=$hv/migrator-rx.pcap
+done
+
+send_arp() {
+    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+    local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
+    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+    echo "${request}"
+}
+
+reset_pcap_file() {
+    local hv=$1
+    local iface=$2
+    local pcap_file=$3
+    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+                                                   options:rxq_pcap=dummy-rx.pcap
+    check rm -f ${pcap_file}*.pcap
+    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+                                                   options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+reset_env() {
+    reset_pcap_file hv1 first hv1/first
+    reset_pcap_file hv2 second hv2/second
+    reset_pcap_file hv3 third hv3/third
+    reset_pcap_file hv1 migrator hv1/migrator
+    reset_pcap_file hv2 migrator hv2/migrator
+
+    for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/third; do
+        : > $port.expected
+    done
+}
+
+check_packets() {
+    OVN_CHECK_PACKETS([hv1/migrator-tx.pcap], [hv1/migrator.expected])
+    OVN_CHECK_PACKETS([hv2/migrator-tx.pcap], [hv2/migrator.expected])
+    OVN_CHECK_PACKETS([hv1/first-tx.pcap], [hv1/first.expected])
+    OVN_CHECK_PACKETS([hv2/second-tx.pcap], [hv2/second.expected])
+    OVN_CHECK_PACKETS([hv3/third-tx.pcap], [hv3/third.expected])
+}
+
+migrator_tpa=$(ip_to_hex 10 0 0 100)
+first_spa=$(ip_to_hex 10 0 0 1)
+second_spa=$(ip_to_hex 10 0 0 2)
+third_spa=$(ip_to_hex 10 0 0 3)
+
+for hv in hv1 hv2 hv3; do
+    wait_row_count Chassis 1 name=$hv
+done
+hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
+hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
+
+# Start with Migrator on hv1 but not hv2
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1
+wait_for_ports_up
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "" Port_Binding additional_chassis logical_port=migrator
+wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
+wait_for_ports_up
+
+reset_env
+
+OVN_POPULATE_ARP
+
+# check that...
+# unicast from First arrives to hv1:Migrator
+# unicast from First doesn't arrive to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from First arrives to hv1:Migrator
+# mcast from First doesn't arrive to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# unicast from Second arrives to hv1:Migrator
+# unicast from Second doesn't arrive to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from Second arrives to hv1:Migrator
+# mcast from Second doesn't arrive to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv3/third.expected
+
+# unicast from Third arrives to hv1:Migrator
+# unicast from Third doesn't arrive to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from Third arrives to hv1:Migrator
+# mcast from Third doesn't arrive to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# unicast from hv2:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+
+# mcast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# mcast from hv2:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+
+check_packets
+reset_env
+
+# Start port migration hv1 -> hv2: both hypervisors are now bound
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1,hv2
+wait_for_ports_up
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_additional_chassis 
logical_port=migrator
+
+# check that...
+# unicast from First arrives to hv1:Migrator
+# unicast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from First arrives to hv1:Migrator
+# mcast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv3/third.expected
+echo $request >> hv2/second.expected
+
+# unicast from Second arrives to hv1:Migrator
+# unicast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from Second arrives to hv1:Migrator
+# mcast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv3/third.expected
+echo $request >> hv1/first.expected
+
+# unicast from Third arrives to hv1:Migrator binding
+# unicast from Third arrives to hv2:Migrator binding
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from Third arrives to hv1:Migrator
+# mcast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# unicast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# mcast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# mcast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+check_packets
+reset_env
+
+# Complete migration: destination is bound
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
+wait_for_ports_up
+wait_column "$hv2_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "" Port_Binding additional_chassis logical_port=migrator
+wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
+
+# check that...
+# unicast from Third doesn't arrive to hv1:Migrator
+# unicast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from Third doesn't arrive to hv1:Migrator
+# mcast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from First doesn't arrive to hv1:Migrator
+# unicast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from First doesn't arrive to hv1:Migrator
+# mcast from First arrives to hv2:Migrator binding
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# unicast from Second doesn't arrive to hv1:Migrator
+# unicast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from Second doesn't arrive to hv1:Migrator
+# mcast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv3/third.expected
+
+# unicast from hv1:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+
+# unicast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# mcast from hv1:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+
+# mcast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+check_packets
+
+OVN_CLEANUP([hv1],[hv2],[hv3])
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([localnet connectivity with multiple requested-chassis])
+ovn_start
+
+net_add n1
+for i in 1 2 3; do
+    sim_add hv$i
+    as hv$i
+    check ovs-vsctl add-br br-phys
+    ovn_attach n1 br-phys 192.168.0.$i
+    check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+done
+
+# Disable local ARP responder to pass ARP requests through tunnels
+check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
+
+check ovn-nbctl lsp-add ls0 first
+check ovn-nbctl lsp-add ls0 second
+check ovn-nbctl lsp-add ls0 third
+check ovn-nbctl lsp-add ls0 migrator
+check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
+check ovn-nbctl lsp-set-addresses second "00:00:00:00:00:02 10.0.0.2"
+check ovn-nbctl lsp-set-addresses third "00:00:00:00:00:03 10.0.0.3"
+check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:ff 10.0.0.100"
+
+check ovn-nbctl lsp-add ls0 public
+check ovn-nbctl lsp-set-type public localnet
+check ovn-nbctl lsp-set-addresses public unknown
+check ovn-nbctl lsp-set-options public network_name=phys
+
+# The test scenario will migrate Migrator port between hv1 and hv2 and check
+# that connectivity to and from the port is functioning properly for both
+# chassis locations. Connectivity will be checked for resources located at hv1
+# (First) and hv2 (Second) as well as for hv3 (Third) that does not take part
+# in port migration.
+check ovn-nbctl lsp-set-options first requested-chassis=hv1
+check ovn-nbctl lsp-set-options second requested-chassis=hv2
+check ovn-nbctl lsp-set-options third requested-chassis=hv3
+
+as hv1 check ovs-vsctl -- add-port br-int first -- \
+    set Interface first external-ids:iface-id=first \
+    options:tx_pcap=hv1/first-tx.pcap \
+    options:rxq_pcap=hv1/first-rx.pcap
+as hv2 check ovs-vsctl -- add-port br-int second -- \
+    set Interface second external-ids:iface-id=second \
+    options:tx_pcap=hv2/second-tx.pcap \
+    options:rxq_pcap=hv2/second-rx.pcap
+as hv3 check ovs-vsctl -- add-port br-int third -- \
+    set Interface third external-ids:iface-id=third \
+    options:tx_pcap=hv3/third-tx.pcap \
+    options:rxq_pcap=hv3/third-rx.pcap
+
+# Create Migrator interfaces on both hv1 and hv2
+for hv in hv1 hv2; do
+    as $hv check ovs-vsctl -- add-port br-int migrator -- \
+        set Interface migrator external-ids:iface-id=migrator \
+        options:tx_pcap=$hv/migrator-tx.pcap \
+        options:rxq_pcap=$hv/migrator-rx.pcap
+done
+
+send_arp() {
+    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+    local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
+    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+    echo "${request}"
+}
+
+send_garp() {
+    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+    local request=${eth_dst}${eth_src}08060001080006040002${eth_src}${spa}${eth_dst}${tpa}
+    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+    echo "${request}"
+}
+
+reset_pcap_file() {
+    local hv=$1
+    local iface=$2
+    local pcap_file=$3
+    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+                                                   options:rxq_pcap=dummy-rx.pcap
+    check rm -f ${pcap_file}*.pcap
+    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+                                                   options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+reset_env() {
+    reset_pcap_file hv1 first hv1/first
+    reset_pcap_file hv2 second hv2/second
+    reset_pcap_file hv3 third hv3/third
+    reset_pcap_file hv1 migrator hv1/migrator
+    reset_pcap_file hv2 migrator hv2/migrator
+
+    for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/third; do
+        : > $port.expected
+    done
+}
+
+check_packets() {
+    # the test scenario gets spurious garps generated by vifs because of localnet
+    # attachment, hence using CONTAIN instead of strict matching
+    OVN_CHECK_PACKETS_CONTAIN([hv1/migrator-tx.pcap], [hv1/migrator.expected])
+    OVN_CHECK_PACKETS_CONTAIN([hv2/migrator-tx.pcap], [hv2/migrator.expected])
+    OVN_CHECK_PACKETS_CONTAIN([hv1/first-tx.pcap], [hv1/first.expected])
+    OVN_CHECK_PACKETS_CONTAIN([hv2/second-tx.pcap], [hv2/second.expected])
+    OVN_CHECK_PACKETS_CONTAIN([hv3/third-tx.pcap], [hv3/third.expected])
+}
+
+migrator_tpa=$(ip_to_hex 10 0 0 100)
+first_spa=$(ip_to_hex 10 0 0 1)
+second_spa=$(ip_to_hex 10 0 0 2)
+third_spa=$(ip_to_hex 10 0 0 3)
+
+for hv in hv1 hv2 hv3; do
+    wait_row_count Chassis 1 name=$hv
+done
+hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
+hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
+
+OVN_POPULATE_ARP
+
+# Start with Migrator on hv1 but not hv2
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "" Port_Binding additional_chassis logical_port=migrator
+wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
+wait_for_ports_up
+
+# advertise location of ports through localnet port
+send_garp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $migrator_tpa
+send_garp hv1 first 000000000001 ffffffffffff $first_spa $first_spa
+send_garp hv2 second 000000000002 ffffffffffff $second_spa $second_spa
+send_garp hv3 third 000000000003 ffffffffffff $third_spa $third_spa
+reset_env
+
+# check that...
+# unicast from First arrives to hv1:Migrator
+# unicast from First doesn't arrive to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from First arrives to hv1:Migrator
+# mcast from First doesn't arrive to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# unicast from Second arrives to hv1:Migrator
+# unicast from Second doesn't arrive to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from Second arrives to hv1:Migrator
+# mcast from Second doesn't arrive to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv3/third.expected
+
+# unicast from Third arrives to hv1:Migrator
+# unicast from Third doesn't arrive to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from Third arrives to hv1:Migrator
+# mcast from Third doesn't arrive to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# unicast from hv2:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+
+# mcast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# mcast from hv2:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+
+check_packets
+reset_env
+
+# Start port migration hv1 -> hv2: both hypervisors are now bound
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1,hv2
+wait_for_ports_up
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_additional_chassis 
logical_port=migrator
+
+# check that...
+# unicast from First arrives to hv1:Migrator
+# unicast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from First arrives to hv1:Migrator
+# mcast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv3/third.expected
+echo $request >> hv2/second.expected
+
+# unicast from Second arrives to hv1:Migrator
+# unicast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from Second arrives to hv1:Migrator
+# mcast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv3/third.expected
+echo $request >> hv1/first.expected
+
+# unicast from Third arrives to hv1:Migrator binding
+# unicast from Third arrives to hv2:Migrator binding
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from Third arrives to hv1:Migrator
+# mcast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# unicast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# mcast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# mcast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+check_packets
+
+# Complete migration: destination is bound
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
+wait_column "$hv2_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "" Port_Binding additional_chassis logical_port=migrator
+wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
+wait_for_ports_up
+
+check ovn-nbctl --wait=hv sync
+sleep 1
+
+# advertise new location of the port through localnet port
+send_garp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $migrator_tpa
+reset_env
+
+# check that...
+# unicast from Third doesn't arrive to hv1:Migrator
+# unicast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from Third doesn't arrive to hv1:Migrator
+# mcast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from First doesn't arrive to hv1:Migrator
+# unicast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from First doesn't arrive to hv1:Migrator
+# mcast from First arrives to hv2:Migrator binding
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# unicast from Second doesn't arrive to hv1:Migrator
+# unicast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from Second doesn't arrive to hv1:Migrator
+# mcast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv3/third.expected
+
+# unicast from hv1:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+
+# unicast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# mcast from hv1:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+
+# mcast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+check_packets
+
+OVN_CLEANUP([hv1],[hv2],[hv3])
+
+AT_CLEANUP
+])
+
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([options:requested-chassis for logical port])
 ovn_start
-- 
2.34.1

