Handle container port migration in a similar way to vif migration.
When a container port is migrated (i.e. when requested-chassis is configured
for its parent port), install flows in both src and dst hypervisors.
When migration completes, remove flows from src hypervisor.

Cherry-picked from origin/main + picked M_START_TCPDUMP from
"provider networks: Provide the option to tunnel traffic.".

Reported-at: https://issues.redhat.com/browse/FDP-1037

Signed-off-by: Xavier Simonart <[email protected]>
---
 controller/binding.c      |  27 ++--
 controller/physical.c     |   4 +-
 tests/multinode-macros.at |  11 ++
 tests/multinode.at        | 257 ++++++++++++++++++++++++++++++++++++++
 tests/ovn.at              | 160 +++++++++++++++++++++++-
 5 files changed, 443 insertions(+), 16 deletions(-)

diff --git a/controller/binding.c b/controller/binding.c
index a09763f63..b77a32aa2 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -1326,9 +1326,9 @@ claim_lport(const struct sbrec_port_binding *pb,
             bool sb_readonly, bool is_vif,
             struct hmap *tracked_datapaths,
             struct if_status_mgr *if_mgr,
-            struct sset *postponed_ports)
+            struct sset *postponed_ports,
+            enum can_bind can_bind)
 {
-    enum can_bind can_bind = lport_can_bind_on_this_chassis(chassis_rec, pb);
     bool update_tracked = false;
 
     if (can_bind == CAN_BIND_AS_MAIN) {
@@ -1527,7 +1527,7 @@ release_binding_lport(const struct sbrec_chassis 
*chassis_rec,
 
 static bool
 consider_vif_lport_(const struct sbrec_port_binding *pb,
-                    bool can_bind,
+                    enum can_bind can_bind,
                     struct binding_ctx_in *b_ctx_in,
                     struct binding_ctx_out *b_ctx_out,
                     struct binding_lport *b_lport)
@@ -1545,7 +1545,7 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
                              !b_ctx_in->ovnsb_idl_txn,
                              !parent_pb, b_ctx_out->tracked_dp_bindings,
                              b_ctx_out->if_mgr,
-                             b_ctx_out->postponed_ports)) {
+                             b_ctx_out->postponed_ports, can_bind)) {
                 return false;
             }
 
@@ -1607,7 +1607,8 @@ consider_vif_lport(const struct sbrec_port_binding *pb,
                    struct binding_ctx_out *b_ctx_out,
                    struct local_binding *lbinding)
 {
-    bool can_bind = lport_can_bind_on_this_chassis(b_ctx_in->chassis_rec, pb);
+    enum can_bind can_bind =
+        lport_can_bind_on_this_chassis(b_ctx_in->chassis_rec, pb);
 
     if (!lbinding) {
         lbinding = local_binding_find(&b_ctx_out->lbinding_data->bindings,
@@ -1722,9 +1723,10 @@ consider_container_lport(const struct sbrec_port_binding 
*pb,
 
     ovs_assert(parent_b_lport && parent_b_lport->pb);
     /* cannot bind to this chassis if the parent_port cannot be bounded. */
-    bool can_bind = lport_can_bind_on_this_chassis(b_ctx_in->chassis_rec,
-                                                   parent_b_lport->pb) &&
-                    lport_can_bind_on_this_chassis(b_ctx_in->chassis_rec, pb);
+    enum can_bind can_bind =
+        lport_can_bind_on_this_chassis(b_ctx_in->chassis_rec, pb) ?
+         lport_can_bind_on_this_chassis(b_ctx_in->chassis_rec,
+                                        parent_b_lport->pb) : CANNOT_BIND;
 
     return consider_vif_lport_(pb, can_bind, b_ctx_in, b_ctx_out,
                                container_b_lport);
@@ -1774,7 +1776,7 @@ consider_virtual_lport(const struct sbrec_port_binding 
*pb,
         }
     }
 
-    if (!consider_vif_lport_(pb, true, b_ctx_in, b_ctx_out,
+    if (!consider_vif_lport_(pb, CAN_BIND_AS_MAIN, b_ctx_in, b_ctx_out,
                              virtual_b_lport)) {
         return false;
     }
@@ -1848,11 +1850,13 @@ consider_nonvif_lport_(const struct sbrec_port_binding 
*pb,
                            b_ctx_out->tracked_dp_bindings);
 
         update_related_lport(pb, b_ctx_out);
+        enum can_bind can_bind = lport_can_bind_on_this_chassis(
+                                     b_ctx_in->chassis_rec, pb);
         return claim_lport(pb, NULL, b_ctx_in->chassis_rec, NULL,
                            !b_ctx_in->ovnsb_idl_txn, false,
                            b_ctx_out->tracked_dp_bindings,
                            b_ctx_out->if_mgr,
-                           b_ctx_out->postponed_ports);
+                           b_ctx_out->postponed_ports, can_bind);
     }
 
     if (pb->chassis == b_ctx_in->chassis_rec
@@ -2804,7 +2808,8 @@ handle_updated_vif_lport(const struct sbrec_port_binding 
*pb,
     bool now_claimed = (pb->chassis == b_ctx_in->chassis_rec);
 
     if (lport_type == LP_VIRTUAL || lport_type == LP_CONTAINER ||
-            claimed == now_claimed) {
+            (claimed == now_claimed &&
+             !is_additional_chassis(pb, b_ctx_in->chassis_rec))) {
         return true;
     }
 
diff --git a/controller/physical.c b/controller/physical.c
index 53da4b9fa..3ec1424d4 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -1640,8 +1640,8 @@ consider_port_binding(struct ovsdb_idl_index 
*sbrec_port_binding_by_name,
                 sbrec_port_binding_by_name, binding->parent_port);
 
             if (parent_port
-                && (lport_can_bind_on_this_chassis(chassis,
-                    parent_port) != CAN_BIND_AS_MAIN)) {
+                && !lport_can_bind_on_this_chassis(chassis,
+                                                   parent_port)) {
                 /* Even though there is an ofport for this container
                  * parent port, it is requested on different chassis ignore
                  * this container port.
diff --git a/tests/multinode-macros.at b/tests/multinode-macros.at
index c04506a52..37a31d43b 100644
--- a/tests/multinode-macros.at
+++ b/tests/multinode-macros.at
@@ -15,6 +15,17 @@ m4_define([M_NS_CHECK_EXEC],
     [ AT_CHECK([M_NS_EXEC([$1], [$2], [$3])], 
m4_shift(m4_shift(m4_shift($@)))) ]
 )
 
+# M_START_TCPDUMP([fake_node], [params], [name])
+#
+# Helper to properly start tcpdump and wait for the startup.
+# The tcpdump output is available in <name>.tcpdump file.
+m4_define([M_START_TCPDUMP],
+    [
+     podman exec $1 tcpdump -l $2 >$3.tcpdump 2>$3.stderr &
+     OVS_WAIT_UNTIL([grep -q "listening" $3.stderr])
+    ]
+)
+
 OVS_START_SHELL_HELPERS
 
 m_as() {
diff --git a/tests/multinode.at b/tests/multinode.at
index b959a2550..da8093b02 100644
--- a/tests/multinode.at
+++ b/tests/multinode.at
@@ -891,3 +891,260 @@ M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 
-i 0.3 -w 2 172.20.1.2 |
 M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -c 20 -i 0.5 -s 1300 -M do 
172.20.1.2 2>&1 |grep -q "mtu = 1150"])
 
 AT_CLEANUP
+
+AT_SETUP([Migration of container ports])
+# Migrate vif port between chassis-1 and chassis-3; send packets between
+# chassis-2 and chassis-1/chassis-3, and check that
+# - packet handling on src works before migration.
+# - packet handling on src & dst works during migration.
+# - packet handling on dst works after migration.
+# Do the same for container ports.
+# The container port migration is tested in two different orders,
+# setting iface-id on dst resp. before and after requested-chassis.
+
+# Check that ovn-fake-multinode setup is up and running
+# check_fake_multinode_setup
+check_fake_multinode_setup
+
+# Delete the multinode NB and OVS resources before starting the test.
+cleanup_multinode_resources
+m_as ovn-chassis-3 ovs-vsctl del-br br-int
+m_as ovn-chassis-3 ip --all netns delete
+
+OVS_WAIT_UNTIL([m_as ovn-chassis-1 ip link show | grep -q genev_sys])
+OVS_WAIT_UNTIL([m_as ovn-chassis-2 ip link show | grep -q genev_sys])
+OVS_WAIT_UNTIL([m_as ovn-chassis-3 ip link show | grep -q genev_sys])
+
+check multinode_nbctl ls-add sw0
+check multinode_nbctl lsp-add sw0 migrator
+check multinode_nbctl lsp-set-addresses migrator "50:54:00:00:00:03 10.0.0.3 
1000::3"
+check multinode_nbctl lsp-add sw0 sw0-port2
+check multinode_nbctl lsp-set-addresses sw0-port2 "50:54:00:00:00:04 10.0.0.4 
1000::4"
+
+# Set requested chassis before creating migrator on chassis-3
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-1
+
+m_as ovn-chassis-1 /data/create_fake_vm.sh migrator migrator 50:54:00:00:00:03 
10.0.0.3 24 10.0.0.1 1000::3/64 1000::a
+m_as ovn-chassis-3 /data/create_fake_vm.sh migrator migrator 50:54:00:00:00:03 
10.0.0.3 24 10.0.0.1 1000::3/64 1000::a
+m_as ovn-chassis-2 /data/create_fake_vm.sh sw0-port2 sw0p2 50:54:00:00:00:04 
10.0.0.4 24 10.0.0.1 1000::4/64 1000::a
+
+m_wait_for_ports_up
+
+M_START_TCPDUMP([ovn-chassis-1], [-neei genev_sys_6081 arp or ip], [ch1_genev])
+M_START_TCPDUMP([ovn-chassis-1], [-neei migrator-p arp or ip], [ch1_migrator])
+M_START_TCPDUMP([ovn-chassis-2], [-neei genev_sys_6081 arp or ip], [ch2_genev])
+M_START_TCPDUMP([ovn-chassis-2], [-neei sw0p2-p arp or ip], [ch2_sw0p2])
+M_START_TCPDUMP([ovn-chassis-3], [-neei genev_sys_6081 arp or ip], [ch3_genev])
+M_START_TCPDUMP([ovn-chassis-3], [-neei migrator-p arp or ip], [ch3_migrator])
+
+AS_BOX([Migration with vifs])
+echo "Migrator on chassis-1 => sw0p2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
10.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "sw0p2 on chassis-2 => Migrator on chassis-1"
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 10.0.0.3 | 
FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "== Starting migration =="
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-1,ovn-chassis-3
+
+echo "Migrator on chassis-1 => sw0p2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
10.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+
+echo "Migrator on chassis-3 => sw0p2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
10.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "sw0p2 on chassis-2 => migrator on  ..."
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 10.0.0.3 | 
FORMAT_PING], \
+[0], [stdout])
+
+# Both VMs are running ... We might get duplicate replies.
+AT_CHECK([cat stdout | grep "3 packets transmitted" | grep -c "3 received"], 
[0],[dnl
+1
+])
+
+echo "== Finalizing migration =="
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-3
+
+echo "Migrator on chassis-1 => sw0p2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
10.0.0.4 | FORMAT_PING], \
+[0], [stdout])
+
+# VM still running on chassis-1 but flows should have been deleted as 
migration completed.
+AT_CHECK([cat stdout | grep -c "100% packet loss"], [0],[dnl
+1
+])
+
+echo "Migrator on chassis-3 => sw0p2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
10.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+# We should not have duplicates anymore
+echo "sw0p2 on chassis-2 => migrator on chassis-3"
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 10.0.0.3 | 
FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+AS_BOX([Migration with container ports])
+# Create container ports.
+check multinode_nbctl ls-add sw1
+check multinode_nbctl lsp-add sw1 mig-cont migrator 10 \
+                   -- lsp-set-addresses mig-cont "f0:00:00:01:02:03 20.0.0.3"
+check multinode_nbctl lsp-add sw1 cont2 sw0-port2 10 \
+                   -- lsp-set-addresses cont2 "f0:00:00:01:02:04 20.0.0.4"
+
+# Create the interface for lport mig-cont
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ip link add link migrator name 
cont type vlan id 10], [0])
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ip link set cont address 
f0:00:00:01:02:03], [0])
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ip link set cont up], [0])
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ip addr add 20.0.0.3/24 dev 
cont], [0])
+
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ip link add link migrator name 
cont type vlan id 10], [0])
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ip link set cont address 
f0:00:00:01:02:03], [0])
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ip link set cont up], [0])
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ip addr add 20.0.0.3/24 dev 
cont], [0])
+
+# Create the interface for lport sw1-port2
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ip link add link sw0p2 name cont2 
type vlan id 10], [0])
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ip link set cont2 address 
f0:00:00:01:02:04], [0])
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ip link set cont2 up], [0])
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ip addr add 20.0.0.4/24 dev cont2], 
[0])
+
+echo "mig-cont on chassis-3 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "cont2 on chassis-2 => mig-cont on chassis-3"
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | 
FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "== Starting migration back =="
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-3,ovn-chassis-1
+
+echo "mig-cont on chassis-3 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "mig-cont on chassis-1 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "cont2 on chassis-2 => mig-cont on  ..."
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | 
FORMAT_PING], \
+[0], [stdout])
+
+# Both VMs are running ... We might get duplicate replies.
+AT_CHECK([cat stdout | grep "3 packets transmitted" | grep -c "3 received"], 
[0],[dnl
+1
+])
+
+echo "== Finalizing migration =="
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-1
+
+echo "mig-cont on chassis-3 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [stdout])
+
+# VM still running on chassis-3 but flows should have been deleted....
+AT_CHECK([cat stdout | grep -c "100% packet loss"], [0],[dnl
+1
+])
+
+echo "mig-cont on chassis-1 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+# We should not have duplicates anymore
+echo "cont2 on chassis-2 => mig-cont on chassis-1"
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | 
FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "== Starting another migration, this time before starting dst VM =="
+# Unbind migrator from chassis-3
+m_as ovn-chassis-3 ovs-vsctl -- set Interface migrator-p 
external_ids:iface-id=foo
+
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-1,ovn-chassis-3
+sleep 1
+m_as ovn-chassis-3 ovs-vsctl -- set Interface migrator-p 
external_ids:iface-id=migrator
+
+
+echo "mig-cont on chassis-3 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "mig-cont on chassis-1 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+echo "cont2 on chassis-2 => migrator on  ..."
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | 
FORMAT_PING], \
+[0], [stdout])
+
+# Both VMs are running ... We might get duplicate replies.
+AT_CHECK([cat stdout | grep "3 packets transmitted" | grep -c "3 received"], 
[0],[dnl
+1
+])
+
+echo "== Finalizing migration =="
+check multinode_nbctl --wait=hv set Logical_Switch_Port migrator 
options:requested-chassis=ovn-chassis-3
+
+echo "mig-cont on chassis-1 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-1], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [stdout])
+
+# VM still running on chassis-1 but flows should have been deleted....
+AT_CHECK([cat stdout | grep -c "100% packet loss"], [0],[dnl
+1
+])
+
+echo "mig-cont on chassis-3 => cont2 on chassis-2"
+M_NS_CHECK_EXEC([ovn-chassis-3], [migrator], [ping -q -c 3 -i 0.3 -w 2 
20.0.0.4 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+# We should not have duplicates anymore
+echo "cont2 on chassis-2 => mig-cont on chassis-3"
+M_NS_CHECK_EXEC([ovn-chassis-2], [sw0p2], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | 
FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+m_as ovn-chassis-1 killall tcpdump
+m_as ovn-chassis-2 killall tcpdump
+m_as ovn-chassis-3 killall tcpdump
+
+AT_CLEANUP
+
+
diff --git a/tests/ovn.at b/tests/ovn.at
index a219d1e94..bf77fe4fa 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -16557,6 +16557,160 @@ OVN_CLEANUP([hv1],[hv2])
 AT_CLEANUP
 ])
 
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([No fight between two chassis for the same port])
+ovn_start
+
+# lsp1 on hv1, lsp2 on hv2.
+# migrator1 (and its mig1-cont container) initially on hv1.
+# migrator2 (and its mig2-cont container) initially on hv2.
+# For migrator1, iface-id is set (in both hv) before starting migration.
+# For migrator2, iface-id is set (in dst hv) after initiating migration.
+check ovn-nbctl ls-add ls0
+check ovn-nbctl lsp-add ls0 lsp1
+check ovn-nbctl lsp-add ls0 lsp2
+check ovn-nbctl lsp-add ls0 migrator1
+check ovn-nbctl lsp-add ls0 migrator2
+check ovn-nbctl lsp-add ls0 mig1-cont migrator1 1
+check ovn-nbctl lsp-add ls0 mig2-cont migrator2 1
+check ovn-nbctl set Logical_Switch_Port migrator1  
options:requested-chassis=hv1
+check ovn-nbctl set Logical_Switch_Port migrator2  
options:requested-chassis=hv2
+
+net_add n1
+for i in 1 2; do
+    sim_add hv$i
+    as hv$i
+    ovs-vsctl add-br br-phys
+    ovn_attach n1 br-phys 192.168.0.$i
+done
+
+as hv2 ovs-vsctl -- add-port br-int vif-migrator2 \
+                 -- set Interface vif-migrator2 external-ids:iface-id=migrator2
+
+# Set iface-id in both hv1 and hv2.
+for i in 1 2; do
+    as hv$i
+    ovs-vsctl -- add-port br-int vif-migrator1 \
+              -- set Interface vif-migrator1 external-ids:iface-id=migrator1
+    ovs-vsctl -- add-port br-int vif$i \
+              -- set Interface vif$i external-ids:iface-id=lsp$i
+done
+
+hv1_uuid=$(ovn-sbctl --bare --columns _uuid list chassis hv1)
+hv2_uuid=$(ovn-sbctl --bare --columns _uuid list chassis hv2)
+
+check_in_port_flows_count() {
+  hv=$1
+  of=$2
+  count=$3
+  AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=0 | grep 
"in_port=$of" | wc -l], [0], [dnl
+$count
+])
+}
+
+check_claims() {
+    port=$1
+    hv1_claims=$(grep -c "Claiming lport $port\|Changing chassis for lport 
$port" hv1/ovn-controller.log)
+    hv2_claims=$(grep -c "Claiming lport $port\|Changing chassis for lport 
$port" hv2/ovn-controller.log)
+    echo "$hv1_claims claims for port $port in hv1"
+    echo "$hv2_claims claims for port $port in hv2"
+    AT_CHECK([test "${hv1_claims}" -le "${max_claims}"], [0], [])
+    AT_CHECK([test "${hv2_claims}" -le "${max_claims}"], [0], [])
+}
+
+# Waiting for flows in hv1 && hv2 for resp. lsp1 and lsp2.
+OVS_WAIT_UNTIL([
+    of_hv1=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=vif1)
+    of_hv2=$(as hv2 ovs-vsctl --bare --columns ofport find Interface name=vif2)
+    test -n $of_hv1 && test -n $of_hv2 &&
+        test 1 -le $(as hv1 ovs-ofctl dump-flows br-int table=0 | grep 
"in_port=$of_hv1" | wc -l) &&
+        test 1 -le $(as hv2 ovs-ofctl dump-flows br-int table=0 | grep 
"in_port=$of_hv2" | wc -l)
+])
+
+of1=$(as hv1 ovs-vsctl --bare --columns ofport find Interface 
name=vif-migrator1)
+of2=$(as hv2 ovs-vsctl --bare --columns ofport find Interface 
name=vif-migrator1)
+
+AS_BOX([$(date +%H:%M:%S.%03N) Before migrating migrator1])
+wait_column "" Port_Binding additional_chassis logical_port=migrator1
+wait_column "" Port_Binding additional_chassis logical_port=mig1-cont
+# We should have two flows in table OFTABLE_PHY_TO_LOG from in_port=migrator1 
in hv1 (one for vif and one for the container) and 0 flows in hv2.
+check_in_port_flows_count hv1 $of1 2
+check_in_port_flows_count hv2 $of2 0
+
+
+AS_BOX([$(date +%H:%M:%S.%03N) Starting migrating migrator1])
+check ovn-nbctl --wait=hv set Logical_Switch_Port migrator1 
options:requested-chassis=hv1,hv2
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=migrator1
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=mig1-cont
+# We should have two flows in table OFTABLE_PHY_TO_LOG from in_port=migrator1 
in hv1 and hv2.
+check_in_port_flows_count hv1 $of1 2
+check_in_port_flows_count hv2 $of2 2
+
+# While migration is underway, check that there is no fight between the hvs
+sleep 3
+max_claims=2
+check_claims migrator1
+check_claims mig1-cont
+
+AS_BOX([$(date +%H:%M:%S.%03N) After migrating migrator1])
+check ovn-nbctl --wait=hv set Logical_Switch_Port migrator1 
options:requested-chassis=hv2
+wait_column "" Port_Binding additional_chassis logical_port=migrator1
+wait_column "" Port_Binding additional_chassis logical_port=mig1-cont
+check_in_port_flows_count hv1 $of1 0
+check_in_port_flows_count hv2 $of2 2
+
+# Now do the same, but with iface-id set after starting migration
+of2=$(as hv2 ovs-vsctl --bare --columns ofport find Interface 
name=vif-migrator2)
+# There is no ofport yet for hv1 => we cannot check/find flows with 
in_port=of1
+
+AS_BOX([$(date +%H:%M:%S.%03N) Before migrating migrator2])
+# We should have zero flows in table OFTABLE_PHY_TO_LOG from in_port=migrator2 
in hv1 and two flows in hv2.
+wait_column "" Port_Binding additional_chassis logical_port=migrator2
+wait_column "" Port_Binding additional_chassis logical_port=mig2-cont
+check_in_port_flows_count hv2 $of2 2
+
+check ovn-nbctl --wait=hv set Logical_Switch_Port migrator2  
options:requested-chassis=hv2,hv1
+wait_column "" Port_Binding additional_chassis logical_port=migrator2
+wait_column "" Port_Binding additional_chassis logical_port=mig2-cont
+# As no iface-id for migrator2 yet, migration not really started
+# We should have zero flows in table OFTABLE_PHY_TO_LOG from in_port=migrator2 
in hv1 and two flows in hv2.
+check_in_port_flows_count hv2 $of2 2
+
+AS_BOX([$(date +%H:%M:%S.%03N) Starting migrating migrator2])
+as hv1 ovs-vsctl -- add-port br-int vif-migrator2 \
+                 -- set Interface vif-migrator2 external-ids:iface-id=migrator2
+
+wait_column "$hv1_uuid" Port_Binding additional_chassis logical_port=migrator2
+wait_column "$hv1_uuid" Port_Binding additional_chassis logical_port=mig2-cont
+# We need the oflow to be created and handled by ovn-controller. Waiting for 
sync would not be enough.
+OVS_WAIT_UNTIL([
+    of1=$(as hv1 ovs-vsctl --bare --columns ofport find Interface 
name=vif-migrator2)
+    test 1 -le "$of1"
+])
+
+check ovn-nbctl --wait=hv sync
+
+# We should have two flows in table OFTABLE_PHY_TO_LOG from in_port=migrator2 
in hv1 and hv2.
+check_in_port_flows_count hv1 $of1 2
+check_in_port_flows_count hv2 $of2 2
+
+# While migration is underway, check that there is no fight between the hvs
+sleep 3
+check_claims migrator2
+check_claims mig2-cont
+
+AS_BOX([$(date +%H:%M:%S.%03N) After migrating migrator2])
+check ovn-nbctl --wait=hv set Logical_Switch_Port migrator2  
options:requested-chassis=hv1
+wait_column "" Port_Binding additional_chassis logical_port=migrator2
+wait_column "" Port_Binding additional_chassis logical_port=mig2-cont
+check_in_port_flows_count hv1 $of1 2
+check_in_port_flows_count hv2 $of2 0
+
+OVN_CLEANUP([hv1],[hv2])
+
+AT_CLEANUP
+])
+
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([options:requested-chassis with hostname])
 
@@ -38442,7 +38596,7 @@ OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int 
table=0 |grep priority=1
 ])
 
 # Add hv2 to lport Additional requested chassis as MAIN chassis
-# and check that no flows installed in table 0 in hv1
+# and check that flows remain installed in table 0 in hv1
 check ovn-nbctl set Logical_Switch_Port lsp1 \
     options:requested-chassis=hv2,hv1
 
@@ -38457,8 +38611,8 @@ wait_column "$hv2_uuid" Port_Binding chassis 
logical_port=lsp1
 OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=0 |grep 
priority=150|grep dl_vlan=7| grep -c in_port=8], [0],[dnl
 1
 ])
-OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=0 |grep 
priority=150|grep dl_vlan=7| grep -c in_port=8], [1],[dnl
-0
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=0 |grep 
priority=150|grep dl_vlan=7| grep -c in_port=8], [0],[dnl
+1
 ])
 
 OVN_CLEANUP([hv1])
-- 
2.47.1

_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to