diff --git a/.github/workflows/ovn-kubernetes.yml b/.github/workflows/ovn-kubernetes.yml
index c82b23a1f..1d554cd03 100644
--- a/.github/workflows/ovn-kubernetes.yml
+++ b/.github/workflows/ovn-kubernetes.yml
@@ -56,7 +56,7 @@ jobs:
     name: e2e
     if: github.event_name != 'schedule'
     runs-on: ubuntu-20.04
-    timeout-minutes: 120
+    timeout-minutes: 220
     strategy:
       fail-fast: false
       matrix:
@@ -137,6 +137,9 @@ jobs:
       working-directory: src/github.com/ovn-org/ovn-kubernetes
 
     - name: Run Tests
+      # e2e tests take ~60 minutes normally, 120 should be more than enough
+      # set 180 for control-plane tests as these might take a while
+      timeout-minutes: ${{ matrix.target == 'control-plane' && 180 || 120 }}
       run: |
         make -C test ${{ matrix.target }}
       working-directory: src/github.com/ovn-org/ovn-kubernetes
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0f8d9d193..edf4fb2fd 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -17,7 +17,8 @@ jobs:
       dependencies: |
         automake libtool gcc bc libjemalloc2 libjemalloc-dev    \
         libssl-dev llvm-dev libelf-dev libnuma-dev libpcap-dev  \
-        selinux-policy-dev ncat python3-scapy isc-dhcp-server
+        selinux-policy-dev ncat python3-scapy isc-dhcp-server \
+        iputils-arping
       m32_dependecies: gcc-multilib
       ARCH:        ${{ matrix.cfg.arch }}
       CC:          ${{ matrix.cfg.compiler }}
diff --git a/NEWS b/NEWS
index 5e8aed06d..d7ba71ef5 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,17 @@
+OVN v23.03.1 - xx xxx xxxx
+--------------------------
+  - CT entries are not flushed by default anymore whenever a load balancer
+    backend is removed.  A new, per-LB, option 'ct_flush' can be used to
+    restore the previous behavior.  Disabled by default.
+  - Always allow IPv6 Router Discovery, Neighbor Discovery, and Multicast
+    Listener Discovery protocols, regardless of ACLs defined.
+  - Send ICMP Fragmentation Needed packets back to offending ports when
+    communicating with multichassis ports using frames that don't fit through a
+    tunnel. This is done only for logical switches that are attached to a
+    physical network via a localnet port, in which case multichassis ports may
+    have an effective MTU different from regular ports and hence may need this
+    mechanism to maintain connectivity with other peers in the network.
+
 OVN v23.03.0 - 03 Mar 2023
 --------------------------
   - ovn-controller: Experimental support for co-hosting multiple controller
diff --git a/configure.ac b/configure.ac
index b51d0f01e..0ba9e8d7e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 AC_PREREQ(2.63)
-AC_INIT(ovn, 23.03.0, bugs@openvswitch.org)
+AC_INIT(ovn, 23.03.1, bugs@openvswitch.org)
 AC_CONFIG_MACRO_DIR([m4])
 AC_CONFIG_AUX_DIR([build-aux])
 AC_CONFIG_HEADERS([config.h])
diff --git a/controller/binding.c b/controller/binding.c
index 5df62baef..8fce6fc3f 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -57,6 +57,10 @@ struct claimed_port {
 static struct shash _claimed_ports = SHASH_INITIALIZER(&_claimed_ports);
 static struct sset _postponed_ports = SSET_INITIALIZER(&_postponed_ports);
 
+static void
+remove_additional_chassis(const struct sbrec_port_binding *pb,
+                          const struct sbrec_chassis *chassis_rec);
+
 struct sset *
 get_postponed_ports(void)
 {
@@ -746,6 +750,19 @@ local_binding_get_lport_ofport(const struct shash *local_bindings,
             u16_to_ofp(lbinding->iface->ofport[0]) : 0;
 }
 
+bool
+local_binding_is_ovn_installed(struct shash *local_bindings,
+                               const char *pb_name)
+{
+    struct local_binding *lbinding =
+        local_binding_find(local_bindings, pb_name);
+    if (lbinding && lbinding->iface) {
+        return smap_get_bool(&lbinding->iface->external_ids,
+                             OVN_INSTALLED_EXT_ID, false);
+    }
+    return false;
+}
+
 bool
 local_binding_is_up(struct shash *local_bindings, const char *pb_name,
                     const struct sbrec_chassis *chassis_rec)
@@ -783,6 +800,7 @@ local_binding_is_down(struct shash *local_bindings, const char *pb_name,
         } else if (b_lport->pb->chassis) {
             VLOG_DBG("lport %s already claimed by other chassis",
                      b_lport->pb->logical_port);
+            return true;
         }
     }
 
@@ -834,6 +852,38 @@ local_binding_set_up(struct shash *local_bindings, const char *pb_name,
     }
 }
 
+void
+local_binding_remove_ovn_installed(
+        struct shash *local_bindings,
+        const struct ovsrec_interface_table *iface_table,
+        const char *pb_name, bool ovs_readonly)
+{
+    if (ovs_readonly) {
+        return;
+    }
+    struct local_binding *lbinding =
+        local_binding_find(local_bindings, pb_name);
+    if (lbinding && lbinding->iface) {
+        const struct uuid *iface_uuid = &lbinding->iface->header_.uuid;
+        remove_ovn_installed_for_uuid(iface_table, iface_uuid);
+    }
+}
+
+void
+remove_ovn_installed_for_uuid(const struct ovsrec_interface_table *iface_table,
+                              const struct uuid *iface_uuid)
+{
+    const struct ovsrec_interface *iface_rec =
+        ovsrec_interface_table_get_for_uuid(iface_table, iface_uuid);
+    if (iface_rec && smap_get_bool(&iface_rec->external_ids,
+                                   OVN_INSTALLED_EXT_ID, false)) {
+        VLOG_INFO("Removing iface %s ovn-installed in OVS",
+                  iface_rec->name);
+        ovsrec_interface_update_external_ids_delkey(iface_rec,
+                                                    OVN_INSTALLED_EXT_ID);
+    }
+}
+
 void
 local_binding_set_down(struct shash *local_bindings, const char *pb_name,
                        const struct sbrec_chassis *chassis_rec,
@@ -853,7 +903,6 @@ local_binding_set_down(struct shash *local_bindings, const char *pb_name,
 
     if (!sb_readonly && b_lport && b_lport->pb->n_up && b_lport->pb->up[0] &&
             (!b_lport->pb->chassis || b_lport->pb->chassis == chassis_rec)) {
-        VLOG_INFO("Setting lport %s down in Southbound", pb_name);
         binding_lport_set_down(b_lport, sb_readonly);
         LIST_FOR_EACH (b_lport, list_node, &lbinding->binding_lports) {
             binding_lport_set_down(b_lport, sb_readonly);
@@ -1028,6 +1077,26 @@ set_pb_chassis_in_sbrec(const struct sbrec_port_binding *pb,
     }
 }
 
+void
+set_pb_additional_chassis_in_sbrec(const struct sbrec_port_binding *pb,
+                                   const struct sbrec_chassis *chassis_rec,
+                                   bool is_set)
+{
+    if (!is_additional_chassis(pb, chassis_rec)) {
+        VLOG_INFO("Claiming lport %s for this additional chassis.",
+                  pb->logical_port);
+        for (size_t i = 0; i < pb->n_mac; i++) {
+            VLOG_INFO("%s: Claiming %s", pb->logical_port, pb->mac[i]);
+        }
+        sbrec_port_binding_update_additional_chassis_addvalue(pb, chassis_rec);
+        if (pb->chassis == chassis_rec) {
+            sbrec_port_binding_set_chassis(pb, NULL);
+        }
+    } else if (!is_set) {
+        remove_additional_chassis(pb, chassis_rec);
+    }
+}
+
 bool
 local_bindings_pb_chassis_is_set(struct shash *local_bindings,
                                  const char *pb_name,
@@ -1228,8 +1297,8 @@ claim_lport(const struct sbrec_port_binding *pb,
                 }
                 set_pb_chassis_in_sbrec(pb, chassis_rec, true);
             } else {
-                if_status_mgr_claim_iface(if_mgr, pb, chassis_rec,
-                                          sb_readonly);
+                if_status_mgr_claim_iface(if_mgr, pb, chassis_rec, iface_rec,
+                                          sb_readonly, can_bind);
             }
             register_claim_timestamp(pb->logical_port, now);
             sset_find_and_delete(postponed_ports, pb->logical_port);
@@ -1239,29 +1308,19 @@ claim_lport(const struct sbrec_port_binding *pb,
                     return false;
                 }
             } else {
-                if (pb->n_up && !pb->up[0]) {
+                if ((pb->n_up && !pb->up[0]) ||
+                    !smap_get_bool(&iface_rec->external_ids,
+                                   OVN_INSTALLED_EXT_ID, false)) {
                     if_status_mgr_claim_iface(if_mgr, pb, chassis_rec,
-                                              sb_readonly);
+                                              iface_rec, sb_readonly,
+                                              can_bind);
                 }
             }
         }
     } else if (can_bind == CAN_BIND_AS_ADDITIONAL) {
         if (!is_additional_chassis(pb, chassis_rec)) {
-            if (sb_readonly) {
-                return false;
-            }
-
-            VLOG_INFO("Claiming lport %s for this additional chassis.",
-                      pb->logical_port);
-            for (size_t i = 0; i < pb->n_mac; i++) {
-                VLOG_INFO("%s: Claiming %s", pb->logical_port, pb->mac[i]);
-            }
-
-            sbrec_port_binding_update_additional_chassis_addvalue(pb,
-                                                                  chassis_rec);
-            if (pb->chassis == chassis_rec) {
-                sbrec_port_binding_set_chassis(pb, NULL);
-            }
+            if_status_mgr_claim_iface(if_mgr, pb, chassis_rec, iface_rec,
+                                      sb_readonly, can_bind);
             update_tracked = true;
         }
     }
@@ -1464,9 +1523,11 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
             const char *requested_chassis_option = smap_get(
                 &pb->options, "requested-chassis");
             VLOG_INFO_RL(&rl,
-                "Not claiming lport %s, chassis %s requested-chassis %s",
+                "Not claiming lport %s, chassis %s requested-chassis %s "
+                "pb->chassis %s",
                 pb->logical_port, b_ctx_in->chassis_rec->name,
-                requested_chassis_option ? requested_chassis_option : "[]");
+                requested_chassis_option ? requested_chassis_option : "[]",
+                pb->chassis ? pb->chassis->name: "");
         }
     }
 
@@ -2030,7 +2091,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
         free(lnet_lport);
     }
 
-    /* Run through external lport list to see if these are external ports
+    /* Run through external lport list to see if there are external ports
      * on local datapaths discovered from above loop, and update the
      * corresponding local datapath accordingly. */
     struct lport *ext_lport;
@@ -2039,7 +2100,7 @@ binding_run(struct binding_ctx_in *b_ctx_in, struct binding_ctx_out *b_ctx_out)
         free(ext_lport);
     }
 
-    /* Run through multichassis lport list to see if these are ports
+    /* Run through multichassis lport list to see if there are ports
      * on local datapaths discovered from above loop, and update the
      * corresponding local datapath accordingly. */
     struct lport *multichassis_lport;
@@ -2288,6 +2349,11 @@ consider_iface_release(const struct ovsrec_interface *iface_rec,
                 return false;
             }
         }
+        if (lbinding->iface && lbinding->iface->name) {
+            if_status_mgr_remove_ovn_installed(b_ctx_out->if_mgr,
+                                               lbinding->iface->name,
+                                               &lbinding->iface->header_.uuid);
+        }
 
     } else if (lbinding && b_lport && b_lport->type == LP_LOCALPORT) {
         /* lbinding is associated with a localport.  Remove it from the
@@ -2558,6 +2624,7 @@ handle_deleted_lport(const struct sbrec_port_binding *pb,
     if (ld) {
         remove_pb_from_local_datapath(pb,
                                       b_ctx_out, ld);
+        if_status_mgr_release_iface(b_ctx_out->if_mgr, pb->logical_port);
         return;
     }
 
@@ -2581,6 +2648,7 @@ handle_deleted_lport(const struct sbrec_port_binding *pb,
             remove_pb_from_local_datapath(pb, b_ctx_out,
                                           ld);
         }
+        if_status_mgr_release_iface(b_ctx_out->if_mgr, pb->logical_port);
     }
 }
 
@@ -2627,6 +2695,11 @@ handle_deleted_vif_lport(const struct sbrec_port_binding *pb,
     }
 
     handle_deleted_lport(pb, b_ctx_in, b_ctx_out);
+    if (lbinding && lbinding->iface && lbinding->iface->name) {
+        if_status_mgr_remove_ovn_installed(b_ctx_out->if_mgr,
+                                           lbinding->iface->name,
+                                           &lbinding->iface->header_.uuid);
+    }
     return true;
 }
 
@@ -3314,6 +3387,24 @@ binding_lport_delete(struct shash *binding_lports,
     binding_lport_destroy(b_lport);
 }
 
+void
+port_binding_set_down(const struct sbrec_chassis *chassis_rec,
+                      const struct sbrec_port_binding_table *pb_table,
+                      const char *iface_id,
+                      const struct uuid *pb_uuid)
+{
+        const struct sbrec_port_binding *pb =
+            sbrec_port_binding_table_get_for_uuid(pb_table, pb_uuid);
+        if (!pb) {
+            VLOG_DBG("port_binding already deleted for %s", iface_id);
+        } else if (pb->n_up && pb->up[0]) {
+            bool up = false;
+            sbrec_port_binding_set_up(pb, &up, 1);
+            VLOG_INFO("Setting lport %s down in Southbound", pb->logical_port);
+            set_pb_chassis_in_sbrec(pb, chassis_rec, false);
+        }
+}
+
 static void
 binding_lport_set_up(struct binding_lport *b_lport, bool sb_readonly)
 {
@@ -3331,6 +3422,7 @@ binding_lport_set_down(struct binding_lport *b_lport, bool sb_readonly)
     if (sb_readonly || !b_lport || !b_lport->pb->n_up || !b_lport->pb->up[0]) {
         return;
     }
+    VLOG_INFO("Setting lport %s down in Southbound", b_lport->name);
 
     bool up = false;
     sbrec_port_binding_set_up(b_lport->pb, &up, 1);
diff --git a/controller/binding.h b/controller/binding.h
index 6c3a98b02..46e618b97 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -159,6 +159,14 @@ bool local_binding_is_up(struct shash *local_bindings, const char *pb_name,
 bool local_binding_is_down(struct shash *local_bindings, const char *pb_name,
                            const struct sbrec_chassis *);
 
+bool local_binding_is_ovn_installed(struct shash *local_bindings,
+                                    const char *pb_name);
+void local_binding_remove_ovn_installed(
+        struct shash *local_bindings,
+        const struct ovsrec_interface_table *iface_table,
+        const char *pb_name,
+        bool ovs_readonly);
+
 void local_binding_set_up(struct shash *local_bindings, const char *pb_name,
                           const struct sbrec_chassis *chassis_rec,
                           const char *ts_now_str, bool sb_readonly,
@@ -194,6 +202,18 @@ bool is_additional_chassis(const struct sbrec_port_binding *pb,
 void set_pb_chassis_in_sbrec(const struct sbrec_port_binding *pb,
                              const struct sbrec_chassis *chassis_rec,
                              bool is_set);
+void
+set_pb_additional_chassis_in_sbrec(const struct sbrec_port_binding *pb,
+                                   const struct sbrec_chassis *chassis_rec,
+                                   bool is_set);
+
+void remove_ovn_installed_for_uuid(const struct ovsrec_interface_table *,
+                                   const struct uuid *);
+
+void port_binding_set_down(const struct sbrec_chassis *chassis_rec,
+                           const struct sbrec_port_binding_table *pb_table,
+                           const char *iface_id,
+                           const struct uuid *pb_uuid);
 
 /* Corresponds to each Port_Binding.type. */
 enum en_lport_type {
diff --git a/controller/encaps.c b/controller/encaps.c
index 2662eaf98..b69d72584 100644
--- a/controller/encaps.c
+++ b/controller/encaps.c
@@ -36,6 +36,8 @@ VLOG_DEFINE_THIS_MODULE(encaps);
  */
 #define	OVN_MVTEP_CHASSISID_DELIM '@'
 
+static char *current_br_int_name = NULL;
+
 void
 encaps_register_ovs_idl(struct ovsdb_idl *ovs_idl)
 {
@@ -386,6 +388,21 @@ chassis_tzones_overlap(const struct sset *transport_zones,
     return false;
 }
 
+static void
+clear_old_tunnels(const struct ovsrec_bridge *old_br_int, const char *prefix,
+                  size_t prefix_len)
+{
+    for (size_t i = 0; i < old_br_int->n_ports; i++) {
+        const struct ovsrec_port *port = old_br_int->ports[i];
+        const char *id = smap_get(&port->external_ids, "ovn-chassis-id");
+        if (id && !strncmp(port->name, prefix, prefix_len)) {
+            VLOG_DBG("Clearing old tunnel port \"%s\" (%s) from bridge "
+                     "\"%s\".", port->name, id, old_br_int->name);
+            ovsrec_bridge_update_ports_delvalue(old_br_int, port);
+        }
+    }
+}
+
 void
 encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
            const struct ovsrec_bridge *br_int,
@@ -393,12 +410,42 @@ encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
            const struct sbrec_chassis *this_chassis,
            const struct sbrec_sb_global *sbg,
            const struct ovsrec_open_vswitch_table *ovs_table,
-           const struct sset *transport_zones)
+           const struct sset *transport_zones,
+           const struct ovsrec_bridge_table *bridge_table)
 {
     if (!ovs_idl_txn || !br_int) {
         return;
     }
 
+    if (!current_br_int_name) {
+        /* The controller has just started, we need to look through all
+         * bridges for old tunnel ports. */
+        char *tunnel_prefix = xasprintf("ovn%s-", get_chassis_idx(ovs_table));
+        size_t prefix_len = strlen(tunnel_prefix);
+
+        const struct ovsrec_bridge *br;
+        OVSREC_BRIDGE_TABLE_FOR_EACH (br, bridge_table) {
+            if (!strcmp(br->name, br_int->name)) {
+                continue;
+            }
+            clear_old_tunnels(br, tunnel_prefix, prefix_len);
+        }
+
+        free(tunnel_prefix);
+        current_br_int_name = xstrdup(br_int->name);
+    } else if (strcmp(current_br_int_name, br_int->name)) {
+        /* The integration bridge was changed, clear tunnel ports from
+         * the old one. */
+        const struct ovsrec_bridge *old_br_int =
+            get_bridge(bridge_table, current_br_int_name);
+        if (old_br_int) {
+            clear_old_tunnels(old_br_int, "", 0);
+        }
+
+        free(current_br_int_name);
+        current_br_int_name = xstrdup(br_int->name);
+    }
+
     const struct sbrec_chassis *chassis_rec;
 
     struct tunnel_ctx tc = {
@@ -511,3 +558,9 @@ encaps_cleanup(struct ovsdb_idl_txn *ovs_idl_txn,
 
     return !any_changes;
 }
+
+void
+encaps_destroy(void)
+{
+    free(current_br_int_name);
+}
diff --git a/controller/encaps.h b/controller/encaps.h
index 867c6f28c..3e58b3c82 100644
--- a/controller/encaps.h
+++ b/controller/encaps.h
@@ -35,7 +35,8 @@ void encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
                 const struct sbrec_chassis *,
                 const struct sbrec_sb_global *,
                 const struct ovsrec_open_vswitch_table *,
-                const struct sset *transport_zones);
+                const struct sset *transport_zones,
+                const struct ovsrec_bridge_table *bridge_table);
 
 bool encaps_cleanup(struct ovsdb_idl_txn *ovs_idl_txn,
                     const struct ovsrec_bridge *br_int);
@@ -46,4 +47,6 @@ bool  encaps_tunnel_id_parse(const char *tunnel_id, char **chassis_id,
 bool  encaps_tunnel_id_match(const char *tunnel_id, const char *chassis_id,
                              const char *encap_ip);
 
+void encaps_destroy(void);
+
 #endif /* controller/encaps.h */
diff --git a/controller/if-status.c b/controller/if-status.c
index d1c14ac30..2b2eb1679 100644
--- a/controller/if-status.c
+++ b/controller/if-status.c
@@ -18,12 +18,14 @@
 #include "binding.h"
 #include "if-status.h"
 #include "ofctrl-seqno.h"
+#include "ovsport.h"
 #include "simap.h"
 
 #include "lib/hmapx.h"
 #include "lib/util.h"
 #include "timeval.h"
 #include "openvswitch/vlog.h"
+#include "lib/vswitch-idl.h"
 #include "lib/ovn-sb-idl.h"
 
 VLOG_DEFINE_THIS_MODULE(if_status);
@@ -54,44 +56,54 @@ VLOG_DEFINE_THIS_MODULE(if_status);
  */
 
 enum if_state {
-    OIF_CLAIMED,       /* Newly claimed interface. pb->chassis update not yet
-                          initiated. */
-    OIF_INSTALL_FLOWS, /* Claimed interface with pb->chassis update sent to
-                        * SB (but update notification not confirmed, so the
-                        * update may be resent in any of the following states)
-                        * and for which flows are still being installed.
-                        */
-    OIF_MARK_UP,       /* Interface with flows successfully installed in OVS
-                        * but not yet marked "up" in the binding module (in
-                        * SB and OVS databases).
-                        */
-    OIF_MARK_DOWN,     /* Released interface but not yet marked "down" in the
-                        * binding module (in SB and/or OVS databases).
-                        */
-    OIF_INSTALLED,     /* Interface flows programmed in OVS and binding marked
-                        * "up" in the binding module.
-                        */
+    OIF_CLAIMED,          /* Newly claimed interface. pb->chassis update not
+                             yet initiated. */
+    OIF_INSTALL_FLOWS,    /* Claimed interface with pb->chassis update sent to
+                           * SB (but update notification not confirmed, so the
+                           * update may be resent in any of the following
+                           * states) and for which flows are still being
+                           * installed.
+                           */
+    OIF_REM_OLD_OVN_INST, /* Interface with flows successfully installed in OVS
+                           * but with ovn-installed still in OVSDB.
+                           */
+    OIF_MARK_UP,          /* Interface with flows successfully installed in OVS
+                           * but not yet marked "up" in the binding module (in
+                           * SB and OVS databases).
+                           */
+    OIF_MARK_DOWN,        /* Released interface but not yet marked "down" in
+                           * the binding module (in SB and/or OVS databases).
+                           */
+    OIF_INSTALLED,        /* Interface flows programmed in OVS and binding
+                           * marked "up" in the binding module.
+                           */
+    OIF_UPDATE_PORT,      /* Logical ports need to be set down, and pb->chassis
+                           * removed.
+                           */
     OIF_MAX,
 };
 
 static const char *if_state_names[] = {
-    [OIF_CLAIMED]       = "CLAIMED",
-    [OIF_INSTALL_FLOWS] = "INSTALL_FLOWS",
-    [OIF_MARK_UP]       = "MARK_UP",
-    [OIF_MARK_DOWN]     = "MARK_DOWN",
-    [OIF_INSTALLED]     = "INSTALLED",
+    [OIF_CLAIMED]          = "CLAIMED",
+    [OIF_INSTALL_FLOWS]    = "INSTALL_FLOWS",
+    [OIF_REM_OLD_OVN_INST] = "REM_OLD_OVN_INST",
+    [OIF_MARK_UP]          = "MARK_UP",
+    [OIF_MARK_DOWN]        = "MARK_DOWN",
+    [OIF_INSTALLED]        = "INSTALLED",
+    [OIF_UPDATE_PORT]      = "UPDATE_PORT",
 };
 
 /*
  *       +----------------------+
  * +---> |                      |
- * | +-> |         NULL         | <--------------------------------------+++-+
- * | |   +----------------------+                                            |
- * | |     ^ release_iface   | claim_iface()                                 |
- * | |     |                 V - sbrec_update_chassis(if sb is rw)           |
- * | |   +----------------------+                                            |
- * | |   |                      | <----------------------------------------+ |
- * | |   |       CLAIMED        | <--------------------------------------+ | |
+ * | +-> |         NULL         |
+ * | |   +----------------------+
+ * | |     ^ release_iface   | claim_iface()
+ * | |     |                 V - sbrec_update_chassis(if sb is rw)
+ * | |   +----------------------+
+ * | |   |                      | <------------------------------------------+
+ * | |   |       CLAIMED        | <----------------------------------------+ |
+ * | |   |                      | <--------------------------------------+ | |
  * | |   +----------------------+                                        | | |
  * | |                 |  V  ^                                           | | |
  * | |                 |  |  | handle_claims()                           | | |
@@ -109,43 +121,69 @@ static const char *if_state_names[] = {
  * |     |                      |   - remove ovn-installed from ovsdb    | | |
  * |     |                      |  mgr_update()                          | | |
  * |     +----------------------+   - sbrec_update_chassis if needed     | | |
- * |                    |                                                | | |
- * |                    |  mgr_run(seqno rcvd)                           | | |
- * |                    |  - set port up in sb                           | | |
- * | release_iface      |  - set ovn-installed in ovs                    | | |
- * |                    V                                                | | |
+ * |        |            |                                               | | |
+ * |        |            +----------------------------------------+      | | |
+ * |        |                                                     |      | | |
+ * |        | mgr_run(seqno rcvd, ovn-installed present)          |      | | |
+ * |        V                                                     |      | | |
+ * |    +--------------------+                                    |      | | |
+ * |    |                    |  mgr_run()                         |      | | |
+ * +--- | REM_OLD_OVN_INST   |  - remove ovn-installed in ovs     |      | | |
+ * |    +--------------------+                                    |      | | |
+ * |               |                                              |      | | |
+ * |               |                                              |      | | |
+ * |               | mgr_update( ovn_installed not present)       |      | | |
+ * |               |                                              |      | | |
+ * |               |  +-------------------------------------------+      | | |
+ * |               |  |                                                  | | |
+ * |               |  |  mgr_run(seqno rcvd, ovn-installed not present)  | | |
+ * |               |  |  - set port up in sb                             | | |
+ * |               |  |  - set ovn-installed in ovs                      | | |
+ * |release_iface  |  |                                                  | | |
+ * |               V  V                                                  | | |
  * |   +----------------------+                                          | | |
  * |   |                      |  mgr_run()                               | | |
- * +-- |       MARK_UP        |  - set port up in sb                     | | |
- *     |                      |  - set ovn-installed in ovs              | | |
- *     |                      |  mgr_update()                            | | |
- *     +----------------------+  - sbrec_update_chassis if needed        | | |
- *              |                                                        | | |
- *              | mgr_update(rcvd port up / ovn_installed & chassis set) | | |
- *              V                                                        | | |
- *     +----------------------+                                          | | |
- *     |      INSTALLED       | ------------> claim_iface ---------------+ | |
- *     +----------------------+                                            | |
- *              |                                                          | |
- *              | release_iface                                            | |
- *              V                                                          | |
- *     +----------------------+                                            | |
- *     |                      | ------------> claim_iface -----------------+ |
- *     |      MARK_DOWN       | ------> mgr_update(rcvd port down) ----------+
- *     |                      | mgr_run()
- *     |                      | - set port down in sb
- *     |                      | mgr_update()
+ * +---|       MARK_UP        |  - set port up in sb                     | | |
+ * |   |                      |  - set ovn-installed in ovs              | | |
+ * |   |                      |  mgr_update()                            | | |
+ * |   +----------------------+  - sbrec_update_chassis if needed        | | |
+ * |            |                                                        | | |
+ * |            | mgr_update(rcvd port up / ovn_installed & chassis set) | | |
+ * |            V                                                        | | |
+ * |   +----------------------+                                          | | |
+ * |   |      INSTALLED       | ------------> claim_iface ---------------+ | |
+ * |   +----------------------+                                            | |
+ * |                  |                                                    | |
+ * |                  | release_iface                                      | |
+ * |mgr_update(       |                                                    | |
+ * |  rcvd port down) |                                                    | |
+ * |                  V                                                    | |
+ * |   +----------------------+                                            | |
+ * |   |                      | ------------> claim_iface -----------------+ |
+ * +---+      MARK_DOWN       | mgr_run()                                    |
+ * |   |                      | - set port down in sb                        |
+ * |   |                      | mgr_update(sb is rw)                         |
+ * |   +----------------------+ - sbrec_update_chassis(NULL)                 |
+ * |                  |                                                      |
+ * |                  | mgr_update(local binding not found)                  |
+ * |                  |                                                      |
+ * |                  V                                                      |
+ * |   +----------------------+                                              |
+ * |   |                      | ------------> claim_iface -------------------+
+ * +---+      UPDATE_PORT     | mgr_run()
  *     +----------------------+ - sbrec_update_chassis(NULL)
  */
 
 
 struct ovs_iface {
     char *id;               /* Extracted from OVS external_ids.iface_id. */
+    struct uuid pb_uuid;    /* Port_binding uuid */
     enum if_state state;    /* State of the interface in the state machine. */
     uint32_t install_seqno; /* Seqno at which this interface is expected to
                              * be fully programmed in OVS.  Only used in state
                              * OIF_INSTALL_FLOWS.
                              */
+    uint16_t mtu;           /* Extracted from OVS interface.mtu field. */
 };
 
 static uint64_t ifaces_usage;
@@ -155,6 +193,9 @@ struct if_status_mgr {
     /* All local interfaces, mapping from 'iface-id' to 'struct ovs_iface'. */
     struct shash ifaces;
 
+    /* Local interfaces which need ovn-installed removal. */
+    struct shash ovn_uninstall_hash;
+
     /* All local interfaces, stored per state. */
     struct hmapx ifaces_per_state[OIF_MAX];
 
@@ -167,18 +208,24 @@ struct if_status_mgr {
     uint32_t iface_seqno;
 };
 
-static struct ovs_iface *ovs_iface_create(struct if_status_mgr *,
-                                          const char *iface_id,
-                                          enum if_state );
+static struct ovs_iface *
+ovs_iface_create(struct if_status_mgr *, const char *iface_id,
+                 const struct ovsrec_interface *iface_rec,
+                 enum if_state);
+static void add_to_ovn_uninstall_hash(struct if_status_mgr *, const char *,
+                                      const struct uuid *);
 static void ovs_iface_destroy(struct if_status_mgr *, struct ovs_iface *);
+static void ovn_uninstall_hash_destroy(struct if_status_mgr *mgr, char *name);
 static void ovs_iface_set_state(struct if_status_mgr *, struct ovs_iface *,
                                 enum if_state);
 
 static void if_status_mgr_update_bindings(
     struct if_status_mgr *mgr, struct local_binding_data *binding_data,
     const struct sbrec_chassis *,
+    const struct ovsrec_interface_table *iface_table,
     bool sb_readonly, bool ovs_readonly);
 
+static void ovn_uninstall_hash_account_mem(const char *name, bool erase);
 struct if_status_mgr *
 if_status_mgr_create(void)
 {
@@ -189,6 +236,7 @@ if_status_mgr_create(void)
         hmapx_init(&mgr->ifaces_per_state[i]);
     }
     shash_init(&mgr->ifaces);
+    shash_init(&mgr->ovn_uninstall_hash);
     return mgr;
 }
 
@@ -202,6 +250,11 @@ if_status_mgr_clear(struct if_status_mgr *mgr)
     }
     ovs_assert(shash_is_empty(&mgr->ifaces));
 
+    SHASH_FOR_EACH_SAFE (node, &mgr->ovn_uninstall_hash) {
+        ovn_uninstall_hash_destroy(mgr, node->data);
+    }
+    ovs_assert(shash_is_empty(&mgr->ovn_uninstall_hash));
+
     for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) {
         ovs_assert(hmapx_is_empty(&mgr->ifaces_per_state[i]));
     }
@@ -212,6 +265,7 @@ if_status_mgr_destroy(struct if_status_mgr *mgr)
 {
     if_status_mgr_clear(mgr);
     shash_destroy(&mgr->ifaces);
+    shash_destroy(&mgr->ovn_uninstall_hash);
     for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) {
         hmapx_destroy(&mgr->ifaces_per_state[i]);
     }
@@ -222,27 +276,35 @@ void
 if_status_mgr_claim_iface(struct if_status_mgr *mgr,
                           const struct sbrec_port_binding *pb,
                           const struct sbrec_chassis *chassis_rec,
-                          bool sb_readonly)
+                          const struct ovsrec_interface *iface_rec,
+                          bool sb_readonly, enum can_bind bind_type)
 {
     const char *iface_id = pb->logical_port;
     struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id);
 
     if (!iface) {
-        iface = ovs_iface_create(mgr, iface_id, OIF_CLAIMED);
+        iface = ovs_iface_create(mgr, iface_id, iface_rec, OIF_CLAIMED);
     }
 
+    memcpy(&iface->pb_uuid, &pb->header_.uuid, sizeof(iface->pb_uuid));
     if (!sb_readonly) {
-        set_pb_chassis_in_sbrec(pb, chassis_rec, true);
+        if (bind_type == CAN_BIND_AS_MAIN) {
+            set_pb_chassis_in_sbrec(pb, chassis_rec, true);
+        } else if (bind_type == CAN_BIND_AS_ADDITIONAL) {
+            set_pb_additional_chassis_in_sbrec(pb, chassis_rec, true);
+        }
     }
 
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
+    case OIF_REM_OLD_OVN_INST:
     case OIF_MARK_UP:
         /* Nothing to do here. */
         break;
     case OIF_INSTALLED:
     case OIF_MARK_DOWN:
+    case OIF_UPDATE_PORT:
         ovs_iface_set_state(mgr, iface, OIF_CLAIMED);
         break;
     case OIF_MAX:
@@ -271,9 +333,10 @@ if_status_mgr_release_iface(struct if_status_mgr *mgr, const char *iface_id)
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
-        /* Not yet fully installed interfaces can be safely deleted. */
-        ovs_iface_destroy(mgr, iface);
-        break;
+        /* Not yet fully installed interfaces:
+         * pb->chassis still needs to be deleted.
+         */
+    case OIF_REM_OLD_OVN_INST:
     case OIF_MARK_UP:
     case OIF_INSTALLED:
         /* Properly mark interfaces "down" if their flows were already
@@ -282,6 +345,7 @@ if_status_mgr_release_iface(struct if_status_mgr *mgr, const char *iface_id)
         ovs_iface_set_state(mgr, iface, OIF_MARK_DOWN);
         break;
     case OIF_MARK_DOWN:
+    case OIF_UPDATE_PORT:
         /* Nothing to do here. */
         break;
     case OIF_MAX:
@@ -302,9 +366,10 @@ if_status_mgr_delete_iface(struct if_status_mgr *mgr, const char *iface_id)
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
-        /* Not yet fully installed interfaces can be safely deleted. */
-        ovs_iface_destroy(mgr, iface);
-        break;
+        /* Not yet fully installed interfaces:
+         * pb->chassis still needs to be deleted.
+         */
+    case OIF_REM_OLD_OVN_INST:
     case OIF_MARK_UP:
     case OIF_INSTALLED:
         /* Properly mark interfaces "down" if their flows were already
@@ -313,6 +378,7 @@ if_status_mgr_delete_iface(struct if_status_mgr *mgr, const char *iface_id)
         ovs_iface_set_state(mgr, iface, OIF_MARK_DOWN);
         break;
     case OIF_MARK_DOWN:
+    case OIF_UPDATE_PORT:
         /* Nothing to do here. */
         break;
     case OIF_MAX:
@@ -346,12 +412,34 @@ if_status_handle_claims(struct if_status_mgr *mgr,
     return rc;
 }
 
+static void
+clean_ovn_installed(struct if_status_mgr *mgr,
+                    const struct ovsrec_interface_table *iface_table)
+{
+    struct shash_node *node;
+
+    SHASH_FOR_EACH_SAFE (node, &mgr->ovn_uninstall_hash) {
+        const struct uuid *iface_uuid = node->data;
+        remove_ovn_installed_for_uuid(iface_table, iface_uuid);
+        free(node->data);
+        char *node_name = shash_steal(&mgr->ovn_uninstall_hash, node);
+        ovn_uninstall_hash_account_mem(node_name, true);
+        free(node_name);
+    }
+}
+
 void
 if_status_mgr_update(struct if_status_mgr *mgr,
                      struct local_binding_data *binding_data,
                      const struct sbrec_chassis *chassis_rec,
+                     const struct ovsrec_interface_table *iface_table,
+                     const struct sbrec_port_binding_table *pb_table,
+                     bool ovs_readonly,
                      bool sb_readonly)
 {
+    if (!ovs_readonly) {
+        clean_ovn_installed(mgr, iface_table);
+    }
     if (!binding_data) {
         return;
     }
@@ -359,6 +447,17 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     struct shash *bindings = &binding_data->bindings;
     struct hmapx_node *node;
 
+    /* Move all interfaces that have been confirmed without ovn-installed,
+     * from OIF_REM_OLD_OVN_INST to OIF_MARK_UP.
+     */
+    HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_REM_OLD_OVN_INST]) {
+        struct ovs_iface *iface = node->data;
+
+        if (!local_binding_is_ovn_installed(bindings, iface->id)) {
+            ovs_iface_set_state(mgr, iface, OIF_MARK_UP);
+        }
+    }
+
     /* Interfaces in OIF_MARK_UP/INSTALL_FLOWS state have already set their
      * pb->chassis. However, the update might still be in fly (confirmation
      * not received yet) or pb->chassis was overwitten by another chassis.
@@ -390,6 +489,10 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_MARK_DOWN]) {
         struct ovs_iface *iface = node->data;
 
+        if (!local_binding_find(bindings, iface->id)) {
+            ovs_iface_set_state(mgr, iface, OIF_UPDATE_PORT);
+            continue;
+        }
         if (!sb_readonly) {
             local_binding_set_pb(bindings, iface->id, chassis_rec,
                                  NULL, false);
@@ -437,6 +540,21 @@ if_status_mgr_update(struct if_status_mgr *mgr,
         }
     }
 
+    if (!sb_readonly) {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_UPDATE_PORT]) {
+            struct ovs_iface *iface = node->data;
+            port_binding_set_down(chassis_rec, pb_table, iface->id,
+                                  &iface->pb_uuid);
+            ovs_iface_destroy(mgr, node->data);
+        }
+    } else {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_UPDATE_PORT]) {
+            struct ovs_iface *iface = node->data;
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_INFO_RL(&rl, "Not setting lport %s down as sb is readonly",
+                         iface->id);
+        }
+    }
     /* Register for a notification about flows being installed in OVS for all
      * newly claimed interfaces for which pb->chassis has been updated.
      * Request a seqno update when the flows for new interfaces have been
@@ -450,10 +568,23 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     }
 }
 
+void
+if_status_mgr_remove_ovn_installed(struct if_status_mgr *mgr,
+                                   const char *name,
+                                   const struct uuid *uuid)
+{
+    VLOG_DBG("Adding %s to list of interfaces for which to remove "
+              "ovn-installed", name);
+    if (!shash_find_data(&mgr->ovn_uninstall_hash, name)) {
+        add_to_ovn_uninstall_hash(mgr, name, uuid);
+    }
+}
+
 void
 if_status_mgr_run(struct if_status_mgr *mgr,
                   struct local_binding_data *binding_data,
                   const struct sbrec_chassis *chassis_rec,
+                  const struct ovsrec_interface_table *iface_table,
                   bool sb_readonly, bool ovs_readonly)
 {
     struct ofctrl_acked_seqnos *acked_seqnos =
@@ -471,12 +602,25 @@ if_status_mgr_run(struct if_status_mgr *mgr,
                                           iface->install_seqno)) {
             continue;
         }
-        ovs_iface_set_state(mgr, iface, OIF_MARK_UP);
+        /* Wait for ovn-installed to be absent before moving to MARK_UP state.
+         * Most of the time ovn-installed is already absent and hence we will
+         * not have to wait.
+         * If there is no binding_data, we can't determine if ovn-installed is
+         * present or not; hence also go to the OIF_REM_OLD_OVN_INST state.
+         */
+        if (!binding_data ||
+            local_binding_is_ovn_installed(&binding_data->bindings,
+                                           iface->id)) {
+            ovs_iface_set_state(mgr, iface, OIF_REM_OLD_OVN_INST);
+        } else {
+            ovs_iface_set_state(mgr, iface, OIF_MARK_UP);
+        }
     }
     ofctrl_acked_seqnos_destroy(acked_seqnos);
 
     /* Update binding states. */
     if_status_mgr_update_bindings(mgr, binding_data, chassis_rec,
+                                  iface_table,
                                   sb_readonly, ovs_readonly);
 }
 
@@ -492,8 +636,46 @@ ovs_iface_account_mem(const char *iface_id, bool erase)
     }
 }
 
+static void
+ovn_uninstall_hash_account_mem(const char *name, bool erase)
+{
+    uint32_t size = (strlen(name) + sizeof(struct uuid) +
+                     sizeof(struct shash_node));
+    if (erase) {
+        ifaces_usage -= size;
+    } else {
+        ifaces_usage += size;
+    }
+}
+
+uint16_t
+if_status_mgr_iface_get_mtu(const struct if_status_mgr *mgr,
+                            const char *iface_id)
+{
+    const struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id);
+    return iface ? iface->mtu : 0;
+}
+
+bool
+if_status_mgr_iface_update(const struct if_status_mgr *mgr,
+                           const struct ovsrec_interface *iface_rec)
+{
+    const char *iface_id = smap_get(&iface_rec->external_ids, "iface-id");
+    if (!iface_id) {
+        return false;
+    }
+    uint16_t mtu = get_iface_mtu(iface_rec);
+    struct ovs_iface *iface = shash_find_data(&mgr->ifaces, iface_id);
+    if (iface && iface->mtu != mtu) {
+        iface->mtu = mtu;
+        return true;
+    }
+    return false;
+}
+
 static struct ovs_iface *
 ovs_iface_create(struct if_status_mgr *mgr, const char *iface_id,
+                 const struct ovsrec_interface *iface_rec,
                  enum if_state state)
 {
     struct ovs_iface *iface = xzalloc(sizeof *iface);
@@ -503,9 +685,20 @@ ovs_iface_create(struct if_status_mgr *mgr, const char *iface_id,
     shash_add_nocopy(&mgr->ifaces, iface->id, iface);
     ovs_iface_set_state(mgr, iface, state);
     ovs_iface_account_mem(iface_id, false);
+    if_status_mgr_iface_update(mgr, iface_rec);
     return iface;
 }
 
+static void
+add_to_ovn_uninstall_hash(struct if_status_mgr *mgr, const char *name,
+                          const struct uuid *uuid)
+{
+    struct uuid *new_uuid = xzalloc(sizeof *new_uuid);
+    memcpy(new_uuid, uuid, sizeof(*new_uuid));
+    shash_add(&mgr->ovn_uninstall_hash, name, new_uuid);
+    ovn_uninstall_hash_account_mem(name, false);
+}
+
 static void
 ovs_iface_destroy(struct if_status_mgr *mgr, struct ovs_iface *iface)
 {
@@ -521,6 +714,23 @@ ovs_iface_destroy(struct if_status_mgr *mgr, struct ovs_iface *iface)
     free(iface);
 }
 
+static void
+ovn_uninstall_hash_destroy(struct if_status_mgr *mgr, char *name)
+{
+    struct shash_node *node = shash_find(&mgr->ovn_uninstall_hash, name);
+    char *node_name = NULL;
+    if (node) {
+        free(node->data);
+        VLOG_DBG("Interface name %s destroy", name);
+        node_name = shash_steal(&mgr->ovn_uninstall_hash, node);
+        ovn_uninstall_hash_account_mem(name, true);
+        free(node_name);
+    } else {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "Interface name %s not found", name);
+    }
+}
+
 static void
 ovs_iface_set_state(struct if_status_mgr *mgr, struct ovs_iface *iface,
                     enum if_state state)
@@ -539,6 +749,7 @@ static void
 if_status_mgr_update_bindings(struct if_status_mgr *mgr,
                               struct local_binding_data *binding_data,
                               const struct sbrec_chassis *chassis_rec,
+                              const struct ovsrec_interface_table *iface_table,
                               bool sb_readonly, bool ovs_readonly)
 {
     if (!binding_data) {
@@ -558,7 +769,17 @@ if_status_mgr_update_bindings(struct if_status_mgr *mgr,
                                sb_readonly, ovs_readonly);
     }
 
-    /* Notifiy the binding module to set "up" all bindings that have had
+    /* Notify the binding module to remove "ovn-installed" for all bindings
+     * in the OIF_REM_OLD_OVN_INST state.
+     */
+    HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_REM_OLD_OVN_INST]) {
+        struct ovs_iface *iface = node->data;
+
+        local_binding_remove_ovn_installed(bindings, iface_table, iface->id,
+                                           ovs_readonly);
+    }
+
+    /* Notify the binding module to set "up" all bindings that have had
      * their flows installed but are not yet marked "up" in the binding
      * module.
      */
diff --git a/controller/if-status.h b/controller/if-status.h
index 5bd187a25..15624bcfa 100644
--- a/controller/if-status.h
+++ b/controller/if-status.h
@@ -17,8 +17,10 @@
 #define IF_STATUS_H 1
 
 #include "openvswitch/shash.h"
+#include "lib/vswitch-idl.h"
 
 #include "binding.h"
+#include "lport.h"
 
 struct if_status_mgr;
 struct simap;
@@ -29,15 +31,20 @@ void if_status_mgr_destroy(struct if_status_mgr *);
 void if_status_mgr_claim_iface(struct if_status_mgr *,
                                const struct sbrec_port_binding *pb,
                                const struct sbrec_chassis *chassis_rec,
-                               bool sb_readonly);
+                               const struct ovsrec_interface *iface_rec,
+                               bool sb_readonly, enum can_bind bind_type);
 void if_status_mgr_release_iface(struct if_status_mgr *, const char *iface_id);
 void if_status_mgr_delete_iface(struct if_status_mgr *, const char *iface_id);
 
 void if_status_mgr_update(struct if_status_mgr *, struct local_binding_data *,
                           const struct sbrec_chassis *chassis,
+                          const struct ovsrec_interface_table *iface_table,
+                          const struct sbrec_port_binding_table *pb_table,
+                          bool ovs_readonly,
                           bool sb_readonly);
 void if_status_mgr_run(struct if_status_mgr *mgr, struct local_binding_data *,
                        const struct sbrec_chassis *,
+                       const struct ovsrec_interface_table *iface_table,
                        bool sb_readonly, bool ovs_readonly);
 void if_status_mgr_get_memory_usage(struct if_status_mgr *mgr,
                                     struct simap *usage);
@@ -48,5 +55,12 @@ bool if_status_handle_claims(struct if_status_mgr *mgr,
                              const struct sbrec_chassis *chassis_rec,
                              struct hmap *tracked_datapath,
                              bool sb_readonly);
+void if_status_mgr_remove_ovn_installed(struct if_status_mgr *mgr,
+                                        const char *name,
+                                        const struct uuid *uuid);
+uint16_t if_status_mgr_iface_get_mtu(const struct if_status_mgr *mgr,
+                                     const char *iface_id);
+bool if_status_mgr_iface_update(const struct if_status_mgr *mgr,
+                                const struct ovsrec_interface *iface_rec);
 
 # endif /* controller/if-status.h */
diff --git a/controller/lflow.c b/controller/lflow.c
index 6a98b19e1..22faaf013 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -397,7 +397,7 @@ consider_lflow_for_added_as_ips__(
                             : OFTABLE_LOG_EGRESS_PIPELINE);
     uint8_t ptable = first_ptable + lflow->table_id;
     uint8_t output_ptable = (ingress
-                             ? OFTABLE_REMOTE_OUTPUT
+                             ? OFTABLE_OUTPUT_INIT
                              : OFTABLE_SAVE_INPORT);
 
     uint64_t ovnacts_stub[1024 / 8];
@@ -1067,7 +1067,7 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
                             : OFTABLE_LOG_EGRESS_PIPELINE);
     uint8_t ptable = first_ptable + lflow->table_id;
     uint8_t output_ptable = (ingress
-                             ? OFTABLE_REMOTE_OUTPUT
+                             ? OFTABLE_OUTPUT_INIT
                              : OFTABLE_SAVE_INPORT);
 
     /* Parse OVN logical actions.
@@ -1729,6 +1729,7 @@ add_lb_vip_hairpin_flows(const struct ovn_controller_lb *lb,
 
 static void
 add_lb_ct_snat_hairpin_for_dp(const struct ovn_controller_lb *lb,
+                              bool has_vip_port,
                               const struct sbrec_datapath_binding *datapath,
                               const struct hmap *local_datapaths,
                               struct match *dp_match,
@@ -1742,15 +1743,21 @@ add_lb_ct_snat_hairpin_for_dp(const struct ovn_controller_lb *lb,
         match_set_metadata(dp_match, htonll(datapath->tunnel_key));
     }
 
+    uint16_t priority = datapath ? 200 : 100;
+    if (!has_vip_port) {
+        /* If L4 ports are not specified for the current LB, we will decrease
+         * the flow priority in order to not collide with other LBs with more
+         * fine-grained configuration.
+         */
+        priority -= 10;
+    }
     /* A flow added for the "hairpin_snat_ip" case will have an extra
      * datapath match, but it will also match on the less restrictive
      * general case.  Therefore, we set the priority in the
      * "hairpin_snat_ip" case to be higher than the general case. */
-    ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN,
-                              datapath ? 200 : 100,
-                              lb->slb->header_.uuid.parts[0],
-                              dp_match, dp_acts, &lb->slb->header_.uuid,
-                              NX_CTLR_NO_METER, NULL);
+    ofctrl_add_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN,
+                    priority, lb->slb->header_.uuid.parts[0],
+                    dp_match, dp_acts, &lb->slb->header_.uuid);
 }
 
 /* Add a ct_snat flow for each VIP of the LB.  If this LB does not use
@@ -1836,8 +1843,8 @@ add_lb_ct_snat_hairpin_vip_flow(const struct ovn_controller_lb *lb,
         }
     }
 
-    match_set_nw_proto(&match, lb->proto);
     if (lb_vip->vip_port) {
+        match_set_nw_proto(&match, lb->proto);
         if (!lb->hairpin_orig_tuple) {
             match_set_ct_nw_proto(&match, lb->proto);
             match_set_ct_tp_dst(&match, htons(lb_vip->vip_port));
@@ -1854,18 +1861,20 @@ add_lb_ct_snat_hairpin_vip_flow(const struct ovn_controller_lb *lb,
     }
 
     if (!use_hairpin_snat_ip) {
-        add_lb_ct_snat_hairpin_for_dp(lb, NULL, NULL,
+        add_lb_ct_snat_hairpin_for_dp(lb, !!lb_vip->vip_port, NULL, NULL,
                                       &match, &ofpacts, flow_table);
     } else {
         for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
-            add_lb_ct_snat_hairpin_for_dp(lb, lb->slb->datapaths[i],
-                                          local_datapaths,
-                                          &match, &ofpacts, flow_table);
+            add_lb_ct_snat_hairpin_for_dp(lb, !!lb_vip->vip_port,
+                                          lb->slb->datapaths[i],
+                                          local_datapaths, &match,
+                                          &ofpacts, flow_table);
         }
         if (lb->slb->datapath_group) {
             for (size_t i = 0; i < lb->slb->datapath_group->n_datapaths; i++) {
                 add_lb_ct_snat_hairpin_for_dp(
-                    lb, lb->slb->datapath_group->datapaths[i],
+                    lb, !!lb_vip->vip_port,
+                    lb->slb->datapath_group->datapaths[i],
                     local_datapaths, &match, &ofpacts, flow_table);
             }
         }
diff --git a/controller/lflow.h b/controller/lflow.h
index dd742257b..2472dec29 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -63,27 +63,36 @@ struct uuid;
  *
  * These are heavily documented in ovn-architecture(7), please update it if
  * you make any changes. */
-#define OFTABLE_PHY_TO_LOG            0
-#define OFTABLE_LOG_INGRESS_PIPELINE  8 /* First of LOG_PIPELINE_LEN tables. */
-#define OFTABLE_REMOTE_OUTPUT        37
-#define OFTABLE_LOCAL_OUTPUT         38
-#define OFTABLE_CHECK_LOOPBACK       39
-#define OFTABLE_LOG_EGRESS_PIPELINE  40 /* First of LOG_PIPELINE_LEN tables. */
-#define OFTABLE_SAVE_INPORT          64
-#define OFTABLE_LOG_TO_PHY           65
-#define OFTABLE_MAC_BINDING          66
-#define OFTABLE_MAC_LOOKUP           67
-#define OFTABLE_CHK_LB_HAIRPIN       68
-#define OFTABLE_CHK_LB_HAIRPIN_REPLY 69
-#define OFTABLE_CT_SNAT_HAIRPIN      70
-#define OFTABLE_GET_FDB              71
-#define OFTABLE_LOOKUP_FDB           72
-#define OFTABLE_CHK_IN_PORT_SEC      73
-#define OFTABLE_CHK_IN_PORT_SEC_ND   74
-#define OFTABLE_CHK_OUT_PORT_SEC     75
-#define OFTABLE_ECMP_NH_MAC          76
-#define OFTABLE_ECMP_NH              77
-#define OFTABLE_CHK_LB_AFFINITY      78
+#define OFTABLE_PHY_TO_LOG                0
+
+/* Start of LOG_PIPELINE_LEN tables. */
+#define OFTABLE_LOG_INGRESS_PIPELINE      8
+#define OFTABLE_OUTPUT_LARGE_PKT_DETECT  37
+#define OFTABLE_OUTPUT_LARGE_PKT_PROCESS 38
+#define OFTABLE_REMOTE_OUTPUT            39
+#define OFTABLE_LOCAL_OUTPUT             40
+#define OFTABLE_CHECK_LOOPBACK           41
+
+/* Start of the OUTPUT section of the pipeline. */
+#define OFTABLE_OUTPUT_INIT OFTABLE_OUTPUT_LARGE_PKT_DETECT
+
+/* Start of LOG_PIPELINE_LEN tables. */
+#define OFTABLE_LOG_EGRESS_PIPELINE      42
+#define OFTABLE_SAVE_INPORT              64
+#define OFTABLE_LOG_TO_PHY               65
+#define OFTABLE_MAC_BINDING              66
+#define OFTABLE_MAC_LOOKUP               67
+#define OFTABLE_CHK_LB_HAIRPIN           68
+#define OFTABLE_CHK_LB_HAIRPIN_REPLY     69
+#define OFTABLE_CT_SNAT_HAIRPIN          70
+#define OFTABLE_GET_FDB                  71
+#define OFTABLE_LOOKUP_FDB               72
+#define OFTABLE_CHK_IN_PORT_SEC          73
+#define OFTABLE_CHK_IN_PORT_SEC_ND       74
+#define OFTABLE_CHK_OUT_PORT_SEC         75
+#define OFTABLE_ECMP_NH_MAC              76
+#define OFTABLE_ECMP_NH                  77
+#define OFTABLE_CHK_LB_AFFINITY          78
 
 struct lflow_ctx_in {
     struct ovsdb_idl_index *sbrec_multicast_group_by_name_datapath;
diff --git a/controller/local_data.c b/controller/local_data.c
index acaf1de6d..cf0b21bb1 100644
--- a/controller/local_data.c
+++ b/controller/local_data.c
@@ -22,6 +22,7 @@
 #include "lib/util.h"
 #include "lib/vswitch-idl.h"
 #include "openvswitch/vlog.h"
+#include "socket-util.h"
 
 /* OVN includes. */
 #include "encaps.h"
@@ -447,6 +448,7 @@ local_nonvif_data_run(const struct ovsrec_bridge *br_int,
                 tun->chassis_id = xstrdup(tunnel_id);
                 tun->ofport = u16_to_ofp(ofport);
                 tun->type = tunnel_type;
+                tun->is_ipv6 = ip ? addr_is_ipv6(ip) : false;
 
                 free(hash_id);
                 free(ip);
diff --git a/controller/local_data.h b/controller/local_data.h
index 748f009aa..ad0fa7f94 100644
--- a/controller/local_data.h
+++ b/controller/local_data.h
@@ -133,6 +133,7 @@ struct chassis_tunnel {
     char *chassis_id;
     ofp_port_t ofport;
     enum chassis_tunnel_type type;
+    bool is_ipv6;
 };
 
 void local_nonvif_data_run(const struct ovsrec_bridge *br_int,
diff --git a/controller/mirror.c b/controller/mirror.c
index 665736966..0e5885e9b 100644
--- a/controller/mirror.c
+++ b/controller/mirror.c
@@ -22,6 +22,7 @@
 
 /* OVS includes. */
 #include "lib/vswitch-idl.h"
+#include "lib/socket-util.h"
 #include "include/openvswitch/shash.h"
 #include "openvswitch/vlog.h"
 
@@ -69,6 +70,7 @@ static void set_mirror_iface_options(struct ovsrec_interface *,
 static const struct ovsrec_port *get_iface_port(
     const struct ovsrec_interface *, const struct ovsrec_bridge *);
 
+char *get_mirror_tunnel_type(const struct sbrec_mirror *);
 
 void
 mirror_register_ovs_idl(struct ovsdb_idl *ovs_idl)
@@ -244,24 +246,26 @@ set_mirror_iface_options(struct ovsrec_interface *iface,
     smap_destroy(&options);
 }
 
+char *
+get_mirror_tunnel_type(const struct sbrec_mirror *sb_mirror)
+{
+    bool is_ipv6 = addr_is_ipv6(sb_mirror->sink);
+
+    return xasprintf(is_ipv6 ? "ip6%s" : "%s", sb_mirror->type);
+}
+
 static void
 check_and_update_interface_table(const struct sbrec_mirror *sb_mirror,
                                  const struct ovsrec_mirror *ovs_mirror)
 {
-    char *type;
-    struct ovsrec_interface *iface =
-                          ovs_mirror->output_port->interfaces[0];
-    struct smap *opts = &iface->options;
-    const char *erspan_ver = smap_get(opts, "erspan_ver");
-    if (erspan_ver) {
-        type = "erspan";
-    } else {
-        type = "gre";
-    }
-    if (strcmp(type, sb_mirror->type)) {
-        ovsrec_interface_set_type(iface, sb_mirror->type);
+    struct ovsrec_interface *iface = ovs_mirror->output_port->interfaces[0];
+    char *type = get_mirror_tunnel_type(sb_mirror);
+
+    if (strcmp(type, iface->type)) {
+        ovsrec_interface_set_type(iface, type);
     }
     set_mirror_iface_options(iface, sb_mirror);
+    free(type);
 }
 
 static void
@@ -327,8 +331,11 @@ create_ovs_mirror(struct ovn_mirror *m, struct ovsdb_idl_txn *ovs_idl_txn,
     char *port_name = xasprintf("ovn-%s", m->name);
 
     ovsrec_interface_set_name(iface, port_name);
-    ovsrec_interface_set_type(iface, m->sb_mirror->type);
+
+    char *type = get_mirror_tunnel_type(m->sb_mirror);
+    ovsrec_interface_set_type(iface, type);
     set_mirror_iface_options(iface, m->sb_mirror);
+    free(type);
 
     struct ovsrec_port *port = ovsrec_port_insert(ovs_idl_txn);
     ovsrec_port_set_name(port, port_name);
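
Illustrative mapping produced by get_mirror_tunnel_type() above (the sink and
type values are examples, not taken from the patch):

    /* sink "192.0.2.1",   type "gre"    -> interface type "gre"
     * sink "2001:db8::1", type "gre"    -> interface type "ip6gre"
     * sink "2001:db8::1", type "erspan" -> interface type "ip6erspan" */
    char *type = get_mirror_tunnel_type(sb_mirror);
    ovsrec_interface_set_type(iface, type);
    free(type);
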
diff --git a/controller/ofctrl.c b/controller/ofctrl.c
index b1ba1c743..64a444ff6 100644
--- a/controller/ofctrl.c
+++ b/controller/ofctrl.c
@@ -766,13 +766,18 @@ ofctrl_get_mf_field_id(void)
 
 /* Runs the OpenFlow state machine against 'br_int', which is local to the
  * hypervisor on which we are running.  Attempts to negotiate a Geneve option
- * field for class OVN_GENEVE_CLASS, type OVN_GENEVE_TYPE. */
-void
+ * field for class OVN_GENEVE_CLASS, type OVN_GENEVE_TYPE.
+ *
+ * Returns 'true' if an OpenFlow reconnect happened; 'false' otherwise.
+ */
+bool
 ofctrl_run(const struct ovsrec_bridge *br_int,
            const struct ovsrec_open_vswitch_table *ovs_table,
            struct shash *pending_ct_zones)
 {
     char *target = xasprintf("unix:%s/%s.mgmt", ovs_rundir(), br_int->name);
+    bool reconnected = false;
+
     if (strcmp(target, rconn_get_target(swconn))) {
         VLOG_INFO("%s: connecting to switch", target);
         rconn_connect(swconn, target, target);
@@ -782,10 +787,12 @@ ofctrl_run(const struct ovsrec_bridge *br_int,
     rconn_run(swconn);
 
     if (!rconn_is_connected(swconn)) {
-        return;
+        return reconnected;
     }
+
     if (seqno != rconn_get_connection_seqno(swconn)) {
         seqno = rconn_get_connection_seqno(swconn);
+        reconnected = true;
         state = S_NEW;
 
         /* Reset the state of any outstanding ct flushes to resend them. */
@@ -855,6 +862,8 @@ ofctrl_run(const struct ovsrec_bridge *br_int,
          * point, so ensure that we come back again without waiting. */
         poll_immediate_wake();
     }
+
+    return reconnected;
 }
 
 void
@@ -909,6 +918,7 @@ ofctrl_recv(const struct ofp_header *oh, enum ofptype type)
     } else if (type == OFPTYPE_ERROR) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
         log_openflow_rl(&rl, VLL_INFO, oh, "OpenFlow error");
+        rconn_reconnect(swconn);
     } else {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
         log_openflow_rl(&rl, VLL_DBG, oh, "OpenFlow packet ignored");
diff --git a/controller/ofctrl.h b/controller/ofctrl.h
index f5751e3ee..105f9370b 100644
--- a/controller/ofctrl.h
+++ b/controller/ofctrl.h
@@ -51,7 +51,7 @@ struct ovn_desired_flow_table {
 void ofctrl_init(struct ovn_extend_table *group_table,
                  struct ovn_extend_table *meter_table,
                  int inactivity_probe_interval);
-void ofctrl_run(const struct ovsrec_bridge *br_int,
+bool ofctrl_run(const struct ovsrec_bridge *br_int,
                 const struct ovsrec_open_vswitch_table *,
                 struct shash *pending_ct_zones);
 enum mf_field_id ofctrl_get_mf_field_id(void);
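
A minimal sketch of how the new boolean return value of ofctrl_run() is meant
to be consumed (it matches the ovn-controller.c main-loop hunk further down in
this patch):

    if (ofctrl_run(br_int, ovs_table, &ct_zones_data->pending)) {
        /* The OpenFlow connection was re-established; force a full
         * incremental-processing recompute so the desired flows are
         * re-installed on the freshly connected bridge. */
        engine_set_force_recompute(true);
    }
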
diff --git a/controller/ovn-controller.8.xml b/controller/ovn-controller.8.xml
index ab52e2d34..f61f43008 100644
--- a/controller/ovn-controller.8.xml
+++ b/controller/ovn-controller.8.xml
@@ -121,11 +121,11 @@
           that is needed in the current chassis.
         </p>
         <p>
-          It is more optimal to set it to <code>true</code> in use cases when
-          the chassis would anyway need to monitor most of the records in
-          <var>ovs-database</var>, which would save the overhead of conditions
-          processing, especially for server side.  Typically, set it to
-          <code>true</code> for environments that all workloads need to be
+          It is more efficient to set it to <code>true</code> in use cases
+          where the chassis would anyway need to monitor most of the records in
+          the <var>OVN Southbound</var> database, which saves the overhead of
+          condition processing, especially on the server side.  Typically, set
+          it to <code>true</code> for environments where all workloads need to be
           reachable from each other.
         </p>
         <p>
@@ -171,16 +171,14 @@
         </p>
 
         <p>
-          Supported tunnel types for connecting hypervisors
-          are <code>geneve</code> and <code>stt</code>.  Gateways may
-          use <code>geneve</code>, <code>vxlan</code>, or
-          <code>stt</code>.
+          Supported tunnel types for connecting hypervisors and gateways
+          are <code>geneve</code>, <code>vxlan</code>, and <code>stt</code>.
         </p>
 
         <p>
           Due to the limited amount of metadata in <code>vxlan</code>,
-          the capabilities and performance of connected gateways will be
-          reduced versus other tunnel formats.
+          the capabilities and performance of connected gateways and
+          hypervisors will be reduced versus other tunnel formats.
         </p>
       </dd>
 
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 2d18bbfca..ead789fb9 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -60,6 +60,7 @@
 #include "lib/ovn-dirs.h"
 #include "lib/ovn-sb-idl.h"
 #include "lib/ovn-util.h"
+#include "ovsport.h"
 #include "patch.h"
 #include "vif-plug.h"
 #include "vif-plug-provider.h"
@@ -712,7 +713,7 @@ get_snat_ct_zone(const struct sbrec_datapath_binding *dp)
 }
 
 static void
-update_ct_zones(const struct shash *binding_lports,
+update_ct_zones(const struct sset *local_lports,
                 const struct hmap *local_datapaths,
                 struct simap *ct_zones, unsigned long *ct_zone_bitmap,
                 struct shash *pending_ct_zones)
@@ -725,9 +726,9 @@ update_ct_zones(const struct shash *binding_lports,
     unsigned long unreq_snat_zones_map[BITMAP_N_LONGS(MAX_CT_ZONES)];
     struct simap unreq_snat_zones = SIMAP_INITIALIZER(&unreq_snat_zones);
 
-    struct shash_node *shash_node;
-    SHASH_FOR_EACH (shash_node, binding_lports) {
-        sset_add(&all_users, shash_node->name);
+    const char *local_lport;
+    SSET_FOR_EACH (local_lport, local_lports) {
+        sset_add(&all_users, local_lport);
     }
 
     /* Local patched datapath (gateway routers) need zones assigned. */
@@ -1060,6 +1061,7 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_name);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_bfd);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_bfd_status);
+    ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_mtu);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_type);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_options);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_ofport);
@@ -1158,6 +1160,56 @@ en_ofctrl_is_connected_run(struct engine_node *node, void *data)
     engine_set_node_state(node, EN_UNCHANGED);
 }
 
+struct ed_type_if_status_mgr {
+    const struct if_status_mgr *manager;
+    const struct ovsrec_interface_table *iface_table;
+};
+
+static void *
+en_if_status_mgr_init(struct engine_node *node OVS_UNUSED,
+                      struct engine_arg *arg OVS_UNUSED)
+{
+    struct ed_type_if_status_mgr *data = xzalloc(sizeof *data);
+    return data;
+}
+
+static void
+en_if_status_mgr_cleanup(void *data OVS_UNUSED)
+{
+}
+
+static void
+en_if_status_mgr_run(struct engine_node *node, void *data_)
+{
+    enum engine_node_state state = EN_UNCHANGED;
+    struct ed_type_if_status_mgr *data = data_;
+    struct controller_engine_ctx *ctrl_ctx = engine_get_context()->client_ctx;
+    data->manager = ctrl_ctx->if_mgr;
+    data->iface_table = EN_OVSDB_GET(engine_get_input("OVS_interface", node));
+
+    const struct ovsrec_interface *iface;
+    OVSREC_INTERFACE_TABLE_FOR_EACH (iface, data->iface_table) {
+        if (if_status_mgr_iface_update(data->manager, iface)) {
+            state = EN_UPDATED;
+        }
+    }
+    engine_set_node_state(node, state);
+}
+
+static bool
+if_status_mgr_ovs_interface_handler(struct engine_node *node, void *data)
+{
+    struct ed_type_if_status_mgr *data_ = data;
+
+    const struct ovsrec_interface *iface;
+    OVSREC_INTERFACE_TABLE_FOR_EACH_TRACKED (iface, data_->iface_table) {
+        if (if_status_mgr_iface_update(data_->manager, iface)) {
+            engine_set_node_state(node, EN_UPDATED);
+        }
+    }
+    return true;
+}
+
 /* This engine node is to wrap the OVS_interface input and maintain a copy of
  * the old version of data for the column external_ids.
  *
@@ -2010,7 +2062,11 @@ addr_sets_update(const struct sbrec_address_set_table *address_set_table,
         if (sbrec_address_set_is_deleted(as)) {
             expr_const_sets_remove(addr_sets, as->name);
             sset_add(deleted, as->name);
-        } else {
+        }
+    }
+
+    SBREC_ADDRESS_SET_TABLE_FOR_EACH_TRACKED (as, address_set_table) {
+        if (!sbrec_address_set_is_deleted(as)) {
             struct expr_constant_set *cs_old = shash_find_data(addr_sets,
                                                                as->name);
             if (!cs_old) {
@@ -2381,7 +2437,7 @@ en_ct_zones_run(struct engine_node *node, void *data)
         EN_OVSDB_GET(engine_get_input("OVS_bridge", node));
 
     restore_ct_zones(bridge_table, ovs_table, ct_zones_data);
-    update_ct_zones(&rt_data->lbinding_data.lports, &rt_data->local_datapaths,
+    update_ct_zones(&rt_data->local_lports, &rt_data->local_datapaths,
                     &ct_zones_data->current, ct_zones_data->bitmap,
                     &ct_zones_data->pending);
 
@@ -2471,8 +2527,10 @@ ct_zones_runtime_data_handler(struct engine_node *node, void *data)
         SHASH_FOR_EACH (shash_node, &tdp->lports) {
             struct tracked_lport *t_lport = shash_node->data;
             if (strcmp(t_lport->pb->type, "")
-                && strcmp(t_lport->pb->type, "localport")) {
-                /* We allocate zone-id's only to VIF and localport lports. */
+                && strcmp(t_lport->pb->type, "localport")
+                && strcmp(t_lport->pb->type, "localnet")) {
+                /* We allocate zone-id's only to VIF, localport, and localnet
+                 * lports. */
                 continue;
             }
 
@@ -2697,7 +2755,8 @@ static void
 lb_data_removed_five_tuples_add(struct ed_type_lb_data *lb_data,
                                 const struct ovn_controller_lb *lb)
 {
-    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT)) {
+    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT) ||
+        !lb->ct_flush) {
         return;
     }
 
@@ -2716,7 +2775,8 @@ static void
 lb_data_removed_five_tuples_remove(struct ed_type_lb_data *lb_data,
                                    const struct ovn_controller_lb *lb)
 {
-    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT)) {
+    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT) ||
+        !lb->ct_flush) {
         return;
     }
 
@@ -4048,6 +4108,9 @@ static void init_physical_ctx(struct engine_node *node,
     const struct ed_type_mff_ovn_geneve *ed_mff_ovn_geneve =
         engine_get_input_data("mff_ovn_geneve", node);
 
+    const struct ovsrec_interface_table *ovs_interface_table =
+        EN_OVSDB_GET(engine_get_input("if_status_mgr", node));
+
     const struct ovsrec_open_vswitch_table *ovs_table =
         EN_OVSDB_GET(engine_get_input("OVS_open_vswitch", node));
     const struct ovsrec_bridge_table *bridge_table =
@@ -4072,6 +4135,7 @@ static void init_physical_ctx(struct engine_node *node,
     p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
     p_ctx->sbrec_port_binding_by_datapath = sbrec_port_binding_by_datapath;
     p_ctx->port_binding_table = port_binding_table;
+    p_ctx->ovs_interface_table = ovs_interface_table;
     p_ctx->mc_group_table = multicast_group_table;
     p_ctx->br_int = br_int;
     p_ctx->chassis_table = chassis_table;
@@ -4085,6 +4149,9 @@ static void init_physical_ctx(struct engine_node *node,
     p_ctx->patch_ofports = &non_vif_data->patch_ofports;
     p_ctx->chassis_tunnels = &non_vif_data->chassis_tunnels;
 
+    struct controller_engine_ctx *ctrl_ctx = engine_get_context()->client_ctx;
+    p_ctx->if_mgr = ctrl_ctx->if_mgr;
+
     pflow_output_get_debug(node, &p_ctx->debug);
 }
 
@@ -4128,6 +4195,63 @@ en_pflow_output_run(struct engine_node *node, void *data)
     engine_set_node_state(node, EN_UPDATED);
 }
 
+static bool
+pflow_output_if_status_mgr_handler(struct engine_node *node,
+                                   void *data)
+{
+    struct ed_type_pflow_output *pfo = data;
+    struct ed_type_runtime_data *rt_data =
+        engine_get_input_data("runtime_data", node);
+    struct ed_type_non_vif_data *non_vif_data =
+        engine_get_input_data("non_vif_data", node);
+    struct ed_type_if_status_mgr *if_mgr_data =
+        engine_get_input_data("if_status_mgr", node);
+
+    struct physical_ctx p_ctx;
+    init_physical_ctx(node, rt_data, non_vif_data, &p_ctx);
+
+    const struct ovsrec_interface *iface;
+    OVSREC_INTERFACE_TABLE_FOR_EACH_TRACKED (iface, if_mgr_data->iface_table) {
+        const char *iface_id = smap_get(&iface->external_ids, "iface-id");
+        if (!iface_id) {
+            continue;
+        }
+
+        const struct sbrec_port_binding *pb = lport_lookup_by_name(
+            p_ctx.sbrec_port_binding_by_name, iface_id);
+        if (!pb) {
+            continue;
+        }
+        if (pb->n_additional_chassis) {
+            /* Update flows for all ports in datapath. */
+            struct sbrec_port_binding *target =
+                sbrec_port_binding_index_init_row(
+                    p_ctx.sbrec_port_binding_by_datapath);
+            sbrec_port_binding_index_set_datapath(target, pb->datapath);
+
+            const struct sbrec_port_binding *binding;
+            SBREC_PORT_BINDING_FOR_EACH_EQUAL (
+                    binding, target, p_ctx.sbrec_port_binding_by_datapath) {
+                bool removed = sbrec_port_binding_is_deleted(binding);
+                if (!physical_handle_flows_for_lport(binding, removed, &p_ctx,
+                                                     &pfo->flow_table)) {
+                    return false;
+                }
+            }
+            sbrec_port_binding_index_destroy_row(target);
+        } else {
+            /* If any multichassis ports, update flows for the port. */
+            bool removed = sbrec_port_binding_is_deleted(pb);
+            if (!physical_handle_flows_for_lport(pb, removed, &p_ctx,
+                                                 &pfo->flow_table)) {
+                return false;
+            }
+        }
+        engine_set_node_state(node, EN_UPDATED);
+    }
+    return true;
+}
+
 static bool
 pflow_output_sb_port_binding_handler(struct engine_node *node,
                                      void *data)
@@ -4611,6 +4735,7 @@ main(int argc, char *argv[])
     ENGINE_NODE_WITH_CLEAR_TRACK_DATA(port_groups, "port_groups");
     ENGINE_NODE(northd_options, "northd_options");
     ENGINE_NODE(dhcp_options, "dhcp_options");
+    ENGINE_NODE(if_status_mgr, "if_status_mgr");
     ENGINE_NODE_WITH_CLEAR_TRACK_DATA(lb_data, "lb_data");
 
 #define SB_NODE(NAME, NAME_STR) ENGINE_NODE_SB(NAME, NAME_STR);
@@ -4649,6 +4774,9 @@ main(int argc, char *argv[])
     engine_add_input(&en_non_vif_data, &en_ovs_interface,
                      non_vif_data_ovs_iface_handler);
 
+    engine_add_input(&en_if_status_mgr, &en_ovs_interface,
+                     if_status_mgr_ovs_interface_handler);
+
     /* Note: The order of inputs is important, all OVS interface changes must
      * be handled before any ct_zone changes.
      */
@@ -4659,6 +4787,8 @@ main(int argc, char *argv[])
     engine_add_input(&en_pflow_output, &en_sb_chassis,
                      pflow_lflow_output_sb_chassis_handler);
 
+    engine_add_input(&en_pflow_output, &en_if_status_mgr,
+                     pflow_output_if_status_mgr_handler);
     engine_add_input(&en_pflow_output, &en_sb_port_binding,
                      pflow_output_sb_port_binding_handler);
     engine_add_input(&en_pflow_output, &en_sb_multicast_group,
@@ -5061,8 +5191,14 @@ main(int argc, char *argv[])
 
             if (br_int) {
                 ct_zones_data = engine_get_data(&en_ct_zones);
-                if (ct_zones_data) {
-                    ofctrl_run(br_int, ovs_table, &ct_zones_data->pending);
+                if (ct_zones_data && ofctrl_run(br_int, ovs_table,
+                                                &ct_zones_data->pending)) {
+                    static struct vlog_rate_limit rl
+                            = VLOG_RATE_LIMIT_INIT(1, 1);
+
+                    VLOG_INFO_RL(&rl, "OVS OpenFlow connection reconnected, "
+                                      "force recompute.");
+                    engine_set_force_recompute(true);
                 }
 
                 if (chassis) {
@@ -5071,7 +5207,8 @@ main(int argc, char *argv[])
                                chassis,
                                sbrec_sb_global_first(ovnsb_idl_loop.idl),
                                ovs_table,
-                               &transport_zones);
+                               &transport_zones,
+                               bridge_table);
 
                     stopwatch_start(CONTROLLER_LOOP_STOPWATCH_NAME,
                                     time_msec());
@@ -5225,6 +5362,11 @@ main(int argc, char *argv[])
                     stopwatch_start(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME,
                                     time_msec());
                     if_status_mgr_update(if_mgr, binding_data, chassis,
+                                         ovsrec_interface_table_get(
+                                                    ovs_idl_loop.idl),
+                                         sbrec_port_binding_table_get(
+                                                    ovnsb_idl_loop.idl),
+                                         !ovs_idl_txn,
                                          !ovnsb_idl_txn);
                     stopwatch_stop(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME,
                                    time_msec());
@@ -5254,11 +5396,12 @@ main(int argc, char *argv[])
                     stopwatch_start(IF_STATUS_MGR_RUN_STOPWATCH_NAME,
                                     time_msec());
                     if_status_mgr_run(if_mgr, binding_data, chassis,
+                                      ovsrec_interface_table_get(
+                                                  ovs_idl_loop.idl),
                                       !ovnsb_idl_txn, !ovs_idl_txn);
                     stopwatch_stop(IF_STATUS_MGR_RUN_STOPWATCH_NAME,
                                    time_msec());
                 }
-
             }
 
             if (!engine_has_run()) {
@@ -5449,6 +5592,7 @@ loop_done:
     binding_destroy();
     patch_destroy();
     mirror_destroy();
+    encaps_destroy();
     if_status_mgr_destroy(if_mgr);
     shash_destroy(&vif_plug_deleted_iface_ids);
     shash_destroy(&vif_plug_changed_iface_ids);
@@ -5466,6 +5610,7 @@ loop_done:
         free(cli_system_id);
     }
     service_stop();
+    ovsrcu_exit();
 
     exit(retval);
 }
diff --git a/controller/ovsport.c b/controller/ovsport.c
index ec38c3fca..ebcb9cb6d 100644
--- a/controller/ovsport.c
+++ b/controller/ovsport.c
@@ -264,3 +264,12 @@ maintain_interface_smap_column(
         }
     }
 }
+
+uint16_t
+get_iface_mtu(const struct ovsrec_interface *iface)
+{
+    if (!iface || !iface->n_mtu || iface->mtu[0] <= 0) {
+        return 0;
+    }
+    return (uint16_t) iface->mtu[0];
+}
diff --git a/controller/ovsport.h b/controller/ovsport.h
index e355ff7ff..c40c1855a 100644
--- a/controller/ovsport.h
+++ b/controller/ovsport.h
@@ -57,4 +57,6 @@ const struct ovsrec_port * ovsport_lookup_by_interfaces(
 const struct ovsrec_port * ovsport_lookup_by_interface(
         struct ovsdb_idl_index *, struct ovsrec_interface *);
 
+uint16_t get_iface_mtu(const struct ovsrec_interface *);
+
 #endif /* lib/ovsport.h */
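
A minimal usage sketch for get_iface_mtu() (illustrative only): a return value
of 0 means the interface MTU is unset or invalid and MTU-dependent processing
should be skipped.

    uint16_t mtu = get_iface_mtu(iface);
    if (!mtu) {
        return;  /* MTU unknown; skip MTU-dependent flows. */
    }
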
diff --git a/controller/physical.c b/controller/physical.c
index ec861f49c..d19eb9200 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -41,6 +41,7 @@
 #include "lib/ovn-sb-idl.h"
 #include "lib/ovn-util.h"
 #include "ovn/actions.h"
+#include "if-status.h"
 #include "physical.h"
 #include "pinctrl.h"
 #include "openvswitch/shash.h"
@@ -91,6 +92,7 @@ physical_register_ovs_idl(struct ovsdb_idl *ovs_idl)
 
     ovsdb_idl_add_table(ovs_idl, &ovsrec_table_interface);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_name);
+    ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_mtu);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_ofport);
     ovsdb_idl_track_add_column(ovs_idl, &ovsrec_interface_col_external_ids);
 }
@@ -876,12 +878,12 @@ put_local_common_flows(uint32_t dp_key,
 
     uint32_t port_key = pb->tunnel_key;
 
-    /* Table 38, priority 100.
+    /* Table 40, priority 100.
      * =======================
      *
      * Implements output to local hypervisor.  Each flow matches a
      * logical output port on the local hypervisor, and resubmits to
-     * table 39.
+     * table 41.
      */
 
     ofpbuf_clear(ofpacts_p);
@@ -891,13 +893,13 @@ put_local_common_flows(uint32_t dp_key,
 
     put_zones_ofpacts(zone_ids, ofpacts_p);
 
-    /* Resubmit to table 39. */
+    /* Resubmit to table 41. */
     put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
     ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
                     pb->header_.uuid.parts[0], &match, ofpacts_p,
                     &pb->header_.uuid);
 
-    /* Table 39, Priority 100.
+    /* Table 41, Priority 100.
      * =======================
      *
      * Drop packets whose logical inport and outport are the same
@@ -1104,6 +1106,240 @@ setup_activation_strategy(const struct sbrec_port_binding *binding,
     }
 }
 
+/*
+ * Insert a flow to determine if an IP packet is too big for the corresponding
+ * egress interface.
+ */
+static void
+determine_if_pkt_too_big(struct ovn_desired_flow_table *flow_table,
+                         const struct sbrec_port_binding *binding,
+                         const struct sbrec_port_binding *mcp,
+                         uint16_t mtu, bool is_ipv6, int direction)
+{
+    struct ofpbuf ofpacts;
+    ofpbuf_init(&ofpacts, 0);
+
+    /* Store packet too large flag in reg9[1]. */
+    struct match match;
+    match_init_catchall(&match);
+    match_set_dl_type(&match, htons(is_ipv6 ? ETH_TYPE_IPV6 : ETH_TYPE_IP));
+    match_set_metadata(&match, htonll(binding->datapath->tunnel_key));
+    match_set_reg(&match, direction - MFF_REG0, mcp->tunnel_key);
+
+    /* reg9[1] is REGBIT_PKT_LARGER as defined by northd */
+    struct ofpact_check_pkt_larger *pkt_larger =
+        ofpact_put_CHECK_PKT_LARGER(&ofpacts);
+    pkt_larger->pkt_len = mtu;
+    pkt_larger->dst.field = mf_from_id(MFF_REG9);
+    pkt_larger->dst.ofs = 1;
+
+    put_resubmit(OFTABLE_OUTPUT_LARGE_PKT_PROCESS, &ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_OUTPUT_LARGE_PKT_DETECT, 100,
+                    binding->header_.uuid.parts[0], &match, &ofpacts,
+                    &binding->header_.uuid);
+    ofpbuf_uninit(&ofpacts);
+}
+
+/*
+ * Insert a flow to reply with ICMP error for IP packets that are too big for
+ * the corresponding egress interface.
+ */
+/*
+ * NOTE(ihrachys) This reimplements icmp_error as found in
+ * build_icmperr_pkt_big_flows. We may look into reusing the existing OVN
+ * action for this flow in the future.
+ */
+static void
+reply_imcp_error_if_pkt_too_big(struct ovn_desired_flow_table *flow_table,
+                                const struct sbrec_port_binding *binding,
+                                const struct sbrec_port_binding *mcp,
+                                uint16_t mtu, bool is_ipv6, int direction)
+{
+    struct match match;
+    match_init_catchall(&match);
+    match_set_dl_type(&match, htons(is_ipv6 ? ETH_TYPE_IPV6 : ETH_TYPE_IP));
+    match_set_metadata(&match, htonll(binding->datapath->tunnel_key));
+    match_set_reg(&match, direction - MFF_REG0, mcp->tunnel_key);
+    match_set_reg_masked(&match, MFF_REG9 - MFF_REG0, 1 << 1, 1 << 1);
+
+    /* Return ICMP error with a part of the original IP packet included. */
+    struct ofpbuf ofpacts;
+    ofpbuf_init(&ofpacts, 0);
+    size_t oc_offset = encode_start_controller_op(
+        ACTION_OPCODE_ICMP, true, NX_CTLR_NO_METER, &ofpacts);
+
+    struct ofpbuf inner_ofpacts;
+    ofpbuf_init(&inner_ofpacts, 0);
+
+    /* The error packet is no longer too large, set REGBIT_PKT_LARGER = 0 */
+    /* reg9[1] is REGBIT_PKT_LARGER as defined by northd */
+    ovs_be32 value = htonl(0);
+    ovs_be32 mask = htonl(1 << 1);
+    ofpact_put_set_field(
+        &inner_ofpacts, mf_from_id(MFF_REG9), &value, &mask);
+
+    /* The new error packet is delivered locally */
+    /* REGBIT_EGRESS_LOOPBACK = 1 */
+    value = htonl(1 << MLF_ALLOW_LOOPBACK_BIT);
+    mask = htonl(1 << MLF_ALLOW_LOOPBACK_BIT);
+    ofpact_put_set_field(
+        &inner_ofpacts, mf_from_id(MFF_LOG_FLAGS), &value, &mask);
+
+    /* eth.src <-> eth.dst */
+    put_stack(MFF_ETH_DST, ofpact_put_STACK_PUSH(&inner_ofpacts));
+    put_stack(MFF_ETH_SRC, ofpact_put_STACK_PUSH(&inner_ofpacts));
+    put_stack(MFF_ETH_DST, ofpact_put_STACK_POP(&inner_ofpacts));
+    put_stack(MFF_ETH_SRC, ofpact_put_STACK_POP(&inner_ofpacts));
+
+    /* ip.src <-> ip.dst */
+    put_stack(is_ipv6 ? MFF_IPV6_DST : MFF_IPV4_DST,
+        ofpact_put_STACK_PUSH(&inner_ofpacts));
+    put_stack(is_ipv6 ? MFF_IPV6_SRC : MFF_IPV4_SRC,
+        ofpact_put_STACK_PUSH(&inner_ofpacts));
+    put_stack(is_ipv6 ? MFF_IPV6_DST : MFF_IPV4_DST,
+        ofpact_put_STACK_POP(&inner_ofpacts));
+    put_stack(is_ipv6 ? MFF_IPV6_SRC : MFF_IPV4_SRC,
+        ofpact_put_STACK_POP(&inner_ofpacts));
+
+    /* ip.ttl = 255 */
+    struct ofpact_ip_ttl *ip_ttl = ofpact_put_SET_IP_TTL(&inner_ofpacts);
+    ip_ttl->ttl = 255;
+
+    uint16_t frag_mtu = mtu - ETHERNET_OVERHEAD;
+    size_t frag_mtu_oc_offset;
+    if (is_ipv6) {
+        /* icmp6.type = 2 (Packet Too Big) */
+        /* icmp6.code = 0 */
+        uint8_t icmp_type = 2;
+        uint8_t icmp_code = 0;
+        ofpact_put_set_field(
+            &inner_ofpacts, mf_from_id(MFF_ICMPV6_TYPE), &icmp_type, NULL);
+        ofpact_put_set_field(
+            &inner_ofpacts, mf_from_id(MFF_ICMPV6_CODE), &icmp_code, NULL);
+
+        /* icmp6.frag_mtu */
+        frag_mtu_oc_offset = encode_start_controller_op(
+            ACTION_OPCODE_PUT_ICMP6_FRAG_MTU, true, NX_CTLR_NO_METER,
+            &inner_ofpacts);
+        ovs_be32 frag_mtu_ovs = htonl(frag_mtu);
+        ofpbuf_put(&inner_ofpacts, &frag_mtu_ovs, sizeof(frag_mtu_ovs));
+    } else {
+        /* icmp4.type = 3 (Destination Unreachable) */
+        /* icmp4.code = 4 (Fragmentation Needed) */
+        uint8_t icmp_type = 3;
+        uint8_t icmp_code = 4;
+        ofpact_put_set_field(
+            &inner_ofpacts, mf_from_id(MFF_ICMPV4_TYPE), &icmp_type, NULL);
+        ofpact_put_set_field(
+            &inner_ofpacts, mf_from_id(MFF_ICMPV4_CODE), &icmp_code, NULL);
+
+        /* icmp4.frag_mtu = */
+        frag_mtu_oc_offset = encode_start_controller_op(
+            ACTION_OPCODE_PUT_ICMP4_FRAG_MTU, true, NX_CTLR_NO_METER,
+            &inner_ofpacts);
+        ovs_be16 frag_mtu_ovs = htons(frag_mtu);
+        ofpbuf_put(&inner_ofpacts, &frag_mtu_ovs, sizeof(frag_mtu_ovs));
+    }
+    encode_finish_controller_op(frag_mtu_oc_offset, &inner_ofpacts);
+
+    /* Finally, submit the ICMP error back to the ingress pipeline */
+    put_resubmit(OFTABLE_LOG_INGRESS_PIPELINE, &inner_ofpacts);
+
+    /* Attach nested actions to ICMP error controller handler */
+    ofpacts_put_openflow_actions(inner_ofpacts.data, inner_ofpacts.size,
+                                 &ofpacts, OFP15_VERSION);
+
+    /* Finalize the ICMP error controller handler */
+    encode_finish_controller_op(oc_offset, &ofpacts);
+
+    ofctrl_add_flow(flow_table, OFTABLE_OUTPUT_LARGE_PKT_PROCESS, 100,
+                    binding->header_.uuid.parts[0], &match, &ofpacts,
+                    &binding->header_.uuid);
+
+    ofpbuf_uninit(&inner_ofpacts);
+    ofpbuf_uninit(&ofpacts);
+}
+
+static uint16_t
+get_tunnel_overhead(struct chassis_tunnel const *tun)
+{
+    uint16_t overhead = 0;
+    enum chassis_tunnel_type type = tun->type;
+    if (type == GENEVE) {
+        overhead += GENEVE_TUNNEL_OVERHEAD;
+    } else if (type == STT) {
+        overhead += STT_TUNNEL_OVERHEAD;
+    } else if (type == VXLAN) {
+        overhead += VXLAN_TUNNEL_OVERHEAD;
+    } else {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+        VLOG_WARN_RL(&rl, "Unknown tunnel type %d, can't determine overhead "
+                          "size for Path MTU Discovery", type);
+        return 0;
+    }
+    overhead += tun->is_ipv6 ? IPV6_HEADER_LEN : IP_HEADER_LEN;
+    return overhead;
+}
+
+static uint16_t
+get_effective_mtu(const struct sbrec_port_binding *mcp,
+                  struct ovs_list *remote_tunnels,
+                  const struct if_status_mgr *if_mgr)
+{
+    /* Use interface MTU as a base for calculation */
+    uint16_t iface_mtu = if_status_mgr_iface_get_mtu(if_mgr,
+                                                     mcp->logical_port);
+    if (!iface_mtu) {
+        return 0;
+    }
+
+    /* Iterate over all peer tunnels and find the biggest tunnel overhead */
+    uint16_t overhead = 0;
+    struct tunnel *tun;
+    LIST_FOR_EACH (tun, list_node, remote_tunnels) {
+        overhead = MAX(overhead, get_tunnel_overhead(tun->tun));
+    }
+    if (!overhead) {
+        return 0;
+    }
+
+    return iface_mtu - overhead;
+}
+
+static void
+handle_pkt_too_big_for_ip_version(struct ovn_desired_flow_table *flow_table,
+                                  const struct sbrec_port_binding *binding,
+                                  const struct sbrec_port_binding *mcp,
+                                  uint16_t mtu, bool is_ipv6)
+{
+    /* ingress */
+    determine_if_pkt_too_big(flow_table, binding, mcp, mtu, is_ipv6,
+                             MFF_LOG_INPORT);
+    reply_imcp_error_if_pkt_too_big(flow_table, binding, mcp, mtu, is_ipv6,
+                                    MFF_LOG_INPORT);
+
+    /* egress */
+    determine_if_pkt_too_big(flow_table, binding, mcp, mtu, is_ipv6,
+                             MFF_LOG_OUTPORT);
+    reply_imcp_error_if_pkt_too_big(flow_table, binding, mcp, mtu, is_ipv6,
+                                    MFF_LOG_OUTPORT);
+}
+
+static void
+handle_pkt_too_big(struct ovn_desired_flow_table *flow_table,
+                   struct ovs_list *remote_tunnels,
+                   const struct sbrec_port_binding *binding,
+                   const struct sbrec_port_binding *mcp,
+                   const struct if_status_mgr *if_mgr)
+{
+    uint16_t mtu = get_effective_mtu(mcp, remote_tunnels, if_mgr);
+    if (!mtu) {
+        return;
+    }
+    handle_pkt_too_big_for_ip_version(flow_table, binding, mcp, mtu, false);
+    handle_pkt_too_big_for_ip_version(flow_table, binding, mcp, mtu, true);
+}
+
 static void
 enforce_tunneling_for_multichassis_ports(
     struct local_datapath *ld,
@@ -1111,7 +1347,8 @@ enforce_tunneling_for_multichassis_ports(
     const struct sbrec_chassis *chassis,
     const struct hmap *chassis_tunnels,
     enum mf_field_id mff_ovn_geneve,
-    struct ovn_desired_flow_table *flow_table)
+    struct ovn_desired_flow_table *flow_table,
+    const struct if_status_mgr *if_mgr)
 {
     if (shash_is_empty(&ld->multichassis_ports)) {
         return;
@@ -1156,6 +1393,8 @@ enforce_tunneling_for_multichassis_ports(
                         binding->header_.uuid.parts[0], &match, &ofpacts,
                         &binding->header_.uuid);
         ofpbuf_uninit(&ofpacts);
+
+        handle_pkt_too_big(flow_table, tuns, binding, mcp, if_mgr);
     }
 
     struct tunnel *tun_elem;
@@ -1177,6 +1416,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                       const struct sbrec_port_binding *binding,
                       const struct sbrec_chassis *chassis,
                       const struct physical_debug *debug,
+                      const struct if_status_mgr *if_mgr,
                       struct ovn_desired_flow_table *flow_table,
                       struct ofpbuf *ofpacts_p)
 {
@@ -1233,12 +1473,12 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
             || ha_chassis_group_is_active(binding->ha_chassis_group,
                                           active_tunnels, chassis))) {
 
-        /* Table 38, priority 100.
+        /* Table 40, priority 100.
          * =======================
          *
          * Implements output to local hypervisor.  Each flow matches a
          * logical output port on the local hypervisor, and resubmits to
-         * table 39.  For ports of type "chassisredirect", the logical
+         * table 41.  For ports of type "chassisredirect", the logical
          * output port is changed from the "chassisredirect" port to the
          * underlying distributed port. */
 
@@ -1275,7 +1515,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                                                     ct_zones);
             put_zones_ofpacts(&zone_ids, ofpacts_p);
 
-            /* Resubmit to table 39. */
+            /* Resubmit to table 41. */
             put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
         }
 
@@ -1491,7 +1731,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                                               ofport, flow_table);
         }
 
-        /* Table 39, priority 160.
+        /* Table 41, priority 160.
          * =======================
          *
          * Do not forward local traffic from a localport to a localnet port.
@@ -1561,13 +1801,13 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
             }
         }
 
-        /* Table 37, priority 150.
+        /* Table 39, priority 150.
          * =======================
          *
          * Handles packets received from ports of type "localport".  These
          * ports are present on every hypervisor.  Traffic that originates at
          * one should never go over a tunnel to a remote hypervisor,
-         * so resubmit them to table 38 for local delivery. */
+         * so resubmit them to table 40 for local delivery. */
         if (!strcmp(binding->type, "localport")) {
             ofpbuf_clear(ofpacts_p);
             put_resubmit(OFTABLE_LOCAL_OUTPUT, ofpacts_p);
@@ -1581,7 +1821,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         }
     } else if (access_type == PORT_LOCALNET) {
         /* Remote port connected by localnet port */
-        /* Table 38, priority 100.
+        /* Table 40, priority 100.
          * =======================
          *
          * Implements switching to localnet port. Each flow matches a
@@ -1596,14 +1836,16 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 
         put_load(localnet_port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, ofpacts_p);
 
-        /* Resubmit to table 38. */
+        /* Resubmit to table 40. */
         put_resubmit(OFTABLE_LOCAL_OUTPUT, ofpacts_p);
         ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
                         binding->header_.uuid.parts[0],
                         &match, ofpacts_p, &binding->header_.uuid);
 
-        enforce_tunneling_for_multichassis_ports(
-            ld, binding, chassis, chassis_tunnels, mff_ovn_geneve, flow_table);
+        enforce_tunneling_for_multichassis_ports(ld, binding, chassis,
+                                                 chassis_tunnels,
+                                                 mff_ovn_geneve, flow_table,
+                                                 if_mgr);
 
         /* No more tunneling to set up. */
         goto out;
@@ -1613,7 +1855,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     const char *redirect_type = smap_get(&binding->options,
                                          "redirect-type");
 
-    /* Table 38, priority 100.
+    /* Table 40, priority 100.
      * =======================
      *
      * Handles traffic that needs to be sent to a remote hypervisor.  Each
@@ -1841,7 +2083,7 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         }
     }
 
-    /* Table 38, priority 100.
+    /* Table 40, priority 100.
      * =======================
      *
      * Handle output to the local logical ports in the multicast group, if
@@ -1857,7 +2099,7 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                         &match, &ofpacts, &mc->header_.uuid);
     }
 
-    /* Table 37, priority 100.
+    /* Table 39, priority 100.
      * =======================
      *
      * Handle output to the remote chassis in the multicast group, if
@@ -1908,7 +2150,7 @@ physical_eval_port_binding(struct physical_ctx *p_ctx,
                           p_ctx->patch_ofports,
                           p_ctx->chassis_tunnels,
                           pb, p_ctx->chassis, &p_ctx->debug,
-                          flow_table, &ofpacts);
+                          p_ctx->if_mgr, flow_table, &ofpacts);
     ofpbuf_uninit(&ofpacts);
 }
 
@@ -2032,10 +2274,10 @@ physical_run(struct physical_ctx *p_ctx,
                               p_ctx->patch_ofports,
                               p_ctx->chassis_tunnels, binding,
                               p_ctx->chassis, &p_ctx->debug,
-                              flow_table, &ofpacts);
+                              p_ctx->if_mgr, flow_table, &ofpacts);
     }
 
-    /* Handle output to multicast groups, in tables 37 and 38. */
+    /* Handle output to multicast groups, in tables 40 and 41. */
     const struct sbrec_multicast_group *mc;
     SBREC_MULTICAST_GROUP_TABLE_FOR_EACH (mc, p_ctx->mc_group_table) {
         consider_mc_group(p_ctx->sbrec_port_binding_by_name,
@@ -2056,7 +2298,7 @@ physical_run(struct physical_ctx *p_ctx,
      * encapsulations have metadata about the ingress and egress logical ports.
      * VXLAN encapsulations have metadata about the egress logical port only.
      * We set MFF_LOG_DATAPATH, MFF_LOG_INPORT, and MFF_LOG_OUTPORT from the
-     * tunnel key data where possible, then resubmit to table 38 to handle
+     * tunnel key data where possible, then resubmit to table 40 to handle
      * packets to the local hypervisor. */
     struct chassis_tunnel *tun;
     HMAP_FOR_EACH (tun, hmap_node, p_ctx->chassis_tunnels) {
@@ -2158,27 +2400,52 @@ physical_run(struct physical_ctx *p_ctx,
      */
     add_default_drop_flow(p_ctx, OFTABLE_PHY_TO_LOG, flow_table);
 
-    /* Table 37, priority 150.
+    /* Table 37-38, priority 0.
+     * ========================
+     *
+     * Default resubmit actions for OFTABLE_OUTPUT_LARGE_PKT_* tables.
+     */
+    struct match match;
+    match_init_catchall(&match);
+    ofpbuf_clear(&ofpacts);
+    put_resubmit(OFTABLE_REMOTE_OUTPUT, &ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_OUTPUT_LARGE_PKT_DETECT, 0, 0, &match,
+                    &ofpacts, hc_uuid);
+
+    match_init_catchall(&match);
+    match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
+                         MLF_ALLOW_LOOPBACK, MLF_ALLOW_LOOPBACK);
+    ofpbuf_clear(&ofpacts);
+    put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_OUTPUT_LARGE_PKT_PROCESS, 10, 0,
+                    &match, &ofpacts, hc_uuid);
+
+    match_init_catchall(&match);
+    ofpbuf_clear(&ofpacts);
+    put_resubmit(OFTABLE_REMOTE_OUTPUT, &ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_OUTPUT_LARGE_PKT_PROCESS, 0, 0, &match,
+                    &ofpacts, hc_uuid);
+
+    /* Table 39, priority 150.
      * =======================
      *
      * Handles packets received from a VXLAN tunnel which get resubmitted to
      * OFTABLE_LOG_INGRESS_PIPELINE due to lack of needed metadata in VXLAN,
-     * explicitly skip sending back out any tunnels and resubmit to table 38
+     * explicitly skip sending back out any tunnels and resubmit to table 40
      * for local delivery, except packets which have MLF_ALLOW_LOOPBACK bit
      * set.
      */
-    struct match match;
     match_init_catchall(&match);
     match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0, MLF_RCV_FROM_RAMP,
                          MLF_RCV_FROM_RAMP | MLF_ALLOW_LOOPBACK);
 
-    /* Resubmit to table 38. */
+    /* Resubmit to table 40. */
     ofpbuf_clear(&ofpacts);
     put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
     ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
                     &match, &ofpacts, hc_uuid);
 
-    /* Table 37, priority 150.
+    /* Table 39, priority 150.
      * =======================
      *
      * Packets that should not be sent to other hypervisors.
@@ -2186,13 +2453,13 @@ physical_run(struct physical_ctx *p_ctx,
     match_init_catchall(&match);
     match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
                          MLF_LOCAL_ONLY, MLF_LOCAL_ONLY);
-    /* Resubmit to table 38. */
+    /* Resubmit to table 40. */
     ofpbuf_clear(&ofpacts);
     put_resubmit(OFTABLE_LOCAL_OUTPUT, &ofpacts);
     ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 150, 0,
                     &match, &ofpacts, hc_uuid);
 
-    /* Table 37, Priority 0.
+    /* Table 39, Priority 0.
      * =======================
      *
      * Resubmit packets that are not directed at tunnels or part of a
@@ -2203,18 +2470,18 @@ physical_run(struct physical_ctx *p_ctx,
     ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 0, 0, &match,
                     &ofpacts, hc_uuid);
 
-    /* Table 38, priority 0.
+    /* Table 40, priority 0.
      * ======================
      *
      * Drop packets that do not match previous flows.
      */
     add_default_drop_flow(p_ctx, OFTABLE_LOCAL_OUTPUT, flow_table);
 
-    /* Table 39, Priority 0.
+    /* Table 41, Priority 0.
      * =======================
      *
      * Resubmit packets that don't output to the ingress port (already checked
-     * in table 38) to the logical egress pipeline, clearing the logical
+     * in table 40) to the logical egress pipeline, clearing the logical
      * registers (for consistent behavior with packets that get tunneled). */
     match_init_catchall(&match);
     ofpbuf_clear(&ofpacts);
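
A worked example of the MTU arithmetic above, assuming an illustrative
1500-byte interface MTU and a Geneve tunnel between chassis with IPv6 tunnel
endpoints (the numbers follow get_tunnel_overhead(), get_effective_mtu() and
the frag_mtu computation; they are not taken from the patch):

    /* tunnel overhead = GENEVE_TUNNEL_OVERHEAD (38) + IPV6_HEADER_LEN (40) = 78
     * effective MTU   = 1500 - 78                                          = 1422
     * ICMP frag_mtu   = 1422 - ETHERNET_OVERHEAD (14 + 4)                  = 1404 */
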
diff --git a/controller/physical.h b/controller/physical.h
index f450dca94..1f1ed55ef 100644
--- a/controller/physical.h
+++ b/controller/physical.h
@@ -52,11 +52,13 @@ struct physical_ctx {
     struct ovsdb_idl_index *sbrec_port_binding_by_name;
     struct ovsdb_idl_index *sbrec_port_binding_by_datapath;
     const struct sbrec_port_binding_table *port_binding_table;
+    const struct ovsrec_interface_table *ovs_interface_table;
     const struct sbrec_multicast_group_table *mc_group_table;
     const struct ovsrec_bridge *br_int;
     const struct sbrec_chassis_table *chassis_table;
     const struct sbrec_chassis *chassis;
     const struct sset *active_tunnels;
+    const struct if_status_mgr *if_mgr;
     struct hmap *local_datapaths;
     struct sset *local_lports;
     const struct simap *ct_zones;
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 795847729..cd9760f07 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -627,7 +627,7 @@ set_actions_and_enqueue_msg(struct rconn *swconn,
 }
 
 /* Forwards a packet to 'out_port_key' even if that's on a remote
- * hypervisor, i.e., the packet is re-injected in table OFTABLE_REMOTE_OUTPUT.
+ * hypervisor, i.e., the packet is re-injected in table OFTABLE_OUTPUT_INIT.
  */
 static void
 pinctrl_forward_pkt(struct rconn *swconn, int64_t dp_key,
@@ -644,7 +644,7 @@ pinctrl_forward_pkt(struct rconn *swconn, int64_t dp_key,
 
     struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&ofpacts);
     resubmit->in_port = OFPP_CONTROLLER;
-    resubmit->table_id = OFTABLE_REMOTE_OUTPUT;
+    resubmit->table_id = OFTABLE_OUTPUT_INIT;
 
     struct ofputil_packet_out po = {
         .packet = dp_packet_data(pkt),
@@ -870,7 +870,7 @@ pinctrl_parse_dhcpv6_advt(struct rconn *swconn, const struct flow *ip_flow,
              0, 32, &ofpacts);
     struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&ofpacts);
     resubmit->in_port = OFPP_CONTROLLER;
-    resubmit->table_id = OFTABLE_REMOTE_OUTPUT;
+    resubmit->table_id = OFTABLE_OUTPUT_INIT;
 
     struct ofputil_packet_out po = {
         .packet = dp_packet_data(&packet),
@@ -1499,7 +1499,7 @@ buffered_push_packet(struct buffered_packets *bp,
 
     struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&bi->ofpacts);
     resubmit->in_port = OFPP_CONTROLLER;
-    resubmit->table_id = OFTABLE_REMOTE_OUTPUT;
+    resubmit->table_id = OFTABLE_OUTPUT_INIT;
 
     bi->p = packet;
 
@@ -2444,19 +2444,19 @@ compose_out_dhcpv6_opts(struct ofpbuf *userdata,
                         struct ofpbuf *out_dhcpv6_opts, ovs_be32 iaid)
 {
     while (userdata->size) {
-        struct dhcp_opt6_header *userdata_opt = ofpbuf_try_pull(
+        struct dhcpv6_opt_header *userdata_opt = ofpbuf_try_pull(
             userdata, sizeof *userdata_opt);
         if (!userdata_opt) {
             return false;
         }
 
-        size_t size = ntohs(userdata_opt->size);
+        size_t size = ntohs(userdata_opt->len);
         uint8_t *userdata_opt_data = ofpbuf_try_pull(userdata, size);
         if (!userdata_opt_data) {
             return false;
         }
 
-        switch (ntohs(userdata_opt->opt_code)) {
+        switch (ntohs(userdata_opt->code)) {
         case DHCPV6_OPT_SERVER_ID_CODE:
         {
             /* The Server Identifier option carries a DUID
@@ -2988,6 +2988,13 @@ pinctrl_handle_dns_lookup(
         goto exit;
     }
 
+    /* Check if there is an additional record present, which is unsupported */
+    if (in_dns_header->arcount) {
+        VLOG_DBG_RL(&rl, "Received DNS query with additional records, which"
+                    " is unsupported");
+        goto exit;
+    }
+
     struct udp_header *in_udp = dp_packet_l4(pkt_in);
     size_t udp_len = ntohs(in_udp->udp_len);
     size_t l4_len = dp_packet_l4_size(pkt_in);
@@ -7190,7 +7197,9 @@ bfd_monitor_send_msg(struct rconn *swconn, long long int *bfd_time)
         pinctrl_send_bfd_tx_msg(swconn, entry, false);
 
         tx_timeout = MAX(entry->local_min_tx, entry->remote_min_rx);
-        tx_timeout -= random_range((tx_timeout * 25) / 100);
+        if (tx_timeout >= 4) {
+            tx_timeout -= random_range(tx_timeout / 4);
+        }
         entry->next_tx = cur_time + tx_timeout;
 next:
         if (*bfd_time > entry->next_tx) {
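
Illustrative effect of the BFD transmit-jitter change above (assuming
tx_timeout is in milliseconds and that random_range(n) yields a value in
[0, n); the numbers are examples, not from the patch):

    /* tx_timeout = 1000 -> up to 249 ms of jitter is subtracted, so the next
     *                      transmit happens 751..1000 ms from now.
     * tx_timeout = 3    -> the guard skips the jitter, avoiding a
     *                      random_range(0) call for very small intervals. */
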
diff --git a/debian/changelog b/debian/changelog
index 11a07dd38..02a9953ba 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+ovn (23.03.1-1) unstable; urgency=low
+   [ OVN team ]
+   * New upstream version
+
+ -- OVN team <dev@openvswitch.org>  Fri, 03 Mar 2023 10:40:37 -0500
+
 ovn (23.03.0-1) unstable; urgency=low
 
    * New upstream version
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index 28479ede1..c973fce9c 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -895,6 +895,9 @@ void ovnacts_free(struct ovnact[], size_t ovnacts_len);
 char *ovnact_op_to_string(uint32_t);
 int encode_ra_dnssl_opt(char *data, char *buf, int buf_len);
 
+size_t encode_start_controller_op(enum action_opcode opcode, bool pause,
+                                  uint32_t meter_id, struct ofpbuf *ofpacts);
+void encode_finish_controller_op(size_t ofs, struct ofpbuf *ofpacts);
 void encode_controller_op(enum action_opcode opcode, uint32_t meter_id,
                           struct ofpbuf *ofpacts);
 
diff --git a/lib/actions.c b/lib/actions.c
index 781549d75..ec27223f9 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -79,7 +79,7 @@ ovnact_init(struct ovnact *ovnact, enum ovnact_type type, size_t len)
     ovnact->len = len;
 }
 
-static size_t
+size_t
 encode_start_controller_op(enum action_opcode opcode, bool pause,
                            uint32_t meter_id, struct ofpbuf *ofpacts)
 {
@@ -100,7 +100,7 @@ encode_start_controller_op(enum action_opcode opcode, bool pause,
     return ofs;
 }
 
-static void
+void
 encode_finish_controller_op(size_t ofs, struct ofpbuf *ofpacts)
 {
     struct ofpact_controller *oc = ofpbuf_at_assert(ofpacts, ofs, sizeof *oc);
@@ -2882,26 +2882,26 @@ static void
 encode_put_dhcpv6_option(const struct ovnact_gen_option *o,
                          struct ofpbuf *ofpacts)
 {
-    struct dhcp_opt6_header *opt = ofpbuf_put_uninit(ofpacts, sizeof *opt);
+    struct dhcpv6_opt_header *opt = ofpbuf_put_uninit(ofpacts, sizeof *opt);
     const union expr_constant *c = o->value.values;
     size_t n_values = o->value.n_values;
     size_t size;
 
-    opt->opt_code = htons(o->option->code);
+    opt->code = htons(o->option->code);
 
     if (!strcmp(o->option->type, "ipv6")) {
         size = n_values * sizeof(struct in6_addr);
-        opt->size = htons(size);
+        opt->len = htons(size);
         for (size_t i = 0; i < n_values; i++) {
             ofpbuf_put(ofpacts, &c[i].value.ipv6, sizeof(struct in6_addr));
         }
     } else if (!strcmp(o->option->type, "mac")) {
         size = sizeof(struct eth_addr);
-        opt->size = htons(size);
+        opt->len = htons(size);
         ofpbuf_put(ofpacts, &c->value.mac, size);
     } else if (!strcmp(o->option->type, "str")) {
         size = strlen(c->string);
-        opt->size = htons(size);
+        opt->len = htons(size);
         ofpbuf_put(ofpacts, c->string, size);
     }
 }
diff --git a/lib/lb.c b/lib/lb.c
index e941434c4..f88c1855b 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -38,6 +38,7 @@ static const char *lb_neighbor_responder_mode_names[] = {
 static struct nbrec_load_balancer_health_check *
 ovn_lb_get_health_check(const struct nbrec_load_balancer *nbrec_lb,
                         const char *vip_port_str, bool template);
+static void ovn_lb_backends_clear(struct ovn_lb_vip *vip);
 
 struct ovn_lb_ip_set *
 ovn_lb_ip_set_create(void)
@@ -238,6 +239,8 @@ ovn_lb_backends_init_template(struct ovn_lb_vip *lb_vip, const char *value_)
             ds_put_format(&errors, "%s: should be a template of the form: "
                           "'^backendip_variable1[:^port_variable1|:port]', ",
                           atom);
+            free(backend_port);
+            free(backend_ip);
         }
         free(atom);
     }
@@ -285,8 +288,27 @@ ovn_lb_vip_init_template(struct ovn_lb_vip *lb_vip, const char *lb_key_,
                          lb_key_);
     }
 
+    /* Backends can either be templates or explicit IPs and ports. */
     lb_vip->address_family = address_family;
-    return ovn_lb_backends_init_template(lb_vip, lb_value);
+    lb_vip->template_backends = true;
+    char *template_error = ovn_lb_backends_init_template(lb_vip, lb_value);
+
+    if (template_error) {
+        lb_vip->template_backends = false;
+        ovn_lb_backends_clear(lb_vip);
+
+        char *explicit_error = ovn_lb_backends_init_explicit(lb_vip, lb_value);
+        if (explicit_error) {
+            char *error =
+                xasprintf("invalid backend: template (%s) OR explicit (%s)",
+                          template_error, explicit_error);
+            free(explicit_error);
+            free(template_error);
+            return error;
+        }
+        free(template_error);
+    }
+    return NULL;
 }
 
 /* Returns NULL on success, an error string on failure.  The caller is
@@ -304,15 +326,29 @@ ovn_lb_vip_init(struct ovn_lb_vip *lb_vip, const char *lb_key,
                                        address_family);
 }
 
-void
-ovn_lb_vip_destroy(struct ovn_lb_vip *vip)
+static void
+ovn_lb_backends_destroy(struct ovn_lb_vip *vip)
 {
-    free(vip->vip_str);
-    free(vip->port_str);
     for (size_t i = 0; i < vip->n_backends; i++) {
         free(vip->backends[i].ip_str);
         free(vip->backends[i].port_str);
     }
+}
+
+static void
+ovn_lb_backends_clear(struct ovn_lb_vip *vip)
+{
+    ovn_lb_backends_destroy(vip);
+    vip->backends = NULL;
+    vip->n_backends = 0;
+}
+
+void
+ovn_lb_vip_destroy(struct ovn_lb_vip *vip)
+{
+    free(vip->vip_str);
+    free(vip->port_str);
+    ovn_lb_backends_destroy(vip);
     free(vip->backends);
 }
 
@@ -357,11 +393,10 @@ ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s, bool template)
 }
 
 void
-ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s,
-                           bool template)
+ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s)
 {
     bool needs_brackets = vip->address_family == AF_INET6 && vip->port_str
-                          && !template;
+                          && !vip->template_backends;
     for (size_t i = 0; i < vip->n_backends; i++) {
         struct ovn_lb_backend *backend = &vip->backends[i];
 
@@ -798,6 +833,7 @@ ovn_controller_lb_create(const struct sbrec_load_balancer *sbrec_lb,
     lb->hairpin_orig_tuple = smap_get_bool(&sbrec_lb->options,
                                            "hairpin_orig_tuple",
                                            false);
+    lb->ct_flush = smap_get_bool(&sbrec_lb->options, "ct_flush", false);
     ovn_lb_get_hairpin_snat_ip(&sbrec_lb->header_.uuid, &sbrec_lb->options,
                                &lb->hairpin_snat_ips);
     return lb;
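
Illustrative backend strings accepted for a templated VIP after the fallback
above (the values are examples, not taken from the patch):

    /* "^backend_var1:^port_var1,^backend_var2:^port_var2"
     *     -> parsed by ovn_lb_backends_init_template(); template_backends = true
     * "192.0.2.10:80,192.0.2.11:80"
     *     -> falls back to ovn_lb_backends_init_explicit(); template_backends = false
     * If neither parser accepts the value, the combined
     * "invalid backend: template (...) OR explicit (...)" error is returned. */
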
diff --git a/lib/lb.h b/lib/lb.h
index 7a67b7426..e24f519db 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -96,6 +96,9 @@ struct ovn_lb_vip {
                           */
     struct ovn_lb_backend *backends;
     size_t n_backends;
+    bool template_backends; /* True if the backends are templates. False if
+                             * they're explicitly specified.
+                             */
     bool empty_backend_rej;
     int address_family;
 };
@@ -188,6 +191,7 @@ struct ovn_controller_lb {
     bool hairpin_orig_tuple; /* True if ovn-northd stores the original
                               * destination tuple in registers.
                               */
+    bool ct_flush; /* True if we should flush CT after backend removal. */
 
     struct lport_addresses hairpin_snat_ips; /* IP (v4 and/or v6) to be used
                                               * as source for hairpinned
@@ -210,8 +214,7 @@ char *ovn_lb_vip_init(struct ovn_lb_vip *lb_vip, const char *lb_key,
 void ovn_lb_vip_destroy(struct ovn_lb_vip *vip);
 void ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s,
                        bool template);
-void ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s,
-                                bool template);
+void ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s);
 
 struct ovn_lb_5tuple {
     struct hmap_node hmap_node;
diff --git a/lib/ovn-l7.h b/lib/ovn-l7.h
index 2b20bc380..d718ed39a 100644
--- a/lib/ovn-l7.h
+++ b/lib/ovn-l7.h
@@ -240,12 +240,6 @@ struct dhcp_opt_header {
 #define DHCP_OPT_PAYLOAD(hdr) \
     (void *)((char *)hdr + sizeof(struct dhcp_opt_header))
 
-/* Used in the OpenFlow PACKET_IN userdata */
-struct dhcp_opt6_header {
-    ovs_be16 opt_code;
-    ovs_be16 size;
-};
-
 /* These are not defined in ovs/lib/dhcp.h, hence defining here. */
 #define OVN_DHCP_MSG_DECLINE        4
 #define OVN_DHCP_MSG_RELEASE        7
diff --git a/lib/ovn-util.h b/lib/ovn-util.h
index a1a418a24..7510fda4b 100644
--- a/lib/ovn-util.h
+++ b/lib/ovn-util.h
@@ -28,6 +28,13 @@
 #define ROUTE_ORIGIN_CONNECTED "connected"
 #define ROUTE_ORIGIN_STATIC "static"
 
+#define ETH_CRC_LENGTH 4
+#define ETHERNET_OVERHEAD (ETH_HEADER_LEN + ETH_CRC_LENGTH)
+
+#define GENEVE_TUNNEL_OVERHEAD 38
+#define STT_TUNNEL_OVERHEAD 18
+#define VXLAN_TUNNEL_OVERHEAD 30
+
 struct eth_addr;
 struct nbrec_logical_router_port;
 struct ovsrec_flow_sample_collector_set_table;
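The new overhead constants are plain byte counts; a minimal sketch (illustrative
only, not part of the patch) of how they might be combined to derive the payload
MTU that still fits through a Geneve tunnel, assuming ETH_HEADER_LEN from the OVS
packet headers:

    /* Illustrative only: payload that fits once the Geneve encapsulation and
     * the inner Ethernet header plus CRC are subtracted from the MTU of the
     * physical interface carrying the tunnel. */
    static int
    geneve_effective_mtu(int phys_mtu)
    {
        return phys_mtu - GENEVE_TUNNEL_OVERHEAD - ETHERNET_OVERHEAD;
    }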
diff --git a/northd/en-sync-sb.c b/northd/en-sync-sb.c
index 6e33901a8..20f0d8a82 100644
--- a/northd/en-sync-sb.c
+++ b/northd/en-sync-sb.c
@@ -22,7 +22,6 @@
 #include "openvswitch/util.h"
 
 #include "en-sync-sb.h"
-#include "include/ovn/expr.h"
 #include "lib/inc-proc-eng.h"
 #include "lib/lb.h"
 #include "lib/ovn-nb-idl.h"
@@ -34,8 +33,15 @@
 
 VLOG_DEFINE_THIS_MODULE(en_sync_to_sb);
 
+/* This is just a type wrapper to make it explicit that the address
+ * array must be sorted. */
+struct sorted_addresses {
+    const char **arr;
+    size_t n;
+};
+
+
 static void sync_addr_set(struct ovsdb_idl_txn *ovnsb_txn, const char *name,
-                          const char **addrs, size_t n_addrs,
+                          struct sorted_addresses *addresses,
                           struct shash *sb_address_sets);
 static void sync_addr_sets(const struct nbrec_address_set_table *,
                            const struct nbrec_port_group_table *,
@@ -44,11 +50,17 @@ static void sync_addr_sets(const struct nbrec_address_set_table *,
                            struct hmap *datapaths);
 static const struct sbrec_address_set *sb_address_set_lookup_by_name(
     struct ovsdb_idl_index *, const char *name);
-static void update_sb_addr_set(const char **nb_addresses, size_t n_addresses,
+static void update_sb_addr_set(struct sorted_addresses *,
                                const struct sbrec_address_set *);
 static void build_port_group_address_set(const struct nbrec_port_group *,
                                          struct svec *ipv4_addrs,
                                          struct svec *ipv6_addrs);
+static struct sorted_addresses
+sorted_addresses_from_nbrec(const struct nbrec_address_set *nb_as);
+static struct sorted_addresses
+sorted_addresses_from_svec(struct svec *addresses);
+static struct sorted_addresses
+sorted_addresses_from_sset(struct sset *addresses);
 
 void *
 en_sync_to_sb_init(struct engine_node *node OVS_UNUSED,
@@ -133,8 +145,9 @@ sync_to_sb_addr_set_nb_address_set_handler(struct engine_node *node,
         if (!sb_addr_set) {
             return false;
         }
-        update_sb_addr_set((const char **) nb_addr_set->addresses,
-                           nb_addr_set->n_addresses, sb_addr_set);
+        struct sorted_addresses addrs =
+                sorted_addresses_from_nbrec(nb_addr_set);
+        update_sb_addr_set(&addrs, sb_addr_set);
     }
 
     return true;
@@ -180,10 +193,14 @@ sync_to_sb_addr_set_nb_port_group_handler(struct engine_node *node,
         struct svec ipv4_addrs = SVEC_EMPTY_INITIALIZER;
         struct svec ipv6_addrs = SVEC_EMPTY_INITIALIZER;
         build_port_group_address_set(nb_pg, &ipv4_addrs, &ipv6_addrs);
-        update_sb_addr_set((const char **) ipv4_addrs.names, ipv4_addrs.n,
-                           sb_addr_set_v4);
-        update_sb_addr_set((const char **) ipv6_addrs.names, ipv6_addrs.n,
-                           sb_addr_set_v6);
+
+        struct sorted_addresses ipv4_addrs_sorted =
+                sorted_addresses_from_svec(&ipv4_addrs);
+        struct sorted_addresses ipv6_addrs_sorted =
+                sorted_addresses_from_svec(&ipv6_addrs);
+
+        update_sb_addr_set(&ipv4_addrs_sorted, sb_addr_set_v4);
+        update_sb_addr_set(&ipv6_addrs_sorted, sb_addr_set_v6);
 
         free(ipv4_addrs_name);
         free(ipv6_addrs_name);
@@ -197,7 +214,7 @@ sync_to_sb_addr_set_nb_port_group_handler(struct engine_node *node,
 /* static functions. */
 static void
 sync_addr_set(struct ovsdb_idl_txn *ovnsb_txn, const char *name,
-              const char **addrs, size_t n_addrs,
+              struct sorted_addresses *addresses,
               struct shash *sb_address_sets)
 {
     const struct sbrec_address_set *sb_address_set;
@@ -206,10 +223,10 @@ sync_addr_set(struct ovsdb_idl_txn *ovnsb_txn, const char *name,
     if (!sb_address_set) {
         sb_address_set = sbrec_address_set_insert(ovnsb_txn);
         sbrec_address_set_set_name(sb_address_set, name);
-        sbrec_address_set_set_addresses(sb_address_set,
-                                        addrs, n_addrs);
+        sbrec_address_set_set_addresses(sb_address_set, addresses->arr,
+                                        addresses->n);
     } else {
-        update_sb_addr_set(addrs, n_addrs, sb_address_set);
+        update_sb_addr_set(addresses, sb_address_set);
     }
 }
 
@@ -243,8 +260,11 @@ sync_addr_sets(const struct nbrec_address_set_table *nb_address_set_table,
 
     /* Service monitor MAC. */
     const char *svc_monitor_macp = northd_get_svc_monitor_mac();
-    sync_addr_set(ovnsb_txn, "svc_monitor_mac", &svc_monitor_macp, 1,
-                     &sb_address_sets);
+    struct sorted_addresses svc = {
+            .arr = &svc_monitor_macp,
+            .n = 1,
+    };
+    sync_addr_set(ovnsb_txn, "svc_monitor_mac", &svc, &sb_address_sets);
 
     /* sync port group generated address sets first */
     const struct nbrec_port_group *nb_port_group;
@@ -255,14 +275,16 @@ sync_addr_sets(const struct nbrec_address_set_table *nb_address_set_table,
         build_port_group_address_set(nb_port_group, &ipv4_addrs, &ipv6_addrs);
         char *ipv4_addrs_name = xasprintf("%s_ip4", nb_port_group->name);
         char *ipv6_addrs_name = xasprintf("%s_ip6", nb_port_group->name);
+
+        struct sorted_addresses ipv4_addrs_sorted =
+                sorted_addresses_from_svec(&ipv4_addrs);
+        struct sorted_addresses ipv6_addrs_sorted =
+                sorted_addresses_from_svec(&ipv6_addrs);
+
         sync_addr_set(ovnsb_txn, ipv4_addrs_name,
-                      /* "char **" is not compatible with "const char **" */
-                      (const char **) ipv4_addrs.names,
-                      ipv4_addrs.n, &sb_address_sets);
+                      &ipv4_addrs_sorted, &sb_address_sets);
         sync_addr_set(ovnsb_txn, ipv6_addrs_name,
-                      /* "char **" is not compatible with "const char **" */
-                      (const char **) ipv6_addrs.names,
-                      ipv6_addrs.n, &sb_address_sets);
+                      &ipv6_addrs_sorted, &sb_address_sets);
         free(ipv4_addrs_name);
         free(ipv6_addrs_name);
         svec_destroy(&ipv4_addrs);
@@ -279,27 +301,26 @@ sync_addr_sets(const struct nbrec_address_set_table *nb_address_set_table,
         if (sset_count(&od->lb_ips->ips_v4_reachable)) {
             char *ipv4_addrs_name = lr_lb_address_set_name(od->tunnel_key,
                                                            AF_INET);
-            const char **ipv4_addrs =
-                sset_array(&od->lb_ips->ips_v4_reachable);
 
-            sync_addr_set(ovnsb_txn, ipv4_addrs_name, ipv4_addrs,
-                          sset_count(&od->lb_ips->ips_v4_reachable),
-                          &sb_address_sets);
+            struct sorted_addresses ipv4_addrs_sorted =
+                    sorted_addresses_from_sset(&od->lb_ips->ips_v4_reachable);
+
+            sync_addr_set(ovnsb_txn, ipv4_addrs_name,
+                          &ipv4_addrs_sorted, &sb_address_sets);
+            free(ipv4_addrs_sorted.arr);
             free(ipv4_addrs_name);
-            free(ipv4_addrs);
         }
 
         if (sset_count(&od->lb_ips->ips_v6_reachable)) {
             char *ipv6_addrs_name = lr_lb_address_set_name(od->tunnel_key,
                                                            AF_INET6);
-            const char **ipv6_addrs =
-                sset_array(&od->lb_ips->ips_v6_reachable);
+            struct sorted_addresses ipv6_addrs_sorted =
+                    sorted_addresses_from_sset(&od->lb_ips->ips_v6_reachable);
 
-            sync_addr_set(ovnsb_txn, ipv6_addrs_name, ipv6_addrs,
-                          sset_count(&od->lb_ips->ips_v6_reachable),
-                          &sb_address_sets);
+            sync_addr_set(ovnsb_txn, ipv6_addrs_name,
+                          &ipv6_addrs_sorted, &sb_address_sets);
+            free(ipv6_addrs_sorted.arr);
             free(ipv6_addrs_name);
-            free(ipv6_addrs);
         }
     }
 
@@ -308,10 +329,10 @@ sync_addr_sets(const struct nbrec_address_set_table *nb_address_set_table,
     const struct nbrec_address_set *nb_address_set;
     NBREC_ADDRESS_SET_TABLE_FOR_EACH (nb_address_set,
                                       nb_address_set_table) {
+        struct sorted_addresses addrs =
+                sorted_addresses_from_nbrec(nb_address_set);
         sync_addr_set(ovnsb_txn, nb_address_set->name,
-            /* "char **" is not compatible with "const char **" */
-            (const char **) nb_address_set->addresses,
-            nb_address_set->n_addresses, &sb_address_sets);
+                      &addrs, &sb_address_sets);
     }
 
     struct shash_node *node;
@@ -323,48 +344,39 @@ sync_addr_sets(const struct nbrec_address_set_table *nb_address_set_table,
 }
 
 static void
-update_sb_addr_set(const char **nb_addresses, size_t n_addresses,
+update_sb_addr_set(struct sorted_addresses *nb_addresses,
                    const struct sbrec_address_set *sb_as)
 {
-    struct expr_constant_set *cs_nb_as =
-        expr_constant_set_create_integers(
-            (const char *const *) nb_addresses, n_addresses);
-    struct expr_constant_set *cs_sb_as =
-        expr_constant_set_create_integers(
-            (const char *const *) sb_as->addresses, sb_as->n_addresses);
-
-    struct expr_constant_set *addr_added = NULL;
-    struct expr_constant_set *addr_deleted = NULL;
-    expr_constant_set_integers_diff(cs_sb_as, cs_nb_as, &addr_added,
-                                    &addr_deleted);
-
-    struct ds ds = DS_EMPTY_INITIALIZER;
-    if (addr_added && addr_added->n_values) {
-        for (size_t i = 0; i < addr_added->n_values; i++) {
-            ds_clear(&ds);
-            expr_constant_format(&addr_added->values[i], EXPR_C_INTEGER, &ds);
-            sbrec_address_set_update_addresses_addvalue(sb_as, ds_cstr(&ds));
+    size_t nb_index, sb_index;
+
+    const char **nb_arr = nb_addresses->arr;
+    char **sb_arr = sb_as->addresses;
+    size_t nb_n = nb_addresses->n;
+    size_t sb_n = sb_as->n_addresses;
+
+    for (nb_index = sb_index = 0; nb_index < nb_n && sb_index < sb_n;) {
+        int cmp = strcmp(nb_arr[nb_index], sb_arr[sb_index]);
+        if (cmp < 0) {
+            sbrec_address_set_update_addresses_addvalue(sb_as,
+                                                        nb_arr[nb_index]);
+            nb_index++;
+        } else if (cmp > 0) {
+            sbrec_address_set_update_addresses_delvalue(sb_as,
+                                                        sb_arr[sb_index]);
+            sb_index++;
+        } else {
+            nb_index++;
+            sb_index++;
         }
     }
 
-    if (addr_deleted && addr_deleted->n_values) {
-        for (size_t i = 0; i < addr_deleted->n_values; i++) {
-            ds_clear(&ds);
-            expr_constant_format(&addr_deleted->values[i],
-                                 EXPR_C_INTEGER, &ds);
-            sbrec_address_set_update_addresses_delvalue(sb_as, ds_cstr(&ds));
-        }
+    for (; nb_index < nb_n; nb_index++) {
+        sbrec_address_set_update_addresses_addvalue(sb_as, nb_arr[nb_index]);
     }
 
-    ds_destroy(&ds);
-    expr_constant_set_destroy(cs_nb_as);
-    free(cs_nb_as);
-    expr_constant_set_destroy(cs_sb_as);
-    free(cs_sb_as);
-    expr_constant_set_destroy(addr_added);
-    free(addr_added);
-    expr_constant_set_destroy(addr_deleted);
-    free(addr_deleted);
+    for (; sb_index < sb_n; sb_index++) {
+        sbrec_address_set_update_addresses_delvalue(sb_as, sb_arr[sb_index]);
+    }
 }
 
 static void
@@ -403,3 +415,32 @@ sb_address_set_lookup_by_name(struct ovsdb_idl_index *sbrec_addr_set_by_name,
 
     return retval;
 }
+
+static struct sorted_addresses
+sorted_addresses_from_nbrec(const struct nbrec_address_set *nb_as)
+{
+    /* The DB is already sorted. */
+    return (struct sorted_addresses) {
+        .arr = (const char **) nb_as->addresses,
+        .n = nb_as->n_addresses,
+    };
+}
+
+static struct sorted_addresses
+sorted_addresses_from_svec(struct svec *addresses)
+{
+    svec_sort(addresses);
+    return (struct sorted_addresses) {
+        .arr = (const char **) addresses->names,
+        .n = addresses->n,
+    };
+}
+
+static struct sorted_addresses
+sorted_addresses_from_sset(struct sset *addresses)
+{
+    return (struct sorted_addresses) {
+        .arr = sset_sort(addresses),
+        .n = sset_count(addresses),
+    };
+}
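The rewritten update_sb_addr_set() above relies on both address lists being
sorted in strcmp() order, which is what the sorted_addresses wrapper and the
svec_sort()/sset_sort() calls guarantee.  A standalone sketch of the same
merge-style diff, for illustration only (not part of the patch):

    #include <stdio.h>
    #include <string.h>

    /* Walk two strcmp()-sorted arrays in lockstep: entries present only in
     * 'nb' would be added to the southbound set, entries present only in
     * 'sb' would be deleted from it. */
    static void
    diff_sorted(const char **nb, size_t nb_n, const char **sb, size_t sb_n)
    {
        size_t i = 0, j = 0;

        while (i < nb_n && j < sb_n) {
            int cmp = strcmp(nb[i], sb[j]);
            if (cmp < 0) {
                printf("add %s\n", nb[i++]);
            } else if (cmp > 0) {
                printf("del %s\n", sb[j++]);
            } else {
                i++;
                j++;
            }
        }
        for (; i < nb_n; i++) {
            printf("add %s\n", nb[i]);
        }
        for (; j < sb_n; j++) {
            printf("del %s\n", sb[j]);
        }
    }

    int
    main(void)
    {
        const char *nb[] = {"10.0.0.1", "10.0.0.3", "10.0.0.4"};
        const char *sb[] = {"10.0.0.1", "10.0.0.2", "10.0.0.4"};

        diff_sorted(nb, 3, sb, 3);  /* Prints: del 10.0.0.2, add 10.0.0.3. */
        return 0;
    }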
diff --git a/northd/inc-proc-northd.c b/northd/inc-proc-northd.c
index d23993a55..fd025c92b 100644
--- a/northd/inc-proc-northd.c
+++ b/northd/inc-proc-northd.c
@@ -34,10 +34,13 @@
 #include "en-lflow.h"
 #include "en-northd-output.h"
 #include "en-sync-sb.h"
+#include "unixctl.h"
 #include "util.h"
 
 VLOG_DEFINE_THIS_MODULE(inc_proc_northd);
 
+static unixctl_cb_func chassis_features_list;
+
 #define NB_NODES \
     NB_NODE(nb_global, "nb_global") \
     NB_NODE(copp, "copp") \
@@ -306,6 +309,12 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
     engine_ovsdb_node_add_index(&en_sb_address_set,
                                 "sbrec_address_set_by_name",
                                 sbrec_address_set_by_name);
+
+    struct northd_data *northd_data =
+        engine_get_internal_data(&en_northd);
+    unixctl_command_register("debug/chassis-features-list", "", 0, 0,
+                             chassis_features_list,
+                             &northd_data->features);
 }
 
 /* Returns true if the incremental processing ended up updating nodes. */
@@ -356,3 +365,20 @@ void inc_proc_northd_cleanup(void)
     engine_cleanup();
     engine_set_context(NULL);
 }
+
+static void
+chassis_features_list(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                      const char *argv[] OVS_UNUSED, void *features_)
+{
+    struct chassis_features *features = features_;
+    struct ds ds = DS_EMPTY_INITIALIZER;
+
+    ds_put_format(&ds, "ct_no_masked_label:    %s\n",
+                  features->ct_no_masked_label ? "true" : "false");
+    ds_put_format(&ds, "ct_lb_related:         %s\n",
+                  features->ct_lb_related ? "true" : "false");
+    ds_put_format(&ds, "mac_binding_timestamp: %s\n",
+                  features->mac_binding_timestamp ? "true" : "false");
+    unixctl_command_reply(conn, ds_cstr(&ds));
+    ds_destroy(&ds);
+}
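Once registered, the new command can be queried at runtime through the usual
northd control socket; something along the lines of

    ovn-appctl -t ovn-northd debug/chassis-features-list

should print the three feature booleans formatted by the handler above (the
exact target name depends on how ovn-northd was started).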
diff --git a/northd/northd.c b/northd/northd.c
index 7ad4cdfad..66f14e9dd 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -239,6 +239,8 @@ enum ovn_stage {
  * one of the logical router's own IP addresses. */
 #define REGBIT_EGRESS_LOOPBACK  "reg9[0]"
 /* Register to store the result of check_pkt_larger action. */
+/* This register is also used by ovn-controller in the
+ * OFTABLE_OUTPUT_LARGE_PKT_DETECT table, for a similar purpose. */
 #define REGBIT_PKT_LARGER        "reg9[1]"
 #define REGBIT_LOOKUP_NEIGHBOR_RESULT "reg9[2]"
 #define REGBIT_LOOKUP_NEIGHBOR_IP_RESULT "reg9[3]"
@@ -432,6 +434,13 @@ build_chassis_features(const struct northd_input *input_data,
     const struct sbrec_chassis *chassis;
 
     SBREC_CHASSIS_TABLE_FOR_EACH (chassis, input_data->sbrec_chassis) {
+        /* Only consider local AZ chassis.  Remote ones don't install
+         * flows generated by the local northd.
+         */
+        if (smap_get_bool(&chassis->other_config, "is-remote", false)) {
+            continue;
+        }
+
         bool ct_no_masked_label =
             smap_get_bool(&chassis->other_config,
                           OVN_FEATURE_CT_NO_MASKED_LABEL,
@@ -552,7 +561,7 @@ free_chassis_queueid(struct hmap *set, const struct uuid *uuid,
 static inline bool
 port_has_qos_params(const struct smap *opts)
 {
-    return (smap_get(opts, "qos_max_rate") ||
+    return (smap_get(opts, "qos_max_rate") || smap_get(opts, "qos_min_rate") ||
             smap_get(opts, "qos_burst"));
 }
 
@@ -1641,6 +1650,10 @@ ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
          * use it. */
         hmap_remove(ports, &port->key_node);
 
+        if (port->peer) {
+            port->peer->peer = NULL;
+        }
+
         for (int i = 0; i < port->n_lsp_addrs; i++) {
             destroy_lport_addresses(&port->lsp_addrs[i]);
         }
@@ -3881,7 +3894,7 @@ build_lb_vip_actions(struct ovn_lb_vip *lb_vip,
     const char *ct_lb_action =
         features->ct_no_masked_label ? "ct_lb_mark" : "ct_lb";
     bool reject = !lb_vip->n_backends && lb_vip->empty_backend_rej;
-    bool drop = false;
+    bool drop = !lb_vip->n_backends && !lb_vip->empty_backend_rej;
 
     if (lb_vip_nb->lb_health_check) {
         ds_put_format(action, "%s(backends=", ct_lb_action);
@@ -5779,20 +5792,24 @@ skip_port_from_conntrack(struct ovn_datapath *od, struct ovn_port *op,
      * know about the connection, as the icmp request went through the logical
      * router on hostA, not hostB. This would only work with distributed
      * conntrack state across all chassis. */
-    struct ds match_in = DS_EMPTY_INITIALIZER;
-    struct ds match_out = DS_EMPTY_INITIALIZER;
 
-    ds_put_format(&match_in, "ip && inport == %s", op->json_key);
-    ds_put_format(&match_out, "ip && outport == %s", op->json_key);
+    const char *ingress_action = "next;";
+    const char *egress_action = od->has_stateful_acl
+                                ? "next;"
+                                : "ct_clear; next;";
+
+    char *ingress_match = xasprintf("ip && inport == %s", op->json_key);
+    char *egress_match = xasprintf("ip && outport == %s", op->json_key);
+
     ovn_lflow_add_with_lport_and_hint(lflows, od, in_stage, priority,
-                                      ds_cstr(&match_in), "next;", op->key,
-                                      &op->nbsp->header_);
+                                      ingress_match, ingress_action,
+                                      op->key, &op->nbsp->header_);
     ovn_lflow_add_with_lport_and_hint(lflows, od, out_stage, priority,
-                                      ds_cstr(&match_out), "next;", op->key,
-                                      &op->nbsp->header_);
+                                      egress_match, egress_action,
+                                      op->key, &op->nbsp->header_);
 
-    ds_destroy(&match_in);
-    ds_destroy(&match_out);
+    free(ingress_match);
+    free(egress_match);
 }
 
 static void
@@ -5867,7 +5884,8 @@ build_pre_acls(struct ovn_datapath *od, const struct hmap *port_groups,
         }
         for (size_t i = 0; i < od->n_localnet_ports; i++) {
             skip_port_from_conntrack(od, od->localnet_ports[i],
-                                     S_SWITCH_IN_PRE_ACL, S_SWITCH_OUT_PRE_ACL,
+                                     S_SWITCH_IN_PRE_ACL,
+                                     S_SWITCH_OUT_PRE_ACL,
                                      110, lflows);
         }
 
@@ -6036,10 +6054,17 @@ build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups,
                                  S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB,
                                  110, lflows);
     }
-    for (size_t i = 0; i < od->n_localnet_ports; i++) {
-        skip_port_from_conntrack(od, od->localnet_ports[i],
-                                 S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB,
-                                 110, lflows);
+    /* Localnet ports do not need to go through conntrack, unless the
+     * logical switch has a load balancer.  In that case conntrack is
+     * necessary so that traffic arriving via the localnet port can be
+     * load balanced.
+     */
+    if (!od->has_lb_vip) {
+        for (size_t i = 0; i < od->n_localnet_ports; i++) {
+            skip_port_from_conntrack(od, od->localnet_ports[i],
+                                     S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB,
+                                     110, lflows);
+        }
     }
 
     /* Do not send stateless flows via conntrack */
@@ -6700,6 +6725,8 @@ build_port_group_lswitches(struct northd_input *input_data,
     }
 }
 
+#define IPV6_CT_OMIT_MATCH "nd || nd_ra || nd_rs || mldv1 || mldv2"
+
 static void
 build_acls(struct ovn_datapath *od, const struct chassis_features *features,
            struct hmap *lflows, const struct hmap *port_groups,
@@ -6847,20 +6874,26 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features,
         ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3,
                       ds_cstr(&match), ct_out_acl_action);
 
-        /* Ingress and Egress ACL Table (Priority 65532).
-         *
-         * Not to do conntrack on ND packets. */
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3,
-                      "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;");
-        ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3,
-                      "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;");
-
         /* Reply and related traffic matched by an "allow-related" ACL
          * should be allowed in the ls_in_acl_after_lb stage too. */
         ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB, UINT16_MAX - 3,
                       REGBIT_ACL_HINT_ALLOW_REL" == 1", "next;");
     }
 
+    /* Ingress and Egress ACL Table (Priority 65532).
+     *
+     * Always allow IPv6 service protocols regardless of other ACLs defined.
+     *
+     * Also, don't send them to conntrack because session tracking
+     * for these protocols does not work properly:
+     * https://bugzilla.kernel.org/show_bug.cgi?id=11797. */
+    ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3,
+                  IPV6_CT_OMIT_MATCH, "next;");
+    ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3,
+                  IPV6_CT_OMIT_MATCH, "next;");
+    ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB, UINT16_MAX - 3,
+                  IPV6_CT_OMIT_MATCH, "next;");
+
     /* Ingress or Egress ACL Table (Various priorities). */
     for (size_t i = 0; i < od->nbs->n_acls; i++) {
         struct nbrec_acl *acl = od->nbs->acls[i];
@@ -7089,7 +7122,9 @@ build_lb_rules_pre_stateful(struct hmap *lflows, struct ovn_northd_lb *lb,
  * - load balancing affinity check:
  *   table=lr_in_lb_aff_check, priority=100
  *      match=(new_lb_match)
- *      action=(REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;)
+ *      action=(REG_NEXT_HOP_IPV4 = ip4.dst;
+ *              REG_ORIG_TP_DPORT_ROUTER = tcp.dst;
+ *              REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;)
  *
  * - load balancing:
  *   table=lr_in_dnat, priority=150
@@ -7130,16 +7165,11 @@ build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
         return;
     }
 
-    static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
-
-    ovn_lflow_add_with_dp_group(
-        lflows, dp_bitmap, S_ROUTER_IN_LB_AFF_CHECK, 100,
-        new_lb_match, aff_check, &lb->nlb->header_);
-
     struct ds aff_action = DS_EMPTY_INITIALIZER;
     struct ds aff_action_learn = DS_EMPTY_INITIALIZER;
     struct ds aff_match = DS_EMPTY_INITIALIZER;
     struct ds aff_match_learn = DS_EMPTY_INITIALIZER;
+    struct ds aff_check_action = DS_EMPTY_INITIALIZER;
 
     bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_vip->vip);
     const char *ip_match = ipv6 ? "ip6" : "ip4";
@@ -7155,6 +7185,20 @@ build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
         ct_flag = "; force_snat";
     }
 
+    /* Create affinity check flow. */
+    ds_put_format(&aff_check_action, "%s = %s.dst; ", reg_vip, ip_match);
+
+    if (lb_vip->port_str) {
+        ds_put_format(&aff_check_action, REG_ORIG_TP_DPORT_ROUTER" = %s.dst; ",
+                      lb->proto);
+    }
+    ds_put_cstr(&aff_check_action, REGBIT_KNOWN_LB_SESSION
+                " = chk_lb_aff(); next;");
+
+    ovn_lflow_add_with_dp_group(
+        lflows, dp_bitmap, S_ROUTER_IN_LB_AFF_CHECK, 100,
+        new_lb_match, ds_cstr(&aff_check_action), &lb->nlb->header_);
+
     /* Prepare common part of affinity LB and affinity learn action. */
     ds_put_format(&aff_action, "%s = %s; ", reg_vip, lb_vip->vip_str);
     ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
@@ -7252,6 +7296,7 @@ build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
     ds_destroy(&aff_action_learn);
     ds_destroy(&aff_match);
     ds_destroy(&aff_match_learn);
+    ds_destroy(&aff_check_action);
 }
 
 /* Builds the logical switch flows related to load balancer affinity.
@@ -7628,38 +7673,36 @@ build_lb_hairpin(struct ovn_datapath *od, struct hmap *lflows)
 static void
 build_vtep_hairpin(struct ovn_datapath *od, struct hmap *lflows)
 {
-    /* Ingress Pre-ARP flows for VTEP hairpining traffic. Priority 1000:
-     * Packets that received from non-VTEP ports should continue processing. */
+    if (!od->has_vtep_lports) {
+        /* There is no need for these flows if the datapath has no VTEP
+         * lports. */
+        return;
+    }
 
+    /* Ingress Pre-ARP flows for VTEP hairpinning traffic.  Priority 1000:
+     * Packets received from VTEP ports must go directly to the L2LKP table.
+     */
     char *action = xasprintf("next(pipeline=ingress, table=%d);",
                              ovn_stage_get_table(S_SWITCH_IN_L2_LKUP));
-    /* send all traffic from VTEP directly to L2LKP table. */
     ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1000,
                   REGBIT_FROM_RAMP" == 1", action);
     free(action);
 
-    struct ds match = DS_EMPTY_INITIALIZER;
-    size_t n_ports = od->n_router_ports;
-    bool dp_has_l3dgw_ports = false;
-    for (int i = 0; i < n_ports; i++) {
-        if (is_l3dgw_port(od->router_ports[i]->peer)) {
-            ds_put_format(&match, "%sis_chassis_resident(%s)%s",
-                          i == 0 ? REGBIT_FROM_RAMP" == 1 && (" : "",
-                          od->router_ports[i]->peer->cr_port->json_key,
-                          i < n_ports - 1 ? " || " : ")");
-            dp_has_l3dgw_ports = true;
-        }
-    }
-
     /* Ingress pre-arp flow for traffic from VTEP (ramp) switch.
     * Priority 2000: Packets, that were received from VTEP (ramp) switch and
     * router ports of current datapath are l3dgw ports and they reside on
     * current chassis, should be passed to next table for ARP/ND hairpin
-    * processing.
-    */
-    if (dp_has_l3dgw_ports) {
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 2000, ds_cstr(&match),
-                      "next;");
+    * processing. */
+    struct ds match = DS_EMPTY_INITIALIZER;
+    for (int i = 0; i < od->n_router_ports; i++) {
+        struct ovn_port *op = od->router_ports[i]->peer;
+        if (is_l3dgw_port(op)) {
+            ds_clear(&match);
+            ds_put_format(&match,
+                          REGBIT_FROM_RAMP" == 1 && is_chassis_resident(%s)",
+                          op->cr_port->json_key);
+            ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 2000,
+                          ds_cstr(&match), "next;");
+        }
     }
     ds_destroy(&match);
 }
@@ -8877,7 +8920,7 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
     if (od->nbs) {
 
         ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 110,
-                      "eth.dst == $svc_monitor_mac",
+                      "eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)",
                       "handle_svc_check(inport);");
 
         struct mcast_switch_info *mcast_sw_info = &od->mcast_info.sw;
@@ -10450,10 +10493,8 @@ enum lrouter_nat_lb_flow_type {
 
 struct lrouter_nat_lb_flows_ctx {
     const char *new_action[LROUTER_NAT_LB_FLOW_MAX];
-    const char *est_action[LROUTER_NAT_LB_FLOW_MAX];
 
     struct ds *new_match;
-    struct ds *est_match;
     struct ds *undnat_match;
 
     struct ovn_lb_vip *lb_vip;
@@ -10471,10 +10512,22 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
                                      enum lrouter_nat_lb_flow_type type,
                                      struct ovn_datapath *od)
 {
-    char *gw_action = od->is_gw_router ? "ct_dnat;" : "ct_dnat_in_czone;";
+    const char *undnat_action;
+
+    switch (type) {
+    case LROUTER_NAT_LB_FLOW_FORCE_SNAT:
+        undnat_action = "flags.force_snat_for_lb = 1; next;";
+        break;
+    case LROUTER_NAT_LB_FLOW_SKIP_SNAT:
+        undnat_action = "flags.skip_snat_for_lb = 1; next;";
+        break;
+    case LROUTER_NAT_LB_FLOW_NORMAL:
+    case LROUTER_NAT_LB_FLOW_MAX:
+        undnat_action = od->is_gw_router ? "ct_dnat;" : "ct_dnat_in_czone;";
+        break;
+    }
     /* Store the match lengths, so we can reuse the ds buffer. */
     size_t new_match_len = ctx->new_match->length;
-    size_t est_match_len = ctx->est_match->length;
     size_t undnat_match_len = ctx->undnat_match->length;
 
 
@@ -10487,33 +10540,24 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
     if (ctx->lb_vip->n_backends || !ctx->lb_vip->empty_backend_rej) {
         ds_put_format(ctx->new_match, " && is_chassis_resident(%s)",
                       od->l3dgw_ports[0]->cr_port->json_key);
-        ds_put_format(ctx->est_match, " && is_chassis_resident(%s)",
-                      od->l3dgw_ports[0]->cr_port->json_key);
     }
 
     ovn_lflow_add_with_hint__(ctx->lflows, od, S_ROUTER_IN_DNAT, ctx->prio,
                               ds_cstr(ctx->new_match), ctx->new_action[type],
                               NULL, meter, &ctx->lb->nlb->header_);
-    ovn_lflow_add_with_hint(ctx->lflows, od, S_ROUTER_IN_DNAT, ctx->prio,
-                            ds_cstr(ctx->est_match), ctx->est_action[type],
-                            &ctx->lb->nlb->header_);
 
     ds_truncate(ctx->new_match, new_match_len);
-    ds_truncate(ctx->est_match, est_match_len);
 
     if (!ctx->lb_vip->n_backends) {
         return;
     }
 
-    const char *action = (type == LROUTER_NAT_LB_FLOW_NORMAL)
-                         ? gw_action : ctx->est_action[type];
-
     ds_put_format(ctx->undnat_match,
                   ") && outport == %s && is_chassis_resident(%s)",
                   od->l3dgw_ports[0]->json_key,
                   od->l3dgw_ports[0]->cr_port->json_key);
     ovn_lflow_add_with_hint(ctx->lflows, od, S_ROUTER_OUT_UNDNAT, 120,
-                            ds_cstr(ctx->undnat_match), action,
+                            ds_cstr(ctx->undnat_match), undnat_action,
                             &ctx->lb->nlb->header_);
     ds_truncate(ctx->undnat_match, undnat_match_len);
 }
@@ -10556,11 +10600,6 @@ build_gw_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
             ctx->new_action[type], &ctx->lb->nlb->header_);
     }
     bitmap_free(dp_non_meter);
-
-    ovn_lflow_add_with_dp_group(
-        ctx->lflows, dp_bitmap, S_ROUTER_IN_DNAT, ctx->prio,
-        ds_cstr(ctx->est_match), ctx->est_action[type],
-        &ctx->lb->nlb->header_);
 }
 
 static void
@@ -10572,19 +10611,13 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
                                const struct shash *meter_groups,
                                const struct chassis_features *features)
 {
-    const char *ct_natted = features->ct_no_masked_label
-                            ? "ct_mark.natted"
-                            : "ct_label.natted";
-
     bool ipv4 = lb_vip->address_family == AF_INET;
     const char *ip_match = ipv4 ? "ip4" : "ip6";
-    const char *ip_reg = ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6;
 
     int prio = 110;
 
     struct ds skip_snat_act = DS_EMPTY_INITIALIZER;
     struct ds force_snat_act = DS_EMPTY_INITIALIZER;
-    struct ds est_match = DS_EMPTY_INITIALIZER;
     struct ds undnat_match = DS_EMPTY_INITIALIZER;
     struct ds unsnat_match = DS_EMPTY_INITIALIZER;
 
@@ -10601,19 +10634,14 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
      * of "ct_lb_mark($targets);". The other flow is for ct.est with
      * an action of "next;".
      */
-    ds_put_format(match, "ct.new && !ct.rel && %s && %s == %s",
-                  ip_match, ip_reg, lb_vip->vip_str);
+    ds_put_format(match, "ct.new && !ct.rel && %s && %s.dst == %s",
+                  ip_match, ip_match, lb_vip->vip_str);
     if (lb_vip->port_str) {
         prio = 120;
-        ds_put_format(match, " && %s && "REG_ORIG_TP_DPORT_ROUTER" == %s",
-                      lb->proto, lb_vip->port_str);
+        ds_put_format(match, " && %s && %s.dst == %s",
+                      lb->proto, lb->proto, lb_vip->port_str);
     }
 
-    ds_put_cstr(&est_match, "ct.est");
-    /* Clone the match after initial "ct.new" (6 bytes). */
-    ds_put_cstr(&est_match, ds_cstr(match) + 6);
-    ds_put_format(&est_match, " && %s == 1", ct_natted);
-
     /* Add logical flows to UNDNAT the load balanced reverse traffic in
      * the router egress pipleine stage - S_ROUTER_OUT_UNDNAT if the logical
      * router has a gateway router port associated.
@@ -10650,20 +10678,12 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
         .lflows = lflows,
         .meter_groups = meter_groups,
         .new_match = match,
-        .est_match = &est_match,
         .undnat_match = &undnat_match
     };
 
     ctx.new_action[LROUTER_NAT_LB_FLOW_NORMAL] = ds_cstr(action);
-    ctx.est_action[LROUTER_NAT_LB_FLOW_NORMAL] = "next;";
-
     ctx.new_action[LROUTER_NAT_LB_FLOW_SKIP_SNAT] = ds_cstr(&skip_snat_act);
-    ctx.est_action[LROUTER_NAT_LB_FLOW_SKIP_SNAT] =
-                                        "flags.skip_snat_for_lb = 1; next;";
-
     ctx.new_action[LROUTER_NAT_LB_FLOW_FORCE_SNAT] = ds_cstr(&force_snat_act);
-    ctx.est_action[LROUTER_NAT_LB_FLOW_FORCE_SNAT] =
-                                        "flags.force_snat_for_lb = 1; next;";
 
     enum {
         LROUTER_NAT_LB_AFF            = LROUTER_NAT_LB_FLOW_MAX,
@@ -10746,7 +10766,6 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
 
     ds_destroy(&unsnat_match);
     ds_destroy(&undnat_match);
-    ds_destroy(&est_match);
     ds_destroy(&skip_snat_act);
     ds_destroy(&force_snat_act);
 
@@ -10820,39 +10839,19 @@ build_lrouter_defrag_flows_for_lb(struct ovn_northd_lb *lb,
         return;
     }
 
-    struct ds defrag_actions = DS_EMPTY_INITIALIZER;
     for (size_t i = 0; i < lb->n_vips; i++) {
         struct ovn_lb_vip *lb_vip = &lb->vips[i];
+        bool ipv6 = lb_vip->address_family == AF_INET6;
         int prio = 100;
 
-        ds_clear(&defrag_actions);
         ds_clear(match);
-
-        if (lb_vip->address_family == AF_INET) {
-            ds_put_format(match, "ip && ip4.dst == %s", lb_vip->vip_str);
-            ds_put_format(&defrag_actions, REG_NEXT_HOP_IPV4" = %s; ",
-                          lb_vip->vip_str);
-        } else {
-            ds_put_format(match, "ip && ip6.dst == %s", lb_vip->vip_str);
-            ds_put_format(&defrag_actions, REG_NEXT_HOP_IPV6" = %s; ",
-                          lb_vip->vip_str);
-        }
-
-        if (lb_vip->port_str) {
-            ds_put_format(match, " && %s", lb->proto);
-            prio = 110;
-
-            ds_put_format(&defrag_actions, REG_ORIG_TP_DPORT_ROUTER
-                          " = %s.dst; ", lb->proto);
-        }
-
-        ds_put_format(&defrag_actions, "ct_dnat;");
+        ds_put_format(match, "ip && ip%c.dst == %s", ipv6 ? '6' : '4',
+                      lb_vip->vip_str);
 
         ovn_lflow_add_with_dp_group(
             lflows, lb->nb_lr_map, S_ROUTER_IN_DEFRAG, prio,
-            ds_cstr(match), ds_cstr(&defrag_actions), &lb->nlb->header_);
+            ds_cstr(match), "ct_dnat;", &lb->nlb->header_);
     }
-    ds_destroy(&defrag_actions);
 }
 
 static void
@@ -10991,15 +10990,10 @@ copy_ra_to_sb(struct ovn_port *op, const char *address_mode)
 }
 
 static inline bool
-lrouter_nat_is_stateless(const struct nbrec_nat *nat)
+lrouter_dnat_and_snat_is_stateless(const struct nbrec_nat *nat)
 {
-    const char *stateless = smap_get(&nat->options, "stateless");
-
-    if (stateless && !strcmp(stateless, "true")) {
-        return true;
-    }
-
-    return false;
+    return smap_get_bool(&nat->options, "stateless", false) &&
+           !strcmp(nat->type, "dnat_and_snat");
 }
 
 /* Handles the match criteria and actions in logical flow
@@ -11698,6 +11692,25 @@ build_neigh_learning_flows_for_lrouter(
         ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_na",
                       ds_cstr(actions));
 
+        if (!learn_from_arp_request) {
+            /* Add flow to skip GARP LLA if we don't know it already.
+             * From RFC 2461, section 4.4, Neighbor Advertisement Message
+             * Format, the Destination Address should be:
+             *   For solicited advertisements, the Source Address of
+             *   an invoking Neighbor Solicitation or, if the
+             *   solicitation's Source Address is the unspecified
+             *   address, the all-nodes multicast address. */
+            ds_clear(actions);
+            ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
+                                   " = lookup_nd(inport, ip6.src, nd.tll); "
+                                   REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
+                                   " = lookup_nd_ip(inport, ip6.src); next;");
+            ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 110,
+                          "nd_na && ip6.src == fe80::/10 "
+                          "&& ip6.dst == ff00::/8",
+                          ds_cstr(actions));
+        }
+
         ds_clear(actions);
         ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
                       " = lookup_nd(inport, ip6.src, nd.sll); %snext;",
@@ -12814,8 +12827,7 @@ build_gateway_redirect_flows_for_lrouter(
         for (int j = 0; j < od->n_nat_entries; j++) {
             const struct ovn_nat *nat = &od->nat_entries[j];
 
-            if (!lrouter_nat_is_stateless(nat->nb) ||
-                strcmp(nat->nb->type, "dnat_and_snat") ||
+            if (!lrouter_dnat_and_snat_is_stateless(nat->nb) ||
                 (!nat->nb->allowed_ext_ips && !nat->nb->exempted_ext_ips)) {
                 continue;
             }
@@ -13038,9 +13050,27 @@ build_misc_local_traffic_drop_flows_for_lrouter(
         ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50,
                       "eth.bcast", debug_drop_action());
 
+        /* Avoid ICMP time exceeded for multicast; silently drop instead.
+         * See RFC1812 section 5.3.1:
+         *  If the TTL is reduced to zero (or less), the packet MUST be
+         *  discarded, and if the destination is NOT A MULTICAST address the
+         *  router MUST send an ICMP Time Exceeded message ...
+         *
+         * The reason is that TTL has a special meaning for multicast.
+         * For example, TTL = 1 means restricted to the same subnet, not
+         * forwarded by the router.  It is therefore very common to see
+         * multicast packets with ttl = 1, and generating ICMP for such
+         * packets is harmful from both a slow-path performance and a
+         * functionality point of view.
+         *
+         * (priority-31 flows will send ICMP time exceeded) */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 32,
+                      "ip.ttl == {0, 1} && !ip.later_frag && "
+                      "(ip4.mcast || ip6.mcast)", debug_drop_action());
+
         /* TTL discard */
         ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 30,
-                      "ip4 && ip.ttl == {0, 1}", debug_drop_action());
+                      "ip.ttl == {0, 1}", debug_drop_action());
 
         /* Pass other traffic not already handled to the next table for
          * routing. */
@@ -13224,7 +13254,7 @@ build_ipv6_input_flows_for_lrouter_port(
                           "outport = %s; flags.loopback = 1; output; };",
                           ds_cstr(&ip_ds), op->json_key);
             ovn_lflow_add_with_hint__(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                    100, ds_cstr(match), ds_cstr(actions), NULL,
+                    31, ds_cstr(match), ds_cstr(actions), NULL,
                     copp_meter_get(COPP_ICMP6_ERR, op->od->nbr->copp,
                                    meter_groups),
                     &op->nbrp->header_);
@@ -13352,7 +13382,7 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op,
                           "outport = %s; flags.loopback = 1; output; };",
                           ds_cstr(&ip_ds), op->json_key);
             ovn_lflow_add_with_hint__(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                    100, ds_cstr(match), ds_cstr(actions), NULL,
+                    31, ds_cstr(match), ds_cstr(actions), NULL,
                     copp_meter_get(COPP_ICMP4_ERR, op->od->nbr->copp,
                                    meter_groups),
                     &op->nbrp->header_);
@@ -13597,13 +13627,13 @@ build_lrouter_in_unsnat_flow(struct hmap *lflows, struct ovn_datapath *od,
         return;
     }
 
-    bool stateless = lrouter_nat_is_stateless(nat);
+    bool stateless = lrouter_dnat_and_snat_is_stateless(nat);
     if (od->is_gw_router) {
         ds_clear(match);
         ds_clear(actions);
         ds_put_format(match, "ip && ip%s.dst == %s",
                       is_v6 ? "6" : "4", nat->external_ip);
-        if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+        if (stateless) {
             ds_put_format(actions, "next;");
         } else {
             ds_put_cstr(actions, "ct_snat;");
@@ -13628,7 +13658,7 @@ build_lrouter_in_unsnat_flow(struct hmap *lflows, struct ovn_datapath *od,
                           l3dgw_port->cr_port->json_key);
         }
 
-        if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+        if (stateless) {
             ds_put_format(actions, "next;");
         } else {
             ds_put_cstr(actions, "ct_snat_in_czone;");
@@ -13670,7 +13700,7 @@ build_lrouter_in_dnat_flow(struct hmap *lflows, struct ovn_datapath *od,
     * IP address that needs to be DNATted from a external IP address
     * to a logical IP address. */
     if (!strcmp(nat->type, "dnat") || !strcmp(nat->type, "dnat_and_snat")) {
-        bool stateless = lrouter_nat_is_stateless(nat);
+        bool stateless = lrouter_dnat_and_snat_is_stateless(nat);
 
         if (od->is_gw_router) {
             /* Packet when it goes from the initiator to destination.
@@ -13692,7 +13722,7 @@ build_lrouter_in_dnat_flow(struct hmap *lflows, struct ovn_datapath *od,
                 ds_put_format(actions, "flags.force_snat_for_dnat = 1; ");
             }
 
-            if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+            if (stateless) {
                 ds_put_format(actions, "flags.loopback = 1; "
                               "ip%s.dst=%s; next;",
                               is_v6 ? "6" : "4", nat->logical_ip);
@@ -13782,8 +13812,7 @@ build_lrouter_out_undnat_flow(struct hmap *lflows, struct ovn_datapath *od,
                       ETH_ADDR_ARGS(mac));
     }
 
-    if (!strcmp(nat->type, "dnat_and_snat") &&
-        lrouter_nat_is_stateless(nat)) {
+    if (lrouter_dnat_and_snat_is_stateless(nat)) {
         ds_put_format(actions, "next;");
     } else {
         ds_put_format(actions,
@@ -13839,7 +13868,7 @@ build_lrouter_out_snat_flow(struct hmap *lflows, struct ovn_datapath *od,
         return;
     }
 
-    bool stateless = lrouter_nat_is_stateless(nat);
+    bool stateless = lrouter_dnat_and_snat_is_stateless(nat);
     if (od->is_gw_router) {
         ds_clear(match);
         ds_put_format(match, "ip && ip%s.src == %s",
@@ -13905,7 +13934,7 @@ build_lrouter_out_snat_flow(struct hmap *lflows, struct ovn_datapath *od,
                           ETH_ADDR_ARGS(mac));
         }
 
-        if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+        if (stateless) {
             ds_put_format(actions, "ip%s.src=%s; next;",
                           is_v6 ? "6" : "4", nat->external_ip);
         } else {
@@ -14217,10 +14246,10 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
     ovn_lflow_add(lflows, od, S_ROUTER_OUT_EGR_LOOP, 0, "1", "next;");
     ovn_lflow_add(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 0, "1", "next;");
 
-    /* Ingress DNAT and DEFRAG Table (Priority 50/70).
-     *
-     * The defrag stage needs to have flows for ICMP in order to get
-     * the correct ct_state that can be used by DNAT stage.
+    const char *ct_flag_reg = features->ct_no_masked_label
+                              ? "ct_mark"
+                              : "ct_label";
+    /* Ingress DNAT (Priority 50/70).
      *
      * Allow traffic that is related to an existing conntrack entry.
      * At the same time apply NAT for this traffic.
@@ -14231,16 +14260,10 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
      * that's generated from a non-listening UDP port.  */
     if (od->has_lb_vip && features->ct_lb_related) {
         ds_clear(match);
-        const char *ct_flag_reg = features->ct_no_masked_label
-                                  ? "ct_mark"
-                                  : "ct_label";
 
         ds_put_cstr(match, "ct.rel && !ct.est && !ct.new");
         size_t match_len = match->length;
 
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_DEFRAG, 50, "icmp || icmp6",
-                      "ct_dnat;");
-
         ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg);
         ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match),
                       "flags.skip_snat_for_lb = 1; ct_commit_nat;");
@@ -14251,10 +14274,34 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
                       "flags.force_snat_for_lb = 1; ct_commit_nat;");
 
         ds_truncate(match, match_len);
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50,
-                      "ct.rel && !ct.est && !ct.new", "ct_commit_nat;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50, ds_cstr(match),
+                      "ct_commit_nat;");
+    }
 
+    /* Ingress DNAT (Priority 50/70).
+     *
+     * Pass the traffic that is already established to the next table with
+     * proper flags set.
+     */
+    if (od->has_lb_vip) {
         ds_clear(match);
+
+        ds_put_format(match, "ct.est && !ct.rel && !ct.new && %s.natted",
+                      ct_flag_reg);
+        size_t match_len = match->length;
+
+        ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg);
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match),
+                      "flags.skip_snat_for_lb = 1; next;");
+
+        ds_truncate(match, match_len);
+        ds_put_format(match, " && %s.force_snat == 1", ct_flag_reg);
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match),
+                      "flags.force_snat_for_lb = 1; next;");
+
+        ds_truncate(match, match_len);
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50, ds_cstr(match),
+                      "next;");
     }
 
     /* If the router has load balancer or DNAT rules, re-circulate every packet
@@ -14267,6 +14314,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
      * flag set. Some NICs are unable to offload these flows.
      */
     if (od->is_gw_router && (od->nbr->n_nat || od->has_lb_vip)) {
+        /* Do not send ND or ICMP packets to connection tracking. */
+        ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 100,
+                      "nd || nd_rs || nd_ra", "next;");
         ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 50,
                       "ip", "flags.loopback = 1; ct_dnat;");
         ovn_lflow_add(lflows, od, S_ROUTER_OUT_POST_UNDNAT, 50,
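To make the new established-traffic handling concrete, a sketch of the
lr_in_dnat flows that build_lrouter_nat_defrag_and_lb() above now generates
for a datapath with a load balancer VIP, on a chassis that advertises
ct_no_masked_label (so ct_flag_reg is ct_mark); priorities, matches and
actions follow the code, the rendering itself is illustrative:

    table=lr_in_dnat, priority=70,
        match=(ct.est && !ct.rel && !ct.new && ct_mark.natted
               && ct_mark.skip_snat == 1),
        action=(flags.skip_snat_for_lb = 1; next;)
    table=lr_in_dnat, priority=70,
        match=(ct.est && !ct.rel && !ct.new && ct_mark.natted
               && ct_mark.force_snat == 1),
        action=(flags.force_snat_for_lb = 1; next;)
    table=lr_in_dnat, priority=50,
        match=(ct.est && !ct.rel && !ct.new && ct_mark.natted),
        action=(next;)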
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index 2eab2c4ae..5b3559d45 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -748,6 +748,12 @@
       drop behavior.
     </p>
 
+    <p>
+      A priority-65532 flow is added to allow IPv6 Neighbor solicitation,
+      Neighbor discovery, Router solicitation, Router advertisement and MLD
+      packets regardless of other ACLs defined.
+    </p>
+
     <p>
       If the logical datapath has a stateful ACL or a load balancer with VIP
       configured, the following flows will also be added:
@@ -824,12 +830,6 @@
         in the request direction are skipped here to let a newly created
         ACL re-allow this connection.
       </li>
-
-      <li>
-        A priority-65532 flow that allows IPv6 Neighbor solicitation,
-        Neighbor discover, Router solicitation, Router advertisement and MLD
-        packets.
-      </li>
     </ul>
 
     <p>
@@ -1090,24 +1090,28 @@
     <ul>
       <li>
         <p>
-          For each distributed gateway router port <var>RP</var> attached to
-          the logical switch, a priority-2000 flow is added with the match
-          <code>reg0[14] == 1 &amp;&amp; is_chassis_resident(<var>RP</var>)
-          </code> and action <code>next;</code> to pass the traffic to the
-          next table to respond to the ARP requests for the router port IPs.
+          If the logical switch has an attached logical switch port of
+          <var>vtep</var> type, then for each distributed gateway router
+          port <var>RP</var> attached to this logical switch that has a
+          chassis redirect port <var>cr-RP</var>, a priority-2000 flow is
+          added with the match
+          <pre>
+<code>reg0[14] == 1 &amp;&amp; is_chassis_resident(<var>cr-RP</var>)</code>
+          </pre>
+          and action <code>next;</code>.
         </p>
 
         <p>
           <code>reg0[14]</code> register bit is set in the ingress L2 port
-           security check table for traffic received from HW VTEP (ramp)
-           ports.
+          security check table for traffic received from HW VTEP (ramp) ports.
         </p>
       </li>
 
       <li>
-        A priority-1000 flow that matches on <code>reg0[14]</code> register
-        bit for the traffic received from HW VTEP (ramp) ports.  This traffic
-        is passed to ingress table ls_in_l2_lkup.
+        If the logical switch has an attached logical switch port of
+        <var>vtep</var> type, a priority-1000 flow is added that matches on
+        the <code>reg0[14]</code> register bit for traffic received from HW
+        VTEP (ramp) ports.  This traffic is passed to ingress table
+        ls_in_l2_lkup.
       </li>
       <li>
         A priority-1 flow that hairpins traffic matched by non-default
@@ -2056,6 +2060,16 @@ output;
       db="OVN_Northbound"/> table.
     </p>
 
+    <p>
+      This table also has a priority-110 flow with the match
+      <code>outport == <var>I</var></code> for all logical switch
+      datapaths, where <var>I</var> is the peer of a logical router port.
+      The flow moves traffic to the next table and, if the logical switch
+      has no stateful ACLs, also clears the conntrack state.  This flow is
+      added to skip connection tracking of packets that enter the logical
+      router datapath from the logical switch datapath for routing.
+    </p>
+
     <h3>Egress Table 2: Pre-stateful</h3>
 
     <p>
@@ -2098,6 +2112,12 @@ output;
       <code>to-lport</code> ACLs.
     </p>
 
+    <p>
+      Similar to the ingress table, a priority-65532 flow is added to allow
+      IPv6 Neighbor solicitation, Neighbor discovery, Router solicitation,
+      Router advertisement and MLD packets regardless of other ACLs defined.
+    </p>
+
     <p>
       In addition, the following flows are added.
     </p>
@@ -3066,10 +3086,18 @@ nd.tll = <var>external_mac</var>;
         broadcast address.  By definition this traffic should not be forwarded.
       </li>
 
+      <li>
+        Avoid ICMP time exceeded for multicast.  A priority-32 flow with match
+        <code>ip.ttl == {0, 1} &amp;&amp; !ip.later_frag &amp;&amp;
+        (ip4.mcast || ip6.mcast)</code> and action <code>drop;</code> drops
+        multicast packets whose TTL has expired, without sending an ICMP
+        time exceeded message.
+      </li>
+
       <li>
         <p>
           ICMP time exceeded.  For each router port <var>P</var>, whose IP
-          address is <var>A</var>, a priority-100 flow with match <code>inport
+          address is <var>A</var>, a priority-31 flow with match <code>inport
           == <var>P</var> &amp;&amp; ip.ttl == {0, 1} &amp;&amp;
           !ip.later_frag</code> matches packets whose TTL has expired, with the
           following actions to send an ICMP time exceeded reply for IPv4 and
@@ -3282,35 +3310,16 @@ icmp6 {
     </p>
 
     <p>
-      If load balancing rules with only virtual IP addresses are configured in
+      For all load balancing rules that are configured in
       <code>OVN_Northbound</code> database for a Gateway router,
       a priority-100 flow is added for each configured virtual IP address
       <var>VIP</var>. For IPv4 <var>VIPs</var> the flow matches
       <code>ip &amp;&amp; ip4.dst == <var>VIP</var></code>.  For IPv6
       <var>VIPs</var>, the flow matches <code>ip &amp;&amp; ip6.dst ==
-      <var>VIP</var></code>. The flow applies the action <code>reg0 =
-      <var>VIP</var>; ct_dnat;</code>  (or <code>xxreg0</code> for IPv6) to
-      send IP packets to the connection tracker for packet de-fragmentation and
-      to dnat the destination IP for the committed connection before sending it
-      to the next table.
-    </p>
-
-    <p>
-      If load balancing rules with virtual IP addresses and ports are
-      configured in <code>OVN_Northbound</code> database for a Gateway router,
-      a priority-110 flow is added for each configured virtual IP address
-      <var>VIP</var>, protocol <var>PROTO</var> and port <var>PORT</var>.
-      For IPv4 <var>VIPs</var> the flow matches
-      <code>ip &amp;&amp; ip4.dst == <var>VIP</var> &amp;&amp;
-      <var>PROTO</var> &amp;&amp; <var>PROTO</var>.dst ==
-      <var>PORT</var></code>. For IPv6 <var>VIPs</var>, the flow matches
-      <code>ip &amp;&amp; ip6.dst == <var>VIP</var> &amp;&amp;
-      <var>PROTO</var> &amp;&amp; <var>PROTO</var>.dst ==
-      <var>PORT</var></code>. The flow applies the action <code>reg0 =
-      <var>VIP</var>; reg9[16..31] = <var>PROTO</var>.dst; ct_dnat;</code>
-      (or <code>xxreg0</code> for IPv6) to send IP packets to the connection
-      tracker for packet de-fragmentation and to dnat the destination IP for
-      the committed connection before sending it to the next table.
+      <var>VIP</var></code>. The flow applies the action <code>ct_dnat;</code>
+      to send IP packets to the connection tracker for packet de-fragmentation
+      and to dnat the destination IP for the committed connection before
+      sending it to the next table.
     </p>
 
     <p>
@@ -3349,10 +3358,11 @@ icmp6 {
         column, that includes a L4 port <var>PORT</var> of protocol
         <var>P</var> and IPv4 or IPv6 address <var>VIP</var>, a priority-100
         flow that matches on <code>ct.new &amp;&amp; ip &amp;&amp;
-        reg0 == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; reg9[16..31]
+        ip.dst == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; <var>P</var>.dst
         == </code> <code><var>PORT</var></code> (<code>xxreg0 == <var>VIP
-        </var></code> in the IPv6 case) with an action of <code>reg9[6] =
-        chk_lb_aff(); next;</code>
+        </var></code> in the IPv6 case) with an action of <code>reg0 = ip.dst;
+        reg9[16..31] = <var>P</var>.dst; reg9[6] = chk_lb_aff(); next;</code>
+        (<code>xxreg0 = ip6.dst</code> in the IPv6 case)
       </li>
 
       <li>
@@ -3385,9 +3395,8 @@ icmp6 {
         column, that includes a L4 port <var>PORT</var> of protocol
         <var>P</var> and IPv4 or IPv6 address <var>VIP</var>, a priority-150
         flow that matches on <code>reg9[6] == 1 &amp;&amp; ct.new &amp;&amp;
-        ip &amp;&amp; reg0 == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp;
-        reg9[16..31] == </code> <code><var>PORT</var></code> (<code>xxreg0
-        == <var>VIP</var></code> in the IPv6 case) with an action of
+        ip &amp;&amp; ip.dst == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp;
+        <var>P</var>.dst == </code> <code><var>PORT</var></code> with an action of
         <code>ct_lb_mark(<var>args</var>) </code>, where <var>args</var>
         contains comma separated IP addresses (and optional port numbers)
         to load balance to.  The address family of the IP addresses of
@@ -3410,56 +3419,25 @@ icmp6 {
           Router with gateway port in <code>OVN_Northbound</code> database that
           includes a L4 port <var>PORT</var> of protocol <var>P</var> and IPv4
           or IPv6 address <var>VIP</var>, a priority-120 flow that matches on
-          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip &amp;&amp; reg0 ==
-          <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; reg9[16..31] ==
-          </code> <code><var>PORT</var></code> (<code>xxreg0 == <var>VIP</var>
-          </code> in the IPv6 case) with an action of
+          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip &amp;&amp; ip.dst ==
+          <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; <var>P</var>.dst ==
+          </code> <code><var>PORT</var></code> with an action of
           <code>ct_lb_mark(<var>args</var>)</code>, where <var>args</var> contains
           comma separated IPv4 or IPv6 addresses (and optional port numbers) to
           load balance to.  If the router is configured to force SNAT any
           load-balanced packets, the above action will be replaced by
-          <code>flags.force_snat_for_lb = 1; ct_lb_mark(<var>args</var>);</code>.
+          <code>flags.force_snat_for_lb = 1; ct_lb_mark(<var>args</var>;
+          force_snat);</code>.
           If the load balancing rule is configured with <code>skip_snat</code>
           set to true, the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>);</code>.
+          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>;
+          skip_snat);</code>.
           If health check is enabled, then
           <var>args</var> will only contain those endpoints whose service
           monitor status entry in <code>OVN_Southbound</code> db is
           either <code>online</code> or empty.
         </p>
 
-        <p>
-          The previous table <code>lr_in_defrag</code> sets the register
-          <code>reg0</code> (or <code>xxreg0</code> for IPv6) and does
-          <code>ct_dnat</code>.  Hence for established traffic, this
-          table just advances the packet to the next stage.
-        </p>
-      </li>
-
-      <li>
-        <p>
-          For all the configured load balancing rules for a router in
-          <code>OVN_Northbound</code> database that includes a L4 port
-          <var>PORT</var> of protocol <var>P</var> and IPv4 or IPv6 address
-          <var>VIP</var>, a priority-120 flow that matches on
-          <code>ct.est &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; reg0 ==
-          <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; reg9[16..31] ==
-          </code> <code><var>PORT</var></code> (<code>ip6</code> and
-          <code>xxreg0 == <var>VIP</var></code> in the IPv6 case) with an
-          action of <code>next;</code>. If the router is configured to force
-          SNAT any load-balanced packets, the above action will be replaced by
-          <code>flags.force_snat_for_lb = 1; next;</code>. If the load
-          balancing rule is configured with <code>skip_snat</code> set to true,
-          the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; next;</code>.
-        </p>
-
-        <p>
-          The previous table <code>lr_in_defrag</code> sets the register
-          <code>reg0</code> (or <code>xxreg0</code> for IPv6) and does
-          <code>ct_dnat</code>.  Hence for established traffic, this
-          table just advances the packet to the next stage.
-        </p>
       </li>
 
       <li>
@@ -3467,42 +3445,17 @@ icmp6 {
           For all the configured load balancing rules for a router in
           <code>OVN_Northbound</code> database that includes just an IP address
           <var>VIP</var> to match on, a priority-110 flow that matches on
-          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; reg0 ==
-          <var>VIP</var></code> (<code>ip6</code> and <code>xxreg0 ==
-          <var>VIP</var></code> in the IPv6 case) with an action of
+          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; ip.dst ==
+          <var>VIP</var></code> with an action of
           <code>ct_lb_mark(<var>args</var>)</code>, where <var>args</var> contains
           comma separated IPv4 or IPv6 addresses.  If the router is configured
           to force SNAT any load-balanced packets, the above action will be
           replaced by <code>flags.force_snat_for_lb = 1;
-          ct_lb_mark(<var>args</var>);</code>.
-          If the load balancing rule is configured with <code>skip_snat</code>
-          set to true, the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>);</code>.
-        </p>
-
-        <p>
-          The previous table <code>lr_in_defrag</code> sets the register
-          <code>reg0</code> (or <code>xxreg0</code> for IPv6) and does
-          <code>ct_dnat</code>.  Hence for established traffic, this
-          table just advances the packet to the next stage.
-        </p>
-      </li>
-
-
-      <li>
-        <p>
-          For all the configured load balancing rules for a router in
-          <code>OVN_Northbound</code> database that includes just an IP address
-          <var>VIP</var> to match on, a priority-110 flow that matches on
-          <code>ct.est &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; reg0 ==
-          <var>VIP</var></code> (or <code>ip6</code> and
-          <code>xxreg0 == <var>VIP</var></code>) with an action of
-          <code>next;</code>. If the router is configured to force SNAT any
-          load-balanced packets, the above action will be replaced by
-          <code>flags.force_snat_for_lb = 1; next;</code>.
+          ct_lb_mark(<var>args</var>; force_snat);</code>.
           If the load balancing rule is configured with <code>skip_snat</code>
           set to true, the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; next;</code>.
+          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>;
+          skip_snat);</code>.
         </p>
 
         <p>
@@ -3529,7 +3482,20 @@ icmp6 {
             with an action of <code>ct_commit_nat;</code>, if the router
             has load balancer assigned to it. Along with two priority 70 flows
             that match <code>skip_snat</code> and <code>force_snat</code>
-            flags.
+            flags, setting <code>flags.force_snat_for_lb = 1</code> or
+            <code>flags.skip_snat_for_lb = 1</code> accordingly.
+        </p>
+      </li>
+      <li>
+        <p>
+          For established traffic, a priority-50 flow that matches
+          <code>ct.est &amp;&amp; !ct.rel &amp;&amp; !ct.new &amp;&amp;
+          ct_mark.natted</code> with an action of <code>next;</code>, if the
+          router has a load balancer assigned to it, along with two
+          priority-70 flows that match the <code>skip_snat</code> and
+          <code>force_snat</code> flags, setting
+          <code>flags.force_snat_for_lb = 1</code> or
+          <code>flags.skip_snat_for_lb = 1</code> accordingly; a configuration
+          sketch follows this list.
         </p>
       </li>
     </ul>
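The skip_snat, force_snat and affinity behaviours described in the list above
are driven by per-load-balancer and per-router configuration in the
Northbound database. A configuration sketch, with hypothetical names and
addresses (lb0, lr0, 10.0.0.10):

    # Create a TCP load balancer with one VIP and two backends.
    ovn-nbctl lb-add lb0 10.0.0.10:80 192.168.1.2:80,192.168.1.3:80 tcp

    # Generate the skip_snat variant of the ct_lb_mark actions described above.
    ovn-nbctl set load_balancer lb0 options:skip_snat=true

    # Enable session affinity so the chk_lb_aff() flows described above appear.
    ovn-nbctl set load_balancer lb0 options:affinity_timeout=60

    # Attach the load balancer to the gateway router.
    ovn-nbctl lr-lb-add lr0 lb0

    # Alternatively, force SNAT of load-balanced traffic on the router itself,
    # which selects the force_snat variant of the actions instead.
    ovn-nbctl set logical_router lr0 options:lb_force_snat_ip=router_ip
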
@@ -4721,6 +4687,11 @@ nd_ns {
     <h3>Egress Table 1: UNDNAT on Gateway Routers</h3>
 
     <ul>
+      <li>
+        For IPv6 Neighbor Discovery or Router Solicitation/Advertisement
+        traffic, a priority-100 flow with action <code>next;</code>.
+      </li>
+
       <li>
         For all IP packets, a priority-50 flow with an action
         <code>flags.loopback = 1; ct_dnat;</code>.
@@ -4998,7 +4969,19 @@ nd_ns {
       </li>
     </ul>
 
-    <h3>Egress Table 4: Egress Loopback</h3>
+    <h3>Egress Table 4: Post SNAT</h3>
+
+    <p>
+      Packets reaching this table are processed according to the flows below:
+      <ul>
+        <li>
+          A priority-0 logical flow that matches all packets not already
+          handled (match <code>1</code>) with an action of <code>next;</code>.
+        </li>
+      </ul>
+    </p>
+
+    <h3>Egress Table 5: Egress Loopback</h3>
 
     <p>
       For distributed logical routers where one of the logical router
@@ -5070,7 +5053,7 @@ clone {
       </li>
     </ul>
 
-    <h3>Egress Table 5: Delivery</h3>
+    <h3>Egress Table 6: Delivery</h3>
 
     <p>
       Packets that reach this table are ready for delivery.  It contains:
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 5f895b053..7d24648ff 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -33,6 +33,7 @@
 #include "lib/ovn-l7.h"
 #include "lib/ovn-nb-idl.h"
 #include "lib/ovn-sb-idl.h"
+#include "lib/ovs-rcu.h"
 #include "openvswitch/poll-loop.h"
 #include "simap.h"
 #include "stopwatch.h"
@@ -1048,6 +1049,8 @@ main(int argc, char *argv[])
     ovsdb_idl_loop_destroy(&ovnnb_idl_loop);
     ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
     service_stop();
+    run_update_worker_pool(0);
+    ovsrcu_exit();
 
     exit(res);
 }
diff --git a/ovn-architecture.7.xml b/ovn-architecture.7.xml
index cb1064f71..a2a87ec28 100644
--- a/ovn-architecture.7.xml
+++ b/ovn-architecture.7.xml
@@ -1233,8 +1233,8 @@
         output port field, and since they do not carry a logical output port
         field in the tunnel key, when a packet is received from ramp switch
         VXLAN tunnel by an OVN hypervisor, the packet is resubmitted to table 8
-        to determine the output port(s); when the packet reaches table 37,
-        these packets are resubmitted to table 38 for local delivery by
+        to determine the output port(s); when the packet reaches table 39,
+        these packets are resubmitted to table 40 for local delivery by
         checking a MLF_RCV_FROM_RAMP flag, which is set when the packet
         arrives from a ramp tunnel.
       </p>
@@ -1318,7 +1318,7 @@
         output port is known. These pieces of information are obtained
         from the tunnel encapsulation metadata (see <code>Tunnel
         Encapsulations</code> for encoding details). Then the actions resubmit
-        to table 33 to enter the logical egress pipeline.
+        to table 38 to enter the logical egress pipeline.
       </p>
     </li>
 
@@ -1439,38 +1439,42 @@
 
     <li>
       <p>
-        OpenFlow tables 37 through 39 implement the <code>output</code> action
-        in the logical ingress pipeline.  Specifically, table 37 handles
-        packets to remote hypervisors, table 38 handles packets to the local
-        hypervisor, and table 39 checks whether packets whose logical ingress
-        and egress port are the same should be discarded.
+        OpenFlow tables 37 through 41 implement the <code>output</code> action
+        in the logical ingress pipeline.  Specifically, table 37 serves as an
+        entry point to the egress pipeline and detects IP packets that are
+        too big for the corresponding interface.  Table 38 produces ICMPv4
+        Fragmentation Needed (or ICMPv6 Too Big) errors and delivers them
+        back to the offending port.  Table 39 handles packets to remote
+        hypervisors, table 40 handles packets to the local hypervisor, and
+        table 41 checks whether packets whose logical ingress and egress port
+        are the same should be discarded.
       </p>
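Since this change shifts the physical table numbers, the new layout can be
confirmed on a running chassis by dumping the relevant OpenFlow tables on the
integration bridge; a sketch, assuming the usual bridge name br-int:

    # Entry point / too-big detection and ICMP error generation (tables 37-38).
    ovs-ofctl dump-flows br-int table=37 | head -n 5
    ovs-ofctl dump-flows br-int table=38 | head -n 5

    # Remote output, local output and loopback check (tables 39-41).
    ovs-ofctl dump-flows br-int table=39 | head -n 5
    ovs-ofctl dump-flows br-int table=40 | head -n 5
    ovs-ofctl dump-flows br-int table=41 | head -n 5
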
 
       <p>
         Logical patch ports are a special case.  Logical patch ports do not
         have a physical location and effectively reside on every hypervisor.
-        Thus, flow table 38, for output to ports on the local hypervisor,
+        Thus, flow table 40, for output to ports on the local hypervisor,
         naturally implements output to unicast logical patch ports too.
         However, applying the same logic to a logical patch port that is part
         of a logical multicast group yields packet duplication, because each
         hypervisor that contains a logical port in the multicast group will
         also output the packet to the logical patch port.  Thus, multicast
-        groups implement output to logical patch ports in table 37.
+        groups implement output to logical patch ports in table 39.
       </p>
 
       <p>
-        Each flow in table 37 matches on a logical output port for unicast or
+        Each flow in table 39 matches on a logical output port for unicast or
         multicast logical ports that include a logical port on a remote
         hypervisor.  Each flow's actions implement sending a packet to the port
         it matches.  For unicast logical output ports on remote hypervisors,
         the actions set the tunnel key to the correct value, then send the
         packet on the tunnel port to the correct hypervisor.  (When the remote
         hypervisor receives the packet, table 0 there will recognize it as a
-        tunneled packet and pass it along to table 38.)  For multicast logical
+        tunneled packet and pass it along to table 40.)  For multicast logical
         output ports, the actions send one copy of the packet to each remote
         hypervisor, in the same way as for unicast destinations.  If a
         multicast group includes a logical port or ports on the local
-        hypervisor, then its actions also resubmit to table 38.  Table 37 also
+        hypervisor, then its actions also resubmit to table 40.  Table 39 also
         includes:
       </p>
 
@@ -1478,7 +1482,7 @@
         <li>
           A higher-priority rule to match packets received from ramp switch
           tunnels, based on flag MLF_RCV_FROM_RAMP, and resubmit these packets
-          to table 38 for local delivery.  Packets received from ramp switch
+          to table 40 for local delivery.  Packets received from ramp switch
           tunnels reach here because of a lack of logical output port field in
           the tunnel key and thus these packets needed to be submitted to table
           8 to determine the output port.
@@ -1486,7 +1490,7 @@
         <li>
           A higher-priority rule to match packets received from ports of type
           <code>localport</code>, based on the logical input port, and resubmit
-          these packets to table 38 for local delivery.  Ports of type
+          these packets to table 40 for local delivery.  Ports of type
           <code>localport</code> exist on every hypervisor and by definition
           their traffic should never go out through a tunnel.
         </li>
@@ -1501,41 +1505,41 @@
           packets, the packets only need to be delivered to local ports.
         </li>
         <li>
-          A fallback flow that resubmits to table 38 if there is no other
+          A fallback flow that resubmits to table 40 if there is no other
           match.
         </li>
       </ul>
 
       <p>
-        Flows in table 38 resemble those in table 37 but for logical ports that
+        Flows in table 40 resemble those in table 39 but for logical ports that
         reside locally rather than remotely.  For unicast logical output ports
-        on the local hypervisor, the actions just resubmit to table 39.  For
+        on the local hypervisor, the actions just resubmit to table 41.  For
         multicast output ports that include one or more logical ports on the
         local hypervisor, for each such logical port <var>P</var>, the actions
         change the logical output port to <var>P</var>, then resubmit to table
-        39.
+        41.
       </p>
 
       <p>
         A special case is that when a localnet port exists on the datapath,
         remote port is connected by switching to the localnet port. In this
-        case, instead of adding a flow in table 37 to reach the remote port, a
-        flow is added in table 38 to switch the logical outport to the localnet
-        port, and resubmit to table 38 as if it were unicasted to a logical
+        case, instead of adding a flow in table 39 to reach the remote port, a
+        flow is added in table 40 to switch the logical outport to the localnet
+        port, and resubmit to table 40 as if it were unicasted to a logical
         port on the local hypervisor.
       </p>
 
       <p>
-        Table 39 matches and drops packets for which the logical input and
+        Table 41 matches and drops packets for which the logical input and
         output ports are the same and the MLF_ALLOW_LOOPBACK flag is not
         set. It also drops MLF_LOCAL_ONLY packets directed to a localnet port.
-        It resubmits other packets to table 40.
+        It resubmits other packets to table 42.
       </p>
     </li>
 
     <li>
       <p>
-        OpenFlow tables 40 through 63 execute the logical egress pipeline from
+        OpenFlow tables 42 through 62 execute the logical egress pipeline from
         the <code>Logical_Flow</code> table in the OVN Southbound database.
         The egress pipeline can perform a final stage of validation before
         packet delivery.  Eventually, it may execute an <code>output</code>
@@ -1554,7 +1558,7 @@
     <li>
      <p>
        Table 64 bypasses OpenFlow loopback when MLF_ALLOW_LOOPBACK is set.
-       Logical loopback was handled in table 39, but OpenFlow by default also
+       Logical loopback was handled in table 41, but OpenFlow by default also
        prevents loopback to the OpenFlow ingress port.  Thus, when
        MLF_ALLOW_LOOPBACK is set, OpenFlow table 64 saves the OpenFlow ingress
        port, sets it to zero, resubmits to table 65 for logical-to-physical
@@ -1592,8 +1596,8 @@
     traverse tables 0 to 65 as described in the previous section
     <code>Architectural Physical Life Cycle of a Packet</code>, using the
     logical datapath representing the logical switch that the sender is
-    attached to.  At table 37, the packet will use the fallback flow that
-    resubmits locally to table 38 on the same hypervisor.  In this case,
+    attached to.  At table 39, the packet will use the fallback flow that
+    resubmits locally to table 40 on the same hypervisor.  In this case,
     all of the processing from table 0 to table 65 occurs on the hypervisor
     where the sender resides.
   </p>
@@ -1624,7 +1628,7 @@
   <p>
     The packet traverses tables 8 to 65 a third and final time.  If the
     destination VM or container resides on a remote hypervisor, then table
-    37 will send the packet on a tunnel port from the sender's hypervisor
+    39 will send the packet on a tunnel port from the sender's hypervisor
     to the remote hypervisor.  Finally table 65 will output the packet
     directly to the destination VM or container.
   </p>
@@ -1651,9 +1655,9 @@
     When a hypervisor processes a packet on a logical datapath
     representing a logical switch, and the logical egress port is a
     <code>l3gateway</code> port representing connectivity to a gateway
-    router, the packet will match a flow in table 37 that sends the
+    router, the packet will match a flow in table 39 that sends the
     packet on a tunnel port to the chassis where the gateway router
-    resides.  This processing in table 37 is done in the same manner as
+    resides.  This processing in table 39 is done in the same manner as
     for VIFs.
   </p>
 
@@ -1746,21 +1750,21 @@
     chassis, one additional mechanism is required.  When a packet
     leaves the ingress pipeline and the logical egress port is the
     distributed gateway port, one of two different sets of actions is
-    required at table 37:
+    required at table 39:
   </p>
 
   <ul>
     <li>
       If the packet can be handled locally on the sender's hypervisor
       (e.g. one-to-one NAT traffic), then the packet should just be
-      resubmitted locally to table 38, in the normal manner for
+      resubmitted locally to table 40, in the normal manner for
       distributed logical patch ports.
     </li>
 
     <li>
       However, if the packet needs to be handled on the chassis
       associated with the distributed gateway port (e.g. one-to-many
-      SNAT traffic or non-NAT traffic), then table 37 must send the
+      SNAT traffic or non-NAT traffic), then table 39 must send the
       packet on a tunnel port to that chassis.
     </li>
   </ul>
@@ -1772,11 +1776,11 @@
     egress port to the type <code>chassisredirect</code> logical port is
     simply a way to indicate that although the packet is destined for
     the distributed gateway port, it needs to be redirected to a
-    different chassis.  At table 37, packets with this logical egress
-    port are sent to a specific chassis, in the same way that table 37
+    different chassis.  At table 39, packets with this logical egress
+    port are sent to a specific chassis, in the same way that table 39
     directs packets whose logical egress port is a VIF or a type
     <code>l3gateway</code> port to different chassis.  Once the packet
-    arrives at that chassis, table 38 resets the logical egress port to
+    arrives at that chassis, table 40 resets the logical egress port to
     the value representing the distributed gateway port.  For each
     distributed gateway port, there is one type
     <code>chassisredirect</code> port, in addition to the distributed
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 8d56d0c6e..35acda107 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -2036,6 +2036,14 @@ or
         the affinity timeslot. Max supported affinity_timeout is 65535
         seconds.
       </column>
+
+      <column name="options" key="ct_flush" type='{"type": "boolean"}'>
+        Indicates whether <code>ovn-controller</code> should flush CT entries
+        that are related to this LB.  The flush happens when the LB is
+        removed, any of its backends is updated or removed, or the LB is no
+        longer considered local by <code>ovn-controller</code>.  This option
+        is set to <code>false</code> by default; see the example below.
+      </column>
     </group>
   </table>
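A sketch of enabling the new option on an existing load balancer (the name lb0
is hypothetical); with it set, ovn-controller flushes the related CT entries on
backend or locality changes as described above:

    # Opt back in to CT flushing for this load balancer (disabled by default).
    ovn-nbctl set load_balancer lb0 options:ct_flush=true
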
 
diff --git a/ovn-sb.xml b/ovn-sb.xml
index a77f8f4ef..8ca206109 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -472,9 +472,8 @@
 
     <column name="type">
       The encapsulation to use to transmit packets to this chassis.
-      Hypervisors must use either <code>geneve</code> or
-      <code>stt</code>.  Gateways may use <code>vxlan</code>,
-      <code>geneve</code>, or <code>stt</code>.
+      Hypervisors and gateways must use one of: <code>geneve</code>,
+      <code>vxlan</code>, or <code>stt</code>.
     </column>
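The encapsulation advertised in this column comes from the chassis
configuration in the local Open vSwitch database. A sketch of selecting one of
the now uniformly supported types on a hypervisor or gateway (the tunnel IP is
hypothetical):

    # Use Geneve; after this change, vxlan or stt are configured the same way.
    ovs-vsctl set Open_vSwitch . \
        external-ids:ovn-encap-type=geneve \
        external-ids:ovn-encap-ip=192.168.0.1
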
 
     <column name="options">
diff --git a/rhel/usr_lib_systemd_system_ovn-db@.service b/rhel/usr_lib_systemd_system_ovn-db@.service
index 98556a673..c835e4967 100644
--- a/rhel/usr_lib_systemd_system_ovn-db@.service
+++ b/rhel/usr_lib_systemd_system_ovn-db@.service
@@ -33,7 +33,7 @@ EnvironmentFile=-/etc/sysconfig/ovn-%i
 ExecStartPre=-/usr/bin/chown -R ${OVN_USER_ID} ${OVN_DBDIR}
 ExecStart=/usr/share/ovn/scripts/ovn-ctl \
           --ovn-user=${OVN_USER_ID} start_%i_ovsdb $OPTIONS $ovn_%i_opts
-ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_%i_ovsdb
+ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_%i_ovsdb $OPTIONS $ovn_%i_opts
 
 [Install]
 WantedBy=multi-user.target
diff --git a/rhel/usr_lib_systemd_system_ovn-northd.service b/rhel/usr_lib_systemd_system_ovn-northd.service
index d281f861c..6c4c6621c 100644
--- a/rhel/usr_lib_systemd_system_ovn-northd.service
+++ b/rhel/usr_lib_systemd_system_ovn-northd.service
@@ -26,7 +26,7 @@ EnvironmentFile=-/etc/sysconfig/ovn-northd
 ExecStartPre=-/usr/bin/chown -R ${OVN_USER_ID} ${OVN_DBDIR}
 ExecStart=/usr/share/ovn/scripts/ovn-ctl \
           --ovn-user=${OVN_USER_ID} start_northd $OVN_NORTHD_OPTS
-ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_northd
+ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_northd $OVN_NORTHD_OPTS
 
 [Install]
 WantedBy=multi-user.target
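Because ExecStop now receives the same options as ExecStart, option-dependent
behaviour of ovn-ctl stays consistent across start and stop. A hedged sketch of
where such options would be set; the specific option shown is only an assumed
example:

    # /etc/sysconfig/ovn-northd
    # Passed to both start_northd and stop_northd by the unit above.
    OVN_NORTHD_OPTS="--ovn-manage-ovsdb=no"
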
diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
index bbe142ae3..dd7eda516 100644
--- a/tests/ovn-controller.at
+++ b/tests/ovn-controller.at
@@ -493,7 +493,8 @@ check ovn-nbctl --wait=hv sync
 
 # And check that it gets propagated to br-int external_ids.
 as hv1
-OVS_WAIT_UNTIL([ovs-vsctl get Bridge br-int external_ids:ovn-nb-cfg], [0], [1])
+OVS_WAIT_FOR_OUTPUT([ovs-vsctl get Bridge br-int external_ids:ovn-nb-cfg], [0], ["1"
+])
 
 nb_cfg_ts=$(fetch_column Chassis_Private nb_cfg_timestamp name=hv1)
 as hv1
@@ -672,22 +673,26 @@ check ovs-vsctl del-ssl
 start_daemon ovn-controller -p $key -c $cert -C $cacert
 
 # SSL should not connect because of key and cert mismatch
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [not connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [not connected
+])
 
 # Modify the files with the correct key and cert, and reconnect should succeed
 cp $PKIDIR/$key $key
 cp $PKIDIR/$cert $cert
 
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [connected
+])
 
 # Remove the files and expect the connection to drop
 rm $key $cert
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [not connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [not connected
+])
 
 # Restore the files again and expect the connection to recover
 cp $PKIDIR/$key $key
 cp $PKIDIR/$cert $cert
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [connected
+])
 
 cat hv1/ovn-controller.log
 
@@ -868,7 +873,7 @@ meta=$(ovn-sbctl get datapath ls1 tunnel_key)
 port=$(ovn-sbctl get port_binding ls1-rp tunnel_key)
 check ovn-nbctl lrp-add lr0 rp-ls1 00:00:01:01:02:03 192.168.1.254/24
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int | grep table=38 | grep -q "reg15=0x${port},metadata=0x${meta}"])
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int | grep table=40 | grep -q "reg15=0x${port},metadata=0x${meta}"])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -912,14 +917,14 @@ for i in $(seq 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.2 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.3 actions=drop
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$i
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$i
 ])
 done
 
@@ -934,15 +939,15 @@ for i in $(seq 10); do
     check ovn-nbctl remove address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 9; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}'], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10 actions=drop
 ])
     fi
     if test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((10 - $i))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((10 - $i))
 ])
     fi
 done
@@ -960,7 +965,7 @@ for i in $(seq 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i,10.0.1.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.2 actions=drop
@@ -970,7 +975,7 @@ priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.1.2 actions=dr
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.1.3 actions=drop
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i * 2))
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i * 2))
 ])
 done
 
@@ -987,11 +992,11 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.21,10.0.0.22 -- \
                 remove address_set as1 addresses 10.0.0.10
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.21], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.21], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.22], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.22], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.10], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.10], [1], [ignore])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
 AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [0
@@ -1003,9 +1008,9 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl remove address_set as1 addresses 10.0.0.21,10.0.0.22 -- \
                 add address_set as1 addresses 10.0.0.10
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.21], [1], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.22], [1], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.10], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.21], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.22], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.10], [0], [1
 ])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
@@ -1018,9 +1023,9 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.21 -- \
                 remove address_set as1 addresses 10.0.0.10
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.21], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.21], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.10], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.10], [1], [ignore])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
 AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [0
@@ -1032,12 +1037,12 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.22,10.0.0.23 -- \
                 remove address_set as1 addresses 10.0.0.9,10.0.0.8
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.22], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.22], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.23], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.23], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.8], [1], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.9], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.8], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.9], [1], [ignore])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
 AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [0
@@ -1085,7 +1090,7 @@ for i in $(seq 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 1; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1,tp_dst=111 actions=drop
 priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1,tp_dst=222 actions=drop
@@ -1093,12 +1098,12 @@ priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1,tp_dst=33
 ])
     else
         # (1 conj_id flow + 3 tp_dst flows) = 4 extra flows
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i + 4))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i + 4))
 ])
     fi
 
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1124,17 +1129,17 @@ for i in $(seq 10); do
     check ovn-nbctl remove address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     elif test "$i" = 9; then
         # no conjunction left
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,tp_dst=111 actions=drop
 priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,tp_dst=222 actions=drop
 priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,tp_dst=333 actions=drop
 ])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((14 - $i))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((14 - $i))
 ])
     fi
 done
@@ -1150,7 +1155,7 @@ for i in $(seq 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i,10.0.1.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1166,7 +1171,7 @@ priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,tp_dst=222 actions=conjun
 priority=1100,tcp,reg15=0x$port_key,metadata=0x$dp_key,tp_dst=333 actions=conjunction,2/2)
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i * 2 + 4))
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i * 2 + 4))
 ])
 done
 
@@ -1182,11 +1187,11 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.21,10.0.0.22 -- \
                 remove address_set as1 addresses 10.0.0.10
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.21], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.21], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.22], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.22], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.10], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.10], [1], [ignore])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
 AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [0
@@ -1198,9 +1203,9 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl remove address_set as1 addresses 10.0.0.21,10.0.0.22 -- \
                 add address_set as1 addresses 10.0.0.10
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.21], [1], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.22], [1], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.10], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.21], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.22], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.10], [0], [1
 ])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
@@ -1213,9 +1218,9 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.21 -- \
                 remove address_set as1 addresses 10.0.0.10
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.21], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.21], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.10], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.10], [1], [ignore])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
 AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [0
@@ -1227,12 +1232,12 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.22,10.0.0.23 -- \
                 remove address_set as1 addresses 10.0.0.9,10.0.0.8
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.22], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.22], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c 10\.0\.0\.23], [0], [1
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c 10\.0\.0\.23], [0], [1
 ])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.8], [1], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10\.0\.0\.9], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.8], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10\.0\.0\.9], [1], [ignore])
 
 reprocess_count_new=$(read_counter consider_logical_flow)
 AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [0
@@ -1282,18 +1287,18 @@ for i in $(seq 10); do
                     add address_set as2 addresses 10.0.0.$j
     check ovn-nbctl --wait=hv sync
     if test "$i" = 1; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1,nw_dst=10.0.0.6 actions=drop
 ])
     else
         # (1 conj_id + nw_src * i + nw_dst * i) = 1 + i*2 flows
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i*2 + 1))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i*2 + 1))
 ])
     fi
 
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1321,15 +1326,15 @@ for i in $(seq 10); do
                     remove address_set as2 addresses 10.0.0.$j
     check ovn-nbctl --wait=hv sync
     if test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     elif test "$i" = 9; then
         # no conjunction left
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,nw_dst=10.0.0.15 actions=drop
 ])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((21 - $i*2))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((21 - $i*2))
 ])
     fi
 done
@@ -1350,14 +1355,14 @@ for i in $(seq 2 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1,nw_dst=10.0.0.6 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.2,nw_dst=10.0.0.6 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.3,nw_dst=10.0.0.6 actions=drop
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$i
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$i
 ])
 done
 
@@ -1376,16 +1381,16 @@ for i in $(seq 10); do
     check ovn-nbctl remove address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 9; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}'], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,nw_dst=10.0.0.6 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,nw_dst=10.0.0.7 actions=drop
 ])
     elif test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     else
         # 2 dst + (10 - i) src + 1 conj_id
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((10 - $i + 3))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((10 - $i + 3))
 ])
     fi
 done
@@ -1439,18 +1444,18 @@ for i in $(seq 10); do
                     add address_set as2 addresses 10.0.0.$j
     check ovn-nbctl --wait=hv sync
     if test "$i" = 1; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_dst=10.0.0.6 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1 actions=drop
 ])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i*2))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i*2))
 ])
     fi
 
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1477,9 +1482,9 @@ for i in $(seq 10); do
                     remove address_set as2 addresses 10.0.0.$j
     check ovn-nbctl --wait=hv sync
     if test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((20 - $i*2))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((20 - $i*2))
 ])
     fi
 done
@@ -1535,21 +1540,21 @@ for i in $(seq 10); do
                     add address_set as2 addresses 10.0.0.$j
     check ovn-nbctl --wait=hv sync
     if test "$i" = 1; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1 actions=drop
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.6 actions=drop
 ])
     elif test "$i" -lt 6; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i*2))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i*2))
 ])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((5 + $i))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((5 + $i))
 ])
     fi
 
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1576,12 +1581,12 @@ for i in $(seq 10); do
                     remove address_set as2 addresses 10.0.0.$j
     check ovn-nbctl --wait=hv sync
     if test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     elif test "$i" -lt 6; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((15 - $i))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((15 - $i))
 ])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((10 - ($i - 5)*2))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((10 - ($i - 5)*2))
 ])
     fi
 done
@@ -1633,18 +1638,18 @@ for i in $(seq 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 1; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.1,nw_dst=10.0.0.1 actions=drop
 ])
     else
         # (1 conj_id + nw_src * i + nw_dst * i) = 1 + i*2 flows
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i*2 + 1))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i*2 + 1))
 ])
     fi
 
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1670,15 +1675,15 @@ for i in $(seq 10); do
     check ovn-nbctl remove address_set as1 addresses 10.0.0.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 10; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     elif test "$i" = 9; then
         # no conjunction left
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.0.10,nw_dst=10.0.0.10 actions=drop
 ])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((21 - $i*2))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((21 - $i*2))
 ])
     fi
 done
@@ -1694,7 +1699,7 @@ for i in $(seq 10); do
     check ovn-nbctl add address_set as1 addresses 10.0.0.$i,10.0.1.$i
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | \
             sed -r 's/conjunction.*,/conjunction,/' | \
             sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1713,7 +1718,7 @@ priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.1.2 actions=co
 priority=1100,ip,reg15=0x$port_key,metadata=0x$dp_key,nw_src=10.0.1.3 actions=conjunction,2/2)
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$(($i * 4 + 1))
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$(($i * 4 + 1))
 ])
 done
 
@@ -1734,7 +1739,7 @@ check ovn-nbctl --wait=hv sync
 reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.4,10.0.0.5
 check ovn-nbctl --wait=hv sync
-AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
     grep -v reply | awk '{print $7, $8}' | \
     sed -r 's/conjunction.*,/conjunction,/' | \
     sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1758,7 +1763,7 @@ AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [1
 # Delete 2 IPs
 reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl --wait=hv remove address_set as1 addresses 10.0.0.4,10.0.0.5
-AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
     grep -v reply | awk '{print $7, $8}' | \
     sed -r 's/conjunction.*,/conjunction,/' | \
     sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1816,7 +1821,7 @@ check ovn-nbctl acl-add ls1 to-lport 100 'outport == "ls1-lp1" && ip4.src == $as
 check ovn-nbctl acl-add ls1 to-lport 100 'outport == "ls1-lp1" && ip4.src == $as2 && tcp && tcp.dst == {201, 202}' drop
 
 check ovn-nbctl --wait=hv sync
-AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
     grep -v reply | awk '{print $7, $8}' | \
     sed -r 's/conjunction.[[0-9]]*,/conjunction,/g' | \
     sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1841,7 +1846,7 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl add address_set as1 addresses 10.0.0.14,10.0.0.33 -- \
                 add address_set as2 addresses 10.0.0.24,10.0.0.33
 check ovn-nbctl --wait=hv sync
-AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
     grep -v reply | awk '{print $7, $8}' | \
     sed -r 's/conjunction.[[0-9]]*,/conjunction,/g' | \
     sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1872,7 +1877,7 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 check ovn-nbctl remove address_set as1 addresses 10.0.0.14,10.0.0.33 -- \
                 remove address_set as2 addresses 10.0.0.24,10.0.0.33
 check ovn-nbctl --wait=hv sync
-AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
     grep -v reply | awk '{print $7, $8}' | \
     sed -r 's/conjunction.[[0-9]]*,/conjunction,/g' | \
     sed -r 's/conj_id=.*,/conj_id=,/' | sort], [0], [dnl
@@ -1937,14 +1942,14 @@ for i in $(seq 5); do
     check ovn-nbctl add address_set as1 addresses "aa\:aa\:aa\:aa\:aa\:0$i"
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,reg15=0x$port_key,metadata=0x$dp_key,dl_src=aa:aa:aa:aa:aa:01 actions=drop
 priority=1100,reg15=0x$port_key,metadata=0x$dp_key,dl_src=aa:aa:aa:aa:aa:02 actions=drop
 priority=1100,reg15=0x$port_key,metadata=0x$dp_key,dl_src=aa:aa:aa:aa:aa:03 actions=drop
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$i
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$i
 ])
 done
 
@@ -1958,17 +1963,17 @@ reprocess_count_old=$(read_counter consider_logical_flow)
 for i in $(seq 5); do
     check ovn-nbctl remove address_set as1 addresses "aa\:aa\:aa\:aa\:aa\:0$i"
     check ovn-nbctl --wait=hv sync
-    ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"
+    ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"
     if test "$i" = 4; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}'], [0], [dnl
 priority=1100,reg15=0x$port_key,metadata=0x$dp_key,dl_src=aa:aa:aa:aa:aa:05 actions=drop
 ])
     fi
     if test "$i" = 5; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((5 - $i))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((5 - $i))
 ])
     fi
 done
@@ -2018,14 +2023,14 @@ for i in $(seq 5); do
     check ovn-nbctl add address_set as1 addresses "ff\:\:0$i"
     check ovn-nbctl --wait=hv sync
     if test "$i" = 3; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}' | sort], [0], [dnl
 priority=1100,ipv6,reg15=0x$port_key,metadata=0x$dp_key,ipv6_src=ff::1 actions=drop
 priority=1100,ipv6,reg15=0x$port_key,metadata=0x$dp_key,ipv6_src=ff::2 actions=drop
 priority=1100,ipv6,reg15=0x$port_key,metadata=0x$dp_key,ipv6_src=ff::3 actions=drop
 ])
     fi
-    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$i
+    AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$i
 ])
 done
 
@@ -2040,15 +2045,15 @@ for i in $(seq 5); do
     check ovn-nbctl remove address_set as1 addresses "ff\:\:0$i"
     check ovn-nbctl --wait=hv sync
     if test "$i" = 4; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44,reg15=0x$port_key | \
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46,reg15=0x$port_key | \
             grep -v reply | awk '{print $7, $8}'], [0], [dnl
 priority=1100,ipv6,reg15=0x$port_key,metadata=0x$dp_key,ipv6_src=ff::5 actions=drop
 ])
     fi
     if test "$i" = 5; then
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep "priority=1100"], [1], [ignore])
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep "priority=1100"], [1], [ignore])
     else
-        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [$((5 - $i))
+        AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [$((5 - $i))
 ])
     fi
 done
@@ -2060,6 +2065,57 @@ AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [2
 OVN_CLEANUP([hv1])
 AT_CLEANUP
 
+AT_SETUP([ovn-controller - address set del-and-add])
+
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+check ovs-vsctl -- add-port br-int hv1-vif1 -- \
+    set interface hv1-vif1 external-ids:iface-id=ls1-lp1
+
+check ovn-nbctl ls-add ls1
+
+check ovn-nbctl lsp-add ls1 ls1-lp1 \
+-- lsp-set-addresses ls1-lp1 "f0:00:00:00:00:01"
+
+wait_for_ports_up
+ovn-appctl -t ovn-controller vlog/set file:dbg
+
+ovn-nbctl create address_set name=as1 addresses=8.8.8.8
+check ovn-nbctl acl-add ls1 to-lport 100 'outport == "ls1-lp1" && ip4.src == $as1' drop
+check ovn-nbctl --wait=hv sync
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100"], [0], [1
+])
+
+# pause ovn-northd
+check as northd ovn-appctl -t ovn-northd pause
+check as northd-backup ovn-appctl -t ovn-northd pause
+
+# Simulate an SB address set "del and add" notification to ovn-controller in
+# the same IDL iteration. The flows programmed by ovn-controller should
+# reflect the newly added address set. In reality this can happen when the CMS
+# deletes an address set and immediately creates a new one with the same name
+# (with the same or different content). The notifications for both changes can
+# reach ovn-controller in one shot, and the order of the "del" and "add" in
+# the IDL is undefined. This test runs the scenario ten times to make sure the
+# different orders are covered and handled properly.
+
+flow_count=$(ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100")
+for i in $(seq 10); do
+    # Delete and recreate the SB address set with the same name and an extra IP.
+    addrs_=$(fetch_column address_set addresses name=as1)
+    addrs=${addrs_// /,}
+    AT_CHECK([ovn-sbctl destroy address_set as1 -- create address_set name=as1 addresses=$addrs,1.1.1.$i], [0], [ignore])
+    OVS_WAIT_UNTIL([test $(as hv1 ovs-ofctl dump-flows br-int table=46 | grep -c "priority=1100") = "$(($i + 1))"])
+done
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
 AT_SETUP([ovn-controller - I-P handle lb_hairpin_use_ct_mark change])
 
 ovn_start --backup-northd=none
@@ -2161,7 +2217,7 @@ AT_CHECK([ovs-ofctl dump-flows br-int | grep 10.1.2.3], [0], [ignore])
 sleep 5
 
 # Check after the wait
-OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4], [0], [ignore])
+OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4])
 lflow_run_2=$(ovn-appctl -t ovn-controller coverage/read-counter lflow_run)
 
 # Verify that the flow compute completed during the wait (after the wait it
@@ -2172,7 +2228,7 @@ AT_CHECK_UNQUOTED([echo $lflow_run_1], [0], [$lflow_run_2
 # Restart OVS this time, and wait until flows are reinstalled
 OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
 start_daemon ovs-vswitchd --enable-dummy=system -vvconn -vofproto_dpif -vunixctl
-OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4], [0], [ignore])
+OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4])
 
 check ovn-nbctl --wait=hv lb-add lb3 2.2.2.2 10.1.2.5 \
 -- ls-lb-add ls1 lb3
diff --git a/tests/ovn-macros.at b/tests/ovn-macros.at
index ee942e8a6..6f2d085ae 100644
--- a/tests/ovn-macros.at
+++ b/tests/ovn-macros.at
@@ -817,6 +817,29 @@ ovn_trace_client() {
     ovs-appctl -t $target trace "$@" | tee trace | sed '/^# /d'
 }
 
+# Takes a string of scapy Python code that symbolically describes a packet.
+# Returns a hex string containing the bytes that encode the described
+# packet.
+#
+# Scapy docs: https://scapy.readthedocs.io/en/latest/usage.html
+#
+# Example usage:
+#
+# packet=$(fmt_pkt "
+#     Ether(dst='ff:ff:ff:ff:ff:ff', src='50:64:00:00:00:01') /
+#     IPv6(src='abed::1', dst='ff02::1:ff00:2') /
+#     ICMPv6ND_NS(tgt='abed::2')
+# ")
+#
+# ovs-appctl netdev-dummy/receive $vif $packet
+#
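+# As a further illustrative sketch (not taken from any existing test; the
+# MAC/IP addresses and ports below are arbitrary placeholders), an IPv4/UDP
+# frame can be described the same way:
+#
+# packet=$(fmt_pkt "
+#     Ether(dst='ff:ff:ff:ff:ff:ff', src='50:64:00:00:00:01') /
+#     IP(src='192.168.0.1', dst='192.168.0.2') /
+#     UDP(sport=1234, dport=5678)
+# ")
+#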
+fmt_pkt() {
+    echo "from scapy.all import *; \
+          import binascii; \
+          out = binascii.hexlify(raw($1)); \
+          print(out.decode())" | $PYTHON3
+}
+
 OVS_END_SHELL_HELPERS
 
 m4_define([OVN_POPULATE_ARP], [AT_CHECK(ovn_populate_arp__, [0], [ignore])])
diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at
index 2fffe1850..478a32f5a 100644
--- a/tests/ovn-nbctl.at
+++ b/tests/ovn-nbctl.at
@@ -1482,6 +1482,32 @@ UUID                                    LB                  PROTO      VIP
 
 dnl ---------------------------------------------------------------------
 
+OVN_NBCTL_TEST([ovn_nbctl_template_lbs], [Template LBs], [
+check ovn-nbctl --template lb-add lb0 ^vip ^backend
+check ovn-nbctl --template lb-add lb1 ^vip:^vport ^backend udp
+check ovn-nbctl --template lb-add lb2 ^vip:^vport ^backend udp ipv4
+check ovn-nbctl --template lb-add lb3 ^vip:^vport ^backend udp ipv6
+check ovn-nbctl --template lb-add lb4 ^vip:^vport ^backend:^bport udp ipv4
+check ovn-nbctl --template lb-add lb5 ^vip:^vport ^backend:^bport udp ipv6
+check ovn-nbctl --template lb-add lb6 ^vip:^vport 1.1.1.1:111 udp ipv4
+check ovn-nbctl --template lb-add lb7 ^vip:^vport [[1::1]]:111 udp ipv6
+
+AT_CHECK([ovn-nbctl lb-list | uuidfilt], [0], [dnl
+UUID                                    LB                  PROTO      VIP            IPs
+<0>    lb0                 tcp        ^vip           ^backend
+<1>    lb1                 udp        ^vip:^vport    ^backend
+<2>    lb2                 udp        ^vip:^vport    ^backend
+<3>    lb3                 udp        ^vip:^vport    ^backend
+<4>    lb4                 udp        ^vip:^vport    ^backend:^bport
+<5>    lb5                 udp        ^vip:^vport    ^backend:^bport
+<6>    lb6                 udp        ^vip:^vport    1.1.1.1:111
+<7>    lb7                 udp        ^vip:^vport    [[1::1]]:111
+])
+
+])
+
+dnl ---------------------------------------------------------------------
+
 OVN_NBCTL_TEST([ovn_nbctl_basic_lr], [basic logical router commands], [
 AT_CHECK([ovn-nbctl lr-add lr0])
 AT_CHECK([ovn-nbctl lr-list | uuidfilt], [0], [dnl
@@ -2599,6 +2625,7 @@ OVN_NBCTL_TEST_STOP "/terminating with signal 15/d"
 AT_CLEANUP
 
 AT_SETUP([ovn-nbctl - daemon ssl files change])
+AT_SKIP_IF([test "$HAVE_OPENSSL" = no])
 dnl Create ovn-nb database.
 AT_CHECK([ovsdb-tool create ovn-nb.db $abs_top_srcdir/ovn-nb.ovsschema])
 
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 3fa02d2b3..93854dfdc 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -2486,6 +2486,7 @@ check ovn-nbctl --wait=sb \
 
 AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl
   table=17(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=17(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=17(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=3 (ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=3 (ls_out_acl_hint    ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -2530,9 +2531,12 @@ ovn-nbctl --wait=sb clear logical_switch ls load_balancer
 
 AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl
   table=17(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=17(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=3 (ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
+  table=4 (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=4 (ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=7 (ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
+  table=8 (ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=8 (ls_in_acl          ), priority=65535, match=(1), action=(next;)
 ])
 
@@ -2871,7 +2875,6 @@ AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort | sed 's/tabl
 AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort | sed 's/table=../table=??/g'], [0], [dnl
   table=??(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_hairpin      ), priority=1    , match=((reg0[[6]] == 1 || reg0[[12]] == 1)), action=(eth.dst <-> eth.src; outport = inport; flags.loopback = 1; output;)
-  table=??(ls_in_hairpin      ), priority=1000 , match=(reg0[[14]] == 1), action=(next(pipeline=ingress, table=??);)
 ])
 
 check ovn-nbctl -- ls-lb-del sw0 lb0
@@ -2887,7 +2890,6 @@ AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort | sed 's/tabl
 
 AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort | sed 's/table=../table=??/g'], [0], [dnl
   table=??(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
-  table=??(ls_in_hairpin      ), priority=1000 , match=(reg0[[14]] == 1), action=(next(pipeline=ingress, table=??);)
 ])
 
 check ovn-nbctl -- add load_balancer_group $lbg load_balancer $lb0
@@ -2908,7 +2910,6 @@ AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort | sed 's/tabl
 AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort | sed 's/table=../table=??/g'], [0], [dnl
   table=??(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_hairpin      ), priority=1    , match=((reg0[[6]] == 1 || reg0[[12]] == 1)), action=(eth.dst <-> eth.src; outport = inport; flags.loopback = 1; output;)
-  table=??(ls_in_hairpin      ), priority=1000 , match=(reg0[[14]] == 1), action=(next(pipeline=ingress, table=??);)
 ])
 
 AT_CLEANUP
@@ -3757,18 +3758,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3788,18 +3789,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3813,6 +3814,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -3838,18 +3840,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3864,6 +3866,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -3902,18 +3905,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3929,6 +3932,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -3953,14 +3957,13 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.20 && tcp), action=(reg0 = 10.0.0.20; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.20), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | grep skip_snat_for_lb | sort], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.20 && tcp && tcp.dst == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
 
@@ -3970,6 +3973,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | grep skip_snat_for_lb | sed 's/table=./t
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -4111,6 +4115,7 @@ check ovn-nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0
 check ovn-nbctl --wait=sb sync
 
 check_stateful_flows() {
+    action=$1
     ovn-sbctl dump-flows sw0 > sw0flows
     AT_CAPTURE_FILE([sw0flows])
 
@@ -4144,12 +4149,12 @@ check_stateful_flows() {
   table=??(ls_in_stateful     ), priority=100  , match=(reg0[[1]] == 1 && reg0[[13]] == 1), action=(ct_commit { ct_mark.blocked = 0; ct_label.label = reg3; }; next;)
 ])
 
-    AT_CHECK([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl
+    AT_CHECK_UNQUOTED([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl
   table=1 (ls_out_pre_lb      ), priority=0    , match=(1), action=(next;)
   table=1 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
   table=1 (ls_out_pre_lb      ), priority=110  , match=(eth.mcast), action=(next;)
-  table=1 (ls_out_pre_lb      ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
-  table=1 (ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw0-lr0"), action=(next;)
+  table=1 (ls_out_pre_lb      ), priority=110  , match=(eth.src == \$svc_monitor_mac), action=(next;)
+  table=1 (ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw0-lr0"), action=($action)
   table=1 (ls_out_pre_lb      ), priority=110  , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;)
   table=1 (ls_out_pre_lb      ), priority=110  , match=(reg0[[16]] == 1), action=(next;)
 ])
@@ -4169,13 +4174,13 @@ check_stateful_flows() {
 ])
 }
 
-check_stateful_flows
+check_stateful_flows "ct_clear; next;"
 
 # Add few ACLs
 check ovn-nbctl --wait=sb acl-add sw0 from-lport 1002 "ip4 && tcp && tcp.dst == 80" allow-related
 check ovn-nbctl --wait=sb acl-add sw0 to-lport 1002 "ip4 && tcp && tcp.src == 80" drop
 
-check_stateful_flows
+check_stateful_flows "next;"
 
 # Remove load balancers from sw0
 check ovn-nbctl ls-lb-del sw0 lb0
@@ -4231,6 +4236,15 @@ AT_CHECK([grep "ls_out_stateful" sw0flows | sort], [0], [dnl
   table=7 (ls_out_stateful    ), priority=100  , match=(reg0[[1]] == 1 && reg0[[13]] == 1), action=(ct_commit { ct_mark.blocked = 0; ct_label.label = reg3; }; next;)
 ])
 
+# LB with event=false and reject=false
+AT_CHECK([ovn-nbctl create load_balancer name=lb1 options:reject=false options:event=false vips:\"10.0.0.20\"=\"\" protocol=tcp], [0], [ignore])
+check ovn-nbctl --wait=sb ls-lb-add sw0 lb1
+
+AT_CHECK([ovn-sbctl dump-flows sw0 | grep "ls_in_lb " | sort ], [0], [dnl
+  table=12(ls_in_lb           ), priority=0    , match=(1), action=(next;)
+  table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 10.0.0.20), action=(drop;)
+])
+
 AT_CLEANUP
 ])
 
@@ -4871,7 +4885,7 @@ check ovn-nbctl lsp-set-options ls2-ro2 router-port=ro2-ls2
 ovn-sbctl lflow-list ls1 > ls1_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:01), action=(outport = "ls1-ro1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:02), action=(outport = "vm1"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4883,7 +4897,7 @@ AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort],
 ovn-sbctl lflow-list ls2 > ls2_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls2_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:02:01), action=(outport = "ls2-ro2"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:02:02), action=(outport = "vm2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4903,7 +4917,7 @@ check ovn-nbctl --wait=sb lr-nat-add ro2 snat 20.0.0.200 192.168.2.200/30
 ovn-sbctl lflow-list ls1 > ls1_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:01), action=(outport = "ls1-ro1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:02), action=(outport = "vm1"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4916,7 +4930,7 @@ AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort],
 ovn-sbctl lflow-list ls2 > ls2_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls2_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:02:01), action=(outport = "ls2-ro2"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:02:02), action=(outport = "vm2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4937,7 +4951,7 @@ check ovn-nbctl --wait=sb lr-nat-add ro2 snat 40.0.0.200 192.168.2.148/30
 ovn-sbctl lflow-list ls1 > ls1_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:01), action=(outport = "ls1-ro1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:02), action=(outport = "vm1"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4951,7 +4965,7 @@ AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort],
 ovn-sbctl lflow-list ls2 > ls2_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls2_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:02:01), action=(outport = "ls2-ro2"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:02:02), action=(outport = "vm2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4970,7 +4984,7 @@ ovn-nbctl --wait=sb lr-lb-add ro1 lb1
 ovn-sbctl lflow-list ls1 > ls1_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:01), action=(outport = "ls1-ro1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:02), action=(outport = "vm1"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -4988,7 +5002,7 @@ ovn-nbctl --wait=sb lb-add lb1 192.168.4.100:80 10.0.0.10:80
 ovn-sbctl lflow-list ls1 > ls1_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:01), action=(outport = "ls1-ro1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:02), action=(outport = "vm1"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -5012,7 +5026,7 @@ ovn-nbctl --wait=sb lrp-set-gateway-chassis ro1-ls1 chassis-1 30
 ovn-sbctl lflow-list ls1 > ls1_lflows
 AT_CHECK([grep "ls_in_l2_lkup" ls1_lflows | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:01), action=(outport = "ls1-ro1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:01:02), action=(outport = "vm1"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -5211,25 +5225,23 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5284,25 +5296,23 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5314,6 +5324,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5349,25 +5360,23 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5379,6 +5388,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5416,28 +5426,25 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.10 && tcp), action=(reg0 = 172.168.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.10 && tcp && tcp.dst == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5449,6 +5456,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5496,31 +5504,27 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.10 && tcp), action=(reg0 = 172.168.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip6.dst == def0::2 && tcp), action=(xxreg0 = def0::2; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip6.dst == def0::2), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.10 && tcp && tcp.dst == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && ip6.dst == def0::2 && tcp && tcp.dst == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5532,6 +5536,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5572,18 +5577,17 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && tcp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && tcp && tcp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5594,6 +5598,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5634,9 +5639,11 @@ ovn-sbctl set service_monitor $sm_vip2 status=offline
 
 AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.10.10 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.10.10), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.10), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5646,9 +5653,11 @@ check ovn-nbctl --wait=sb set load_balancer lb5 options:skip_snat=true
 
 AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.10.10 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.10.10), action=(flags.skip_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.10), action=(flags.skip_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5660,9 +5669,58 @@ check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="route
 
 AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.10.10 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.10.10), action=(flags.force_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.10), action=(flags.force_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
+])
+
+# LB with event=false and reject=false
+check ovn-nbctl lr-lb-del lr0
+check ovn-nbctl remove logical_router lr0 options lb_force_snat_ip
+AT_CHECK([ovn-nbctl create load_balancer name=lb6 options:reject=false options:event=false vips:\"172.168.10.30\"=\"\" protocol=tcp], [0], [ignore])
+check ovn-nbctl --wait=sb lr-lb-add lr0 lb6
+
+AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
+  table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.30), action=(drop;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
+])
+
+# LB with event=false, reject=false and skip_snat
+check ovn-nbctl --wait=sb set load_balancer lb6 options:skip_snat=true
+
+AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
+  table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.30), action=(flags.skip_snat_for_lb = 1; drop;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
+])
+
+check ovn-nbctl remove load_balancer lb6 options skip_snat
+
+# LB with event=false, reject=false and force_snat
+check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="router_ip"
+
+AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
+  table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.30), action=(flags.force_snat_for_lb = 1; drop;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -6692,11 +6750,12 @@ dnl Flows to skip TTL == {0, 1} check for IGMP and MLD packets.
 AT_CHECK([grep -e 'lr_in_ip_input    ' lrflows | grep -e 'igmp' -e 'mld' -e 'ip.ttl == {0, 1}' | sed 's/table=../table=??/'], [0], [dnl
   table=??(lr_in_ip_input     ), priority=120  , match=((mldv1 || mldv2) && ip.ttl == 1), action=(next;)
   table=??(lr_in_ip_input     ), priority=120  , match=(igmp && ip.ttl == 1), action=(next;)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp1" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 10.10.10.1 ; ip.ttl = 254; outport = "lrp1"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp1" && ip6 && ip6.src == 1010::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 1010::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp1"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp2" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 20.20.20.1 ; ip.ttl = 254; outport = "lrp2"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp2" && ip6 && ip6.src == 2020::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 2020::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp2"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=30   , match=(ip4 && ip.ttl == {0, 1}), action=(drop;)
+  table=??(lr_in_ip_input     ), priority=32   , match=(ip.ttl == {0, 1} && !ip.later_frag && (ip4.mcast || ip6.mcast)), action=(drop;)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp1" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 10.10.10.1 ; ip.ttl = 254; outport = "lrp1"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp1" && ip6 && ip6.src == 1010::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 1010::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp1"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp2" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 20.20.20.1 ; ip.ttl = 254; outport = "lrp2"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp2" && ip6 && ip6.src == 2020::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 2020::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp2"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=30   , match=(ip.ttl == {0, 1}), action=(drop;)
 ])
 
 dnl Flows to "route" (statically forward) without decrementing TTL for
@@ -6755,6 +6814,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
   table=??(ls_in_acl          ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -6809,6 +6869,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
   table=??(ls_in_acl_after_lb ), priority=2003 , match=(reg0[[8]] == 1 && (ip4 && icmp)), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(/* drop */)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -6863,6 +6924,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
   table=??(ls_in_acl_after_lb ), priority=2001 , match=(reg0[[9]] == 1 && (ip4)), action=(/* drop */)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(/* drop */)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7154,11 +7216,14 @@ flow="inport == \"lsp1\" && eth.src == 00:00:00:00:00:01 && eth.dst == 00:00:00:
 AS_BOX([No ACL, default_acl_drop not set])
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl          ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
@@ -7173,11 +7238,14 @@ output("lsp2");
 AS_BOX([No ACL, default_acl_drop false])
 check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=false
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl          ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
@@ -7192,11 +7260,14 @@ output("lsp2");
 AS_BOX([No ACL, default_acl_drop true])
 check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=true
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl          ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
@@ -7218,12 +7289,15 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7240,12 +7314,15 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7262,12 +7339,15 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl          ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(drop;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7292,6 +7372,7 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7343,13 +7424,16 @@ check ovn-nbctl --wait=sb remove NB_Global . options default_acl_drop
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(ip4 && tcp), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7365,13 +7449,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=false
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(ip4 && tcp), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7387,13 +7474,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=true
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(ip4 && tcp), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(drop;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7418,6 +7508,7 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(reg0[[7]] == 1 && (ip4 && tcp)), action=(reg0[[1]] = 1; next;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(reg0[[8]] == 1 && (ip4 && tcp)), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7469,13 +7560,16 @@ check ovn-nbctl --wait=sb remove NB_Global . options default_acl_drop
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7491,13 +7585,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=false
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7513,13 +7610,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=true
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(drop;)
   table=??(ls_out_acl         ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7542,6 +7642,7 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7719,7 +7820,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
   table=??(ls_in_apply_port_sec), priority=0    , match=(1), action=(next;)
   table=??(ls_in_apply_port_sec), priority=50   , match=(reg0[[15]] == 1), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
   table=??(ls_in_l2_unknown   ), priority=0    , match=(1), action=(output;)
   table=??(ls_in_l2_unknown   ), priority=50   , match=(outport == "none"), action=(drop;)
@@ -7744,7 +7845,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
   table=??(ls_in_apply_port_sec), priority=0    , match=(1), action=(next;)
   table=??(ls_in_apply_port_sec), priority=50   , match=(reg0[[15]] == 1), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:01), action=(outport = "sw0p1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:02), action=(outport = "sw0p2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -7770,7 +7871,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
   table=??(ls_in_apply_port_sec), priority=0    , match=(1), action=(next;)
   table=??(ls_in_apply_port_sec), priority=50   , match=(reg0[[15]] == 1), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:01), action=(outport = "sw0p1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:02), action=(outport = "sw0p2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -7797,7 +7898,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
   table=??(ls_in_apply_port_sec), priority=0    , match=(1), action=(next;)
   table=??(ls_in_apply_port_sec), priority=50   , match=(reg0[[15]] == 1), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:01), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:02), action=(outport = "sw0p2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -7824,7 +7925,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
   table=??(ls_in_apply_port_sec), priority=0    , match=(1), action=(next;)
   table=??(ls_in_apply_port_sec), priority=50   , match=(reg0[[15]] == 1), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:01), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:02), action=(outport = "sw0p2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -7854,7 +7955,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
   table=??(ls_in_apply_port_sec), priority=0    , match=(1), action=(next;)
   table=??(ls_in_apply_port_sec), priority=50   , match=(reg0[[15]] == 1), action=(drop;)
   table=??(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
-  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
+  table=??(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac && (tcp || icmp || icmp6)), action=(handle_svc_check(inport);)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:01), action=(outport = "sw0p1"; output;)
   table=??(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:02), action=(outport = "sw0p2"; output;)
   table=??(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
@@ -7886,8 +7987,10 @@ check ovn-nbctl                                               \
 AS_BOX([No chassis registered - use ct_lb_mark and ct_mark.natted])
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=6 (ls_in_pre_stateful ), priority=120  , match=(reg0[[2]] == 1 && ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;)
   table=6 (ls_in_pre_stateful ), priority=110  , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
   table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
@@ -7898,8 +8001,10 @@ AS_BOX([Chassis registered that doesn't support ct_lb_mark - use ct_lb and ct_la
 check ovn-sbctl chassis-add hv geneve 127.0.0.1
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted), action=(next;)
   table=6 (ls_in_pre_stateful ), priority=120  , match=(reg0[[2]] == 1 && ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb;)
   table=6 (ls_in_pre_stateful ), priority=110  , match=(reg0[[2]] == 1), action=(ct_lb;)
   table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb(backends=42.42.42.2);)
@@ -7910,8 +8015,10 @@ AS_BOX([Chassis upgrades and supports ct_lb_mark - use ct_lb_mark and ct_mark.na
 check ovn-sbctl set chassis hv other_config:ct-no-masked-label=true
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=6 (ls_in_pre_stateful ), priority=120  , match=(reg0[[2]] == 1 && ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;)
   table=6 (ls_in_pre_stateful ), priority=110  , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
   table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
@@ -8244,15 +8351,17 @@ AT_CAPTURE_FILE([R1flows])
 
 AT_CHECK([grep "lr_in_lb_aff_check" R1flows | sort], [0], [dnl
   table=6 (lr_in_lb_aff_check ), priority=0    , match=(1), action=(next;)
-  table=6 (lr_in_lb_aff_check ), priority=100  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(reg9[[6]] = chk_lb_aff(); next;)
+  table=6 (lr_in_lb_aff_check ), priority=100  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(reg0 = ip4.dst; reg9[[16..31]] = tcp.dst; reg9[[6]] = chk_lb_aff(); next;)
 ])
 AT_CHECK([grep "lr_in_dnat " R1flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=10.0.0.2:80);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=20.0.0.2:80);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8270,11 +8379,13 @@ AT_CAPTURE_FILE([R1flows_skip_snat])
 
 AT_CHECK([grep "lr_in_dnat " R1flows_skip_snat | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; skip_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80; skip_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8289,11 +8400,13 @@ AT_CAPTURE_FILE([R1flows_force_snat])
 
 AT_CHECK([grep "lr_in_dnat " R1flows_force_snat | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; force_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80; force_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8330,8 +8443,9 @@ rm -f northd/ovn-northd.log
 check as northd ovn-appctl -t NORTHD_TYPE vlog/reopen
 check as northd ovn-appctl -t NORTHD_TYPE vlog/set jsonrpc:dbg
 check as northd ovn-appctl -t NORTHD_TYPE inc-engine/clear-stats
-check ovn-nbctl add address_set $foo_as_uuid addresses 1.1.1.3
-wait_column '1.1.1.1 1.1.1.2 1.1.1.3' Address_Set addresses name=foo
+check ovn-nbctl add address_set $foo_as_uuid addresses 1.1.1.3 -- \
+                add address_set $foo_as_uuid addresses 1.1.2.1/4
+wait_column '1.1.1.1 1.1.1.2 1.1.1.3 1.1.2.1/4' Address_Set addresses name=foo
 
 # There should be no recompute of the sync_to_sb_addr_set engine node .
 AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE inc-engine/show-stats sync_to_sb_addr_set recompute], [0], [0
@@ -8341,8 +8455,9 @@ AT_CHECK([grep transact northd/ovn-northd.log | grep Address_Set | \
 grep -c mutate], [0], [1
 ])
 
-check ovn-nbctl add address_set $foo_as_uuid addresses \
-1.1.1.4 -- remove address_set $foo_as_uuid addresses 1.1.1.1
+check ovn-nbctl add address_set $foo_as_uuid addresses 1.1.1.4 -- \
+                remove address_set $foo_as_uuid addresses 1.1.1.1 -- \
+                remove address_set $foo_as_uuid addresses 1.1.2.1/4
 wait_column '1.1.1.2 1.1.1.3 1.1.1.4' Address_Set addresses name=foo
 
 # There should be no recompute of the sync_to_sb_addr_set engine node .
@@ -8569,12 +8684,13 @@ ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows0
 
 AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows0], [0], [dnl
   table=? (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;)
-  table=? (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(ct_dnat;)
   table=? (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_mark.natted == 1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=? (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8588,6 +8704,7 @@ AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows0 | grep "priority=65532"],
   table=? (ls_out_acl         ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;)
   table=? (ls_out_acl         ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=? (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
+  table=?(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
 ])
 
@@ -8599,10 +8716,12 @@ ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows1
 
 AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows1], [0], [dnl
   table=? (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;)
+  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(ct_dnat;)
   table=? (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_label.natted == 1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 192.168.0.1), action=(ct_lb(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted), action=(next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
 ])
 
 AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows1 | grep "priority=65532"], [0], [dnl
@@ -8614,6 +8733,7 @@ AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows1 | grep "priority=65532"],
   table=? (ls_out_acl         ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;)
   table=? (ls_out_acl         ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;)
   table=? (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
+  table=?(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
 ])
 
@@ -8625,12 +8745,13 @@ ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows2
 
 AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows2], [0], [dnl
   table=? (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;)
-  table=? (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(ct_dnat;)
   table=? (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_mark.natted == 1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=? (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8644,8 +8765,104 @@ AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows2 | grep "priority=65532"],
   table=? (ls_out_acl         ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;)
   table=? (ls_out_acl         ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=? (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
+  table=?(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
 ])
 
 AT_CLEANUP
 ])
+
+OVN_FOR_EACH_NORTHD_NO_HV([
+AT_SETUP([Chassis-feature compatibility - remote chassis])
+ovn_start
+
+AS_BOX([Local chassis])
+check ovn-sbctl chassis-add hv1 geneve 127.0.0.1 \
+  -- set chassis hv1 other_config:ct-no-masked-label=true \
+  -- set chassis hv1 other_config:ovn-ct-lb-related=true \
+  -- set chassis hv1 other_config:mac-binding-timestamp=true
+
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE debug/chassis-features-list], [0], [dnl
+ct_no_masked_label:    true
+ct_lb_related:         true
+mac_binding_timestamp: true
+])
+
+AS_BOX([Remote chassis])
+check ovn-sbctl chassis-add hv2 geneve 127.0.0.2 \
+  -- set chassis hv2 other_config:is-remote=true \
+  -- set chassis hv2 other_config:ct-no-masked-label=false \
+  -- set chassis hv2 other_config:ovn-ct-lb-related=false \
+  -- set chassis hv2 other_config:mac-binding-timestamp=false
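+# hv2 is marked as remote and advertises all features as disabled; the
+# aggregated feature set reported below must remain unchanged.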
+
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE debug/chassis-features-list], [0], [dnl
+ct_no_masked_label:    true
+ct_lb_related:         true
+mac_binding_timestamp: true
+])
+
+AT_CLEANUP
+])
+
+AT_SETUP([Localnet ports on LS with LB])
+ovn_start
+# In the past, traffic arriving on localnet ports has skipped conntrack.
+# This test ensures that we still skip conntrack for localnet ports,
+# *except* for the case where the logical switch has a load balancer
+# configured. In this case, the localnet port will not skip conntrack,
+# allowing for traffic to be load balanced on the localnet port.
+
+check ovn-nbctl ls-add sw
+check ovn-nbctl lsp-add sw sw-ln
+check ovn-nbctl lsp-set-type sw-ln localnet
+check ovn-nbctl lsp-set-addresses sw-ln unknown
+check ovn-nbctl --wait=sb sync
+
+# Since this test is only concerned with logical flows, we don't need to
+# configure anything else that we normally would with regard to localnet
+# ports.
+
+
+# First, ensure that conntrack is skipped for the localnet port since there
+# isn't a load balancer configured.
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_in_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_in_pre_lb       ), priority=110  , match=(ip && inport == "sw-ln"), action=(next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_out_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw-ln"), action=(ct_clear; next;)
+])
+
+# Now add a load balancer and ensure that we are no longer skipping conntrack
+# for the localnet port.
+
+check ovn-nbctl lb-add lb 10.0.0.1:80 10.0.0.100:8080 tcp
+check ovn-nbctl ls-lb-add sw lb
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_in_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_out_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+])
+
+# And ensure that removing the load balancer from the switch results in skipping
+# conntrack again
+check ovn-nbctl ls-lb-del sw lb
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_in_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_in_pre_lb       ), priority=110  , match=(ip && inport == "sw-ln"), action=(next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_out_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw-ln"), action=(ct_clear; next;)
+])
+
+AT_CLEANUP
+])
diff --git a/tests/ovn.at b/tests/ovn.at
index 55de7c85b..ed91d32d0 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -992,10 +992,10 @@ next(pipeline=ingress, table=11);
 
 next(pipeline=egress);
     formats as next(pipeline=egress, table=11);
-    encodes as resubmit(,51)
+    encodes as resubmit(,53)
 
 next(pipeline=egress, table=5);
-    encodes as resubmit(,45)
+    encodes as resubmit(,47)
 
 next(table=10);
     formats as next(10);
@@ -4414,24 +4414,13 @@ response=${sha}${lrpmac}08060001080006040002${lrpmac}${tpa}${sha}${spa}
 echo $response >> 3.expected
 
 # First ensure basic flow contents are as we expect.
-AT_CHECK([ovn-sbctl lflow-list lsw0 | grep 'reg0[\[14\]]' | sort | sed 's/table=../table=??/g' | sed 's/is_chassis_resident([[^)]]*)/is_chassis_resident("??")/g'], [0], [dnl
+AT_CHECK([ovn-sbctl lflow-list lsw0 | grep 'reg0[\[14\]]' | sort | sed 's/table=../table=??/g'], [0], [dnl
   table=??(ls_in_check_port_sec), priority=70   , match=(inport == "lp-vtep"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=??);)
   table=??(ls_in_hairpin      ), priority=1000 , match=(reg0[[14]] == 1), action=(next(pipeline=ingress, table=??);)
-  table=??(ls_in_hairpin      ), priority=2000 , match=(reg0[[14]] == 1 && (is_chassis_resident("??") || is_chassis_resident("??"))), action=(next;)
+  table=??(ls_in_hairpin      ), priority=2000 , match=(reg0[[14]] == 1 && is_chassis_resident("cr-lrp1")), action=(next;)
+  table=??(ls_in_hairpin      ), priority=2000 , match=(reg0[[14]] == 1 && is_chassis_resident("cr-lrp2")), action=(next;)
 ])
 
-# We've ensured that the expected hairpin flows are present
-# and that the expected number of "is_chassis_resident" fields are in
-# the flow. Now we need to ensure the contents are correct.
-# Unfortunately, the order of the "is_chassis_resident" fields is
-# unpredictable. Therefore we sort them so the order is predictable.
-actual_chassis=$(ovn-sbctl lflow-list lsw0 | grep 'ls_in_hairpin' | grep 'priority=2000' | grep -o 'is_chassis_resident([[^)]]*)' | sort)
-
-expected_chassis='is_chassis_resident("cr-lrp1")
-is_chassis_resident("cr-lrp2")'
-
-check test "$expected_chassis" = "$actual_chassis"
-
 # dump information with counters
 echo "------ OVN dump ------"
 ovn-nbctl show
@@ -5055,6 +5044,7 @@ AT_CLEANUP
 
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([IP relocation using GARP request])
+AT_SKIP_IF([test $HAVE_SCAPY = no])
 ovn_start
 
 # Logical network:
@@ -5154,7 +5144,9 @@ done
 test_ip() {
     # This packet has bad checksums but logical L3 routing doesn't check.
     local inport=$1 src_mac=$2 dst_mac=$3 src_ip=$4 dst_ip=$5
-    local packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
+    local packet=$(fmt_pkt "Ether(dst='${dst_mac}', src='${src_mac}')/ \
+                            IP(dst='${dst_ip}', src='${src_ip}')/ \
+                            UDP(sport=53, dport=4369)")
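+    # fmt_pkt turns the scapy layers above into the hex packet string that
+    # netdev-dummy/receive expects.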
     shift; shift; shift; shift; shift
     hv=hv`vif_to_hv $inport`
     as $hv ovs-appctl netdev-dummy/receive vif$inport $packet
@@ -5169,7 +5161,9 @@ test_ip() {
             # Routing decrements TTL and updates source and dest MAC
             # (and checksum).
             out_lrp=`vif_to_lrp $outport`
-            echo f000000000${outport}00000000ff0${out_lrp}08004500001c00000000"3f1101"00${src_ip}${dst_ip}0035111100080000
+            echo $(fmt_pkt "Ether(dst='f0:00:00:00:00:${outport}', src='00:00:00:00:ff:${out_lrp}')/ \
+                            IP(src='${src_ip}', dst='${dst_ip}', ttl=63)/ \
+                            UDP(sport=53, dport=4369)")
         fi >> $outport.expected
     done
 }
@@ -5185,8 +5179,10 @@ test_ip() {
 # SHA and REPLY_HA are each 12 hex digits.
 # SPA and TPA are each 8 hex digits.
 test_arp() {
-    local inport=$1 sha=$2 spa=$3 tpa=$4 reply_ha=$5
-    local request=ffffffffffff${sha}08060001080006040001${sha}${spa}ffffffffffff${tpa}
+    local inport=$1 sha=$2 spa=$3 tpa=$3
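+    # GARP: the target protocol address matches the announced source address.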
+    local request=$(fmt_pkt "Ether(dst='ff:ff:ff:ff:ff:ff', src='${sha}')/ \
+                             ARP(hwsrc='${sha}', hwdst='ff:ff:ff:ff:ff:ff', psrc='${spa}', pdst='${tpa}')")
+
     hv=hv`vif_to_hv $inport`
     as $hv ovs-appctl netdev-dummy/receive vif$inport $request
 
@@ -5199,53 +5195,72 @@ test_arp() {
             echo $request >> $i$j$k.expected
         fi
     done
+}
 
-    # Expect to receive the reply, if any.
-    if test X$reply_ha != X; then
-        lrp=`vif_to_lrp $inport`
-        local reply=${sha}00000000ff0${lrp}08060001080006040002${reply_ha}${tpa}${sha}${spa}
-        echo $reply >> $inport.expected
-    fi
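+# test_na INPORT SHA SPA
+#
+# Sends an unsolicited IPv6 neighbor advertisement for SPA, with source MAC
+# SHA, on INPORT.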
+test_na() {
+    local inport=$1 sha=$2 spa=$3
+    local request=$(fmt_pkt "Ether(dst='ff:ff:ff:ff:ff:ff', src='${sha}')/ \
+                             IPv6(dst='ff01::1', src='${spa}')/ \
+                             ICMPv6ND_NA(tgt='${spa}')")
+
+    hv=hv`vif_to_hv $inport`
+    as $hv ovs-appctl netdev-dummy/receive vif$inport $request
+
+    # Expect to receive the broadcast NA on the other logical switch ports if
+    # the IP address is not configured on the switch patch port.
+    local i=`vif_to_ls $inport`
+    local j
+    for j in 1 2; do
+        if test $i$j != $inport; then
+            echo $request >> $i$j$k.expected
+        fi
+    done
 }
 
-# lp11 send GARP request to announce ownership of 192.168.1.100.
+# lp11 sends a GARP request to announce ownership of 192.168.1.100 and fe80::abcd:1.
 
-sha=f00000000011
-spa=`ip_to_hex 192 168 1 100`
-tpa=$spa
+sha="f0:00:00:00:00:11"
+spa="192.168.1.100"
+spa6="fe80::abcd:1"
 
 # When always_learn_from_arp_request=false, the new mac-binding will not be learned
 # through GARP request.
 ovn-nbctl --wait=hv set logical_router lr0 options:always_learn_from_arp_request=false
 
-test_arp 11 $sha $spa $tpa
+test_arp 11 $sha $spa
+test_na 11 $sha $spa6
 sleep 1
-check_row_count MAC_Binding 0 ip="192.168.1.100"
+check_row_count MAC_Binding 0 ip="$spa"
+check_row_count MAC_Binding 0 ip=\"$spa6\"
 
 # When always_learn_from_arp_request=true, the new mac-binding will be learned.
 ovn-nbctl --wait=hv set logical_router lr0 options:always_learn_from_arp_request=true
 
-test_arp 11 $sha $spa $tpa
-OVS_WAIT_UNTIL([test `ovn-sbctl find mac_binding ip="192.168.1.100" | wc -l` -gt 0])
+test_arp 11 $sha $spa
+test_na 11 $sha $spa6
+wait_row_count MAC_Binding 1 ip="$spa" mac=\"$sha\"
+wait_row_count MAC_Binding 1 ip=\"$spa6\" mac=\"$sha\"
 ovn-nbctl --wait=hv sync
 
 # Send an IP packet from lp21 to 192.168.1.100, which should go to lp11.
 
-smac=f00000000021
-dmac=00000000ff02
-sip=`ip_to_hex 192 168 2 11`
-dip=`ip_to_hex 192 168 1 100`
+smac="f0:00:00:00:00:21"
+dmac="00:00:00:00:ff:02"
+sip="192.168.2.11"
+dip="192.168.1.100"
 test_ip 21 $smac $dmac $sip $dip 11
 
-# lp12 send GARP request to announce ownership of 192.168.1.100.
+# lp12 sends a GARP request to announce ownership of 192.168.1.100 and fe80::abcd:1.
 
 # Even when always_learn_from_arp_request=false, the existing mac-binding should be
 # updated through GARP request.
 ovn-nbctl --wait=hv set logical_router lr0 options:always_learn_from_arp_request=false
 
-sha=f00000000012
-test_arp 12 $sha $spa $tpa
-wait_row_count MAC_Binding 1 ip="192.168.1.100" mac='"f0:00:00:00:00:12"'
+sha="f0:00:00:00:00:12"
+test_arp 12 $sha $spa
+test_na 11 $sha $spa6
+wait_row_count MAC_Binding 1 ip="$spa" mac=\"$sha\"
+wait_row_count MAC_Binding 1 ip=\"$spa6\" mac=\"$sha\"
 ovn-nbctl --wait=hv sync
 # give the hv time to send queued ip packets
 sleep 1
@@ -5753,7 +5768,7 @@ check ovn-nbctl --wait=hv sync
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_lp1_mac && eth.dst==$rp_ls1_mac &&
         ip4 && ip.ttl==64 && ip4.src==$ls1_lp1_ip && ip4.dst==$ls2_lp1_ip &&
         udp && udp.src==53 && udp.dst==4369"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 
 echo "---------NB dump-----"
@@ -5803,7 +5818,7 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_lp1_mac && eth.dst==$rp_ls1_mac &&
         ip4 && ip.ttl==64 && ip4.src==$ls1_lp1_ip && ip4.dst==$ls2_lp1_ip &&
         udp && udp.src==53 && udp.dst==4369"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 # The 2nd packet sent should not be received.
 OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 
@@ -7741,7 +7756,6 @@ ls3_p1_mac=00:00:00:01:02:05
 check ovn-nbctl --wait=hv lr-policy-add R1 10 "ip4.src==192.168.1.0/24 && ip4.dst==172.16.1.0/24" drop
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows
 AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" | wc -l], [0], [dnl
 1
 ])
@@ -7751,15 +7765,12 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$ls1_p1_ip && ip4.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
 
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop policy
-AT_CHECK([ovs-ofctl dump-flows br-int | \
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
     grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24 actions=drop" | \
-    grep "priority=10" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+    grep "priority=10" | grep "n_packets=1" -c)"])
 
 # Expected to drop the packet.
 $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" pbr-hv/vif2-tx.pcap > vif2.packets
@@ -7770,7 +7781,7 @@ AT_FAIL_IF([test "$rcvd_packet" != ""])
 check ovn-nbctl --wait=hv lr-policy-add R1 20 "ip4.src==192.168.1.0/24 && ip4.dst==172.16.1.0/24" allow
 
 # Check logical flow
-AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" | wc -l], [0], [dnl
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" -c], [0], [dnl
 2
 ])
 
@@ -7778,15 +7789,12 @@ AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" | wc -l]
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$ls1_p1_ip && ip4.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the allow policy
-sleep 1
-AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
-    grep "192.168.1.0" | \
-    grep "priority=20" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24" | \
+    grep "priority=20" | grep "n_packets=1" -c)"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$ls2_ro_mac && eth.dst==$ls2_p1_mac &&
@@ -7802,7 +7810,7 @@ check ovn-nbctl --wait=hv lr-policy-add R1 30 "ip4.src==192.168.1.0/24 && ip4.ds
 # Check logical flow
 AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
     grep "192.168.1.0" | \
-    grep "priority=30" | wc -l], [0], [dnl
+    grep "priority=30" -c], [0], [dnl
 1
 ])
 
@@ -7810,21 +7818,12 @@ AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$ls1_p1_ip && ip4.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
-sleep 1
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
-echo "southbound flows"
-ovn-sbctl --ovs dump-flows > sbflows
-AT_CAPTURE_FILE([sbflows])
-echo "ovs flows"
-ovs-ofctl dump-flows br-int > brflows
-AT_CAPTURE_FILE([brflows])
 # Check if packet hit the allow policy
-AT_CHECK([grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24" brflows | \
-    grep "priority=30" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24" | \
+    grep "priority=30" | grep "n_packets=1" -c)"])
 echo "packet hit reroute policy"
 
 # Expected packet has TTL decreased by 1
@@ -7927,9 +7926,7 @@ ls3_p1_mac=00:00:00:01:02:05
 check ovn-nbctl --wait=sb lr-policy-add R1 10 "ip6.src==2001::/64 && ip6.dst==2002::/64" drop
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows
-AT_CAPTURE_FILE([sbflows])
-AT_CHECK([grep lr_in_policy sbflows | grep "2001" | wc -l], [0], [dnl
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "2001" -c], [0], [dnl
 1
 ])
 
@@ -7938,15 +7935,12 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip6 && ip.ttl==64 && ip6.src==$ls1_p1_ip && ip6.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
 
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop policy
-AT_CHECK([ovs-ofctl dump-flows br-int | \
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
     grep "ipv6_src=2001::/64,ipv6_dst=2002::/64 actions=drop" | \
-    grep "priority=10" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+    grep "priority=10" | grep "n_packets=1" -c)"])
 
 # Expected to drop the packet.
 $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" pbr-hv/vif2-tx.pcap > vif2.packets
@@ -7956,9 +7950,7 @@ AT_FAIL_IF([test -s vif2.packets])
 check ovn-nbctl --wait=sb lr-policy-add R1 20 "ip6.src==2001::/64 && ip6.dst==2002::/64" allow
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows2
-AT_CAPTURE_FILE([sbflows2])
-AT_CHECK([grep lr_in_policy sbflows2 | grep "2001" | wc -l], [0], [dnl
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "2001" -c], [0], [dnl
 2
 ])
 
@@ -7966,16 +7958,12 @@ AT_CHECK([grep lr_in_policy sbflows2 | grep "2001" | wc -l], [0], [dnl
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip6 && ip.ttl==64 && ip6.src==$ls1_p1_ip && ip6.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the allow policy
-ovn-sbctl dump-flows > sbflows3
-AT_CAPTURE_FILE([sbflows3])
-AT_CHECK([grep lr_in_policy sbflows3 | \
-    grep "2001" | \
-    grep "priority=20" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "ipv6_src=2001::/64,ipv6_dst=2002::/64"  | \
+    grep "priority=20" | grep "n_packets=1" -c)"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$ls2_ro_mac && eth.dst==$ls2_p1_mac &&
@@ -7989,11 +7977,9 @@ OVN_CHECK_PACKETS([pbr-hv/vif2-tx.pcap], [expected])
 check ovn-nbctl --wait=sb lr-policy-add R1 30 "ip6.src==2001::/64 && ip6.dst==2002::/64" reroute 2003::2
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows4
-AT_CAPTURE_FILE([sbflows4])
-AT_CHECK([grep lr_in_policy sbflows4 | \
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
     grep "2001" | \
-    grep "priority=30" | wc -l], [0], [dnl
+    grep "priority=30" -c], [0], [dnl
 1
 ])
 
@@ -8001,19 +7987,12 @@ AT_CHECK([grep lr_in_policy sbflows4 | \
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip6 && ip.ttl==64 && ip6.src==$ls1_p1_ip && ip6.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
-sleep 1
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
-ovn-sbctl dump-flows > sbflows5
-ovs-ofctl dump-flows br-int > offlows5
-AT_CAPTURE_FILE([sbflows5])
-AT_CAPTURE_FILE([offlows5])
 # Check if packet hit the allow policy
-AT_CHECK([grep "ipv6_src=2001::/64,ipv6_dst=2002::/64" offlows5 | \
-    grep "priority=30" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "ipv6_src=2001::/64,ipv6_dst=2002::/64"  | \
+    grep "priority=30" | grep "n_packets=1" -c)"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$ls3_ro_mac && eth.dst==$ls3_p1_mac &&
@@ -9531,73 +9510,73 @@ AT_CAPTURE_FILE([sbflows])
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4360 && tcp.dst==80"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be dropped with logging in the ingress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4361 && tcp.dst==81"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be dropped without logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4360 && tcp.dst==180"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be dropped with logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4361 && tcp.dst==181"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be allowed without logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4362 && tcp.dst==82"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be allowed with logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4363 && tcp.dst==83"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should allow related flows without logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4364 && tcp.dst==84"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should allow related flows with logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4365 && tcp.dst==85"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected without logging in the ingress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4366 && tcp.dst==86"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected with logging in the ingress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4367 && tcp.dst==87"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected without logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4366 && tcp.dst==186"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected with logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4367 && tcp.dst==187"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 OVS_WAIT_UNTIL([ test 8 = $(grep -c 'acl_log' hv/ovn-controller.log) ])
 
@@ -10206,14 +10185,21 @@ AT_CHECK([test ! -z $foo2_zoneid])
 bar2_zoneid=$(as hv2 ovs-vsctl get bridge br-int external_ids:ct-zone-bar2)
 AT_CHECK([test ! -z $bar2_zoneid])
 
-ovn-nbctl lsp-del bar2
+# When a port is removed from a logical switch, the ct-zone is flushed first
+# and the ct-zone-id is then removed from external_ids. This is done in two
+# steps (the ct-zone-id is only removed once the transaction flushing the
+# ct_zone has completed). "ovn-nbctl --wait=hv sync" does not take this into
+# account, hence we need two "wait=hv" rounds before we can be sure that the
+# ct-zone-id has been removed from external_ids.
+ovn-nbctl --wait=hv lsp-del bar2
 ovn-nbctl --wait=hv sync
 
 bar2_zoneid=$(as hv2 ovs-vsctl get bridge br-int external_ids:ct-zone-bar2)
 AT_CHECK([test  -z $bar2_zoneid])
 
 # Add back bar2
-ovn-nbctl lsp-add bar bar2 vm2 1 \
+# Same comment as above: two "wait=hv" are needed.
+ovn-nbctl --wait=hv lsp-add bar bar2 vm2 1 \
 -- lsp-set-addresses bar2 "f0:00:00:01:02:08 192.168.2.3"
 wait_for_ports_up
 ovn-nbctl --wait=hv sync
@@ -11214,7 +11200,7 @@ hv1_gw1_ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=ov
 hv1_gw2_ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=ovn-gw2-0)
 
 OVS_WAIT_UNTIL([
-    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=37 | grep -c "active_backup,ofport,members:$hv1_gw1_ofport,$hv1_gw2_ofport")
+    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=39 | grep -c "active_backup,ofport,members:$hv1_gw1_ofport,$hv1_gw2_ofport")
 ])
 
 test_ip_packet()
@@ -11324,7 +11310,7 @@ AT_CHECK(
 ])
 
 OVS_WAIT_UNTIL([
-    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=37 | grep -c "active_backup,ofport,members:$hv1_gw2_ofport,$hv1_gw1_ofport")
+    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=39 | grep -c "active_backup,ofport,members:$hv1_gw2_ofport,$hv1_gw1_ofport")
 ])
 
 test_ip_packet gw2 gw1 0
@@ -11502,7 +11488,7 @@ hv1_gw1_ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=ov
 hv1_gw2_ofport=$(as hv1 ovs-vsctl --bare --columns ofport find Interface name=ovn-gw2-0)
 
 OVS_WAIT_UNTIL([
-    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=37 | grep -c "active_backup,ofport,members:$hv1_gw1_ofport,$hv1_gw2_ofport")
+    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=39 | grep -c "active_backup,ofport,members:$hv1_gw1_ofport,$hv1_gw2_ofport")
 ])
 
 test_ip_packet()
@@ -11582,7 +11568,7 @@ AT_CHECK([ovn-nbctl --wait=hv \
 ])
 
 OVS_WAIT_UNTIL([
-    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=37 | grep -c "active_backup,ofport,members:$hv1_gw2_ofport,$hv1_gw1_ofport")
+    test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=39 | grep -c "active_backup,ofport,members:$hv1_gw2_ofport,$hv1_gw1_ofport")
 ])
 
 test_ip_packet gw2 gw1
@@ -11748,12 +11734,12 @@ AT_CAPTURE_FILE([hv2flows])
 
 AT_CHECK(
   [# Check that redirect mapping is programmed only on hv2
-   grep table=38 hv1flows | grep =0x3,metadata=0x1 | wc -l
-   grep table=38 hv2flows | grep =0x3,metadata=0x1 | grep load:0x2- | wc -l
+   grep table=40 hv1flows | grep =0x3,metadata=0x1 | wc -l
+   grep table=40 hv2flows | grep =0x3,metadata=0x1 | grep load:0x2- | wc -l
 
    # Check that hv1 sends chassisredirect port traffic to hv2
-   grep table=37 hv1flows | grep =0x3,metadata=0x1 | grep output | wc -l
-   grep table=37 hv2flows | grep =0x3,metadata=0x1 | wc -l
+   grep table=39 hv1flows | grep =0x3,metadata=0x1 | grep output | wc -l
+   grep table=39 hv2flows | grep =0x3,metadata=0x1 | wc -l
 
    # Check that arp reply on distributed gateway port is only programmed on hv2
    grep arp hv1flows | grep load:0x2- | grep =0x2,metadata=0x1 | wc -l
@@ -12254,7 +12240,7 @@ nexthop_mac="f00000010204"
 AS_BOX([Send ip packet from foo1 to 8.8.8.8])
 src_mac="f00000010203"
 dst_mac="000001010203"
-packet=${foo_mac}${foo1_mac}08004500001c0000000040110000${foo1_ip}${dst_ip}0035111100080000
+packet=${foo_mac}${foo1_mac}080045000028000000004006a916${foo1_ip}${dst_ip}0035111112345678000000005002faf069450000
 
 AS_BOX([Wait for GARPs announcing gw IP to arrive])
 OVS_WAIT_UNTIL([
@@ -12265,15 +12251,12 @@ grep actions=mod_dl_dst:f0:00:00:01:02:04 | wc -l` -eq 1
 AS_BOX([Verify VLAN tagged packet on bridge connecting hv1 and hv2])
 # VLAN tagged packet with router port(192.168.1.1) MAC as destination MAC
 # is expected on bridge connecting hv1 and hv2
-expected=${foo_mac}${foo1_mac}8100000208004500001c0000000040110000${foo1_ip}${dst_ip}0035111100080000
+expected=${foo_mac}${foo1_mac}81000002080045000028000000004006a916${foo1_ip}${dst_ip}0035111112345678000000005002faf069450000
 echo $expected > hv1-br-ex_n2.expected
 
 AS_BOX([Verify packet at outside1 i.e nexthop(172.16.1.1) port])
 # Packet to Expect at outside1 i.e nexthop(172.16.1.1) port.
-# As connection tracking not enabled for this test, snat can't be done on the packet.
-# We still see foo1 as the source ip address. But source mac(gateway MAC) and
-# dest mac(nexthop mac) are properly configured.
-expected=${nexthop_mac}${gw_mac}08004500001c000000003f110100${foo1_ip}${dst_ip}0035111100080000
+expected=${nexthop_mac}${gw_mac}080045000028000000003f06beaa${gw_ip}${dst_ip}0035111112345678000000005002faf07dd90000
 echo $expected > hv3-vif1.expected
 
 check as hv1 ovs-appctl dpctl/del-flows
@@ -12284,8 +12267,8 @@ as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
 as hv1 ovs-appctl ofproto/trace br-int in_port=hv1-vif1 $packet
 sleep 2
 
-AS_BOX([On hv1, table 37 check that no packet goes via the tunnel port])
-OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=37 \
+AS_BOX([On hv1, table 39 check that no packet goes via the tunnel port])
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=39 \
 | grep "NXM_NX_TUN_ID" | grep -v n_packets=0 | wc -l], [0], [[0
 ]])
 
@@ -12304,7 +12287,7 @@ cat hv1-br-ex_n2.expected > expout
 AT_CHECK([sort hv1-br-ex_n2], [0], [expout])
 
 AS_BOX([Check expected packet on nexthop interface])
-$PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv3/vif1-tx.pcap | grep ${foo1_ip}${dst_ip} | uniq > hv3-vif1
+$PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv3/vif1-tx.pcap | grep ${gw_ip}${dst_ip} | uniq > hv3-vif1
 cat hv3-vif1.expected > expout
 AT_CHECK([sort hv3-vif1], [0], [expout])
 
@@ -13260,38 +13243,35 @@ echo $hv2_gw1_ofport
 echo $hv2_gw2_ofport
 
 echo "--- hv1 ---"
-as hv1 ovs-ofctl dump-flows br-int table=37
+as hv1 ovs-ofctl dump-flows br-int table=39
 
 echo "--- hv2 ---"
-as hv2 ovs-ofctl dump-flows br-int table=37
+as hv2 ovs-ofctl dump-flows br-int table=39
 
 gw1_chassis=$(fetch_column Chassis _uuid name=gw1)
 gw2_chassis=$(fetch_column Chassis _uuid name=gw2)
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw1_ofport,$hv1_gw2_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv1_gw1_ofport,$hv1_gw2_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw1_ofport,$hv2_gw2_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv2_gw1_ofport,$hv2_gw2_ofport \
 | wc -l], [0], [1
 ])
 
-# make sure that flows for handling the outside router port reside on gw1
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+# make sure that flows for handling the outside router port reside on gw1 (ls_in_l2_lkup table)
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
-# make sure ARP responder flows for outside router port reside on gw1 too
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=9 | \
-grep arp_tpa=192.168.0.101 | wc -l], [0], [[1
-]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=9 | grep arp_tpa=192.168.0.101 | wc -l], [0], [[0
-]])
+# make sure ARP responder flows for the outside router port reside on gw1 too (ls_in_arp_rsp table)
+OVS_WAIT_UNTIL([test `as gw1 ovs-ofctl dump-flows br-int table=27 | \
+grep arp_tpa=192.168.0.101 | wc -l` -ge 1])
 
 # check that the chassis redirect port has been claimed by the gw1 chassis
 wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw1_chassis
@@ -13314,13 +13294,13 @@ wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # we make sure that the hypervisors noticed, and inverted the slave ports
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw2_ofport,$hv1_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv1_gw2_ofport,$hv1_gw1_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw2_ofport,$hv2_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv2_gw2_ofport,$hv2_gw1_ofport \
 | wc -l], [0], [1
 ])
 
@@ -13372,11 +13352,11 @@ AT_CHECK([ovs-vsctl --bare --columns bfd find Interface name=ovn-hv1-0],[0],
 ]])
 
 # make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # disconnect GW2 from the network, GW1 should take over
@@ -13386,12 +13366,12 @@ as main ovs-vsctl del-port n1 $port
 
 bfd_dump
 
-# make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+# make sure that flows for handling the outside router port reside on gw1 now
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # check that the chassis redirect port has been reclaimed by the gw1 chassis
@@ -13470,45 +13450,16 @@ ovn-nbctl set Logical_Router_Port outside ha_chassis_group=$hagrp1_uuid
 wait_row_count HA_Chassis_Group 1
 wait_row_count HA_Chassis 2
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw1_ofport,$hv1_gw2_ofport \
-| wc -l], [0], [1
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv1_gw1_ofport,$hv1_gw2_ofport \
+| wc -l], [0], [0
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw1_ofport,$hv2_gw2_ofport \
-| wc -l], [0], [1
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv2_gw1_ofport,$hv2_gw2_ofport \
+| wc -l], [0], [0
 ])
 
-# make sure that flows for handling the outside router port reside on gw1
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
-]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
-]])
-
-# make sure ARP responder flows for outside router port reside on gw1 too
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=9 | \
-grep arp_tpa=192.168.0.101 | wc -l], [0], [[1
-]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=9 | grep arp_tpa=192.168.0.101 | wc -l], [0], [[0
-]])
-
-# check that the chassis redirect port has been claimed by the gw1 chassis
-#
-# XXX actually it doesn't happen, the test has always been wrong here
-# because the following just checks that "wc -l" succeeds (and it always
-# does):
-#
-#   OVS_WAIT_UNTIL([ovn-sbctl --columns chassis --bare find Port_Binding \
-#   logical_port=cr-outside | grep $gw1_chassis | wc -l], [0],[[1
-#   ]])
-#
-# If it were correct, then the following would be a good substitute:
-#
-#   wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw1_chassis
-
 # Re add the ovs ports.
 for i in 1 2; do
     as hv$i
@@ -13519,6 +13470,34 @@ for i in 1 2; do
         ofport-request=1
 done
 
+# Re-add gw2
+as gw2 ovn_attach n1 br-phys 192.168.0.1
+
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv1_gw1_ofport,$hv1_gw2_ofport \
+| wc -l], [0], [1
+])
+
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv2_gw1_ofport,$hv2_gw2_ofport \
+| wc -l], [0], [1
+])
+
+# make sure that flows for handling the outside router port reside on gw1
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
+]])
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst:00:00:02:01:02:04" | wc -l], [0], [[0
+]])
+
+# make sure ARP responder flows for the outside router port reside on gw1 too
+OVS_WAIT_UNTIL([test `as gw1 ovs-ofctl dump-flows br-int table=27 | \
+grep arp_tpa=192.168.0.101 | wc -l` -ge 1 ])
+
+# check that the chassis redirect port has been claimed by the gw1 chassis
+wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw1_chassis
+
 hv1_ch_uuid=$(fetch_column Chassis _uuid name=hv1)
 hv2_ch_uuid=$(fetch_column Chassis _uuid name=hv2)
 exp_ref_ch_list="$hv1_ch_uuid $hv2_ch_uuid"
@@ -13527,29 +13506,18 @@ wait_column "$exp_ref_ch_list" HA_Chassis_Group ref_chassis
 # Increase the priority of gw2
 ovn-nbctl --wait=sb ha-chassis-group-add-chassis hagrp1 gw2 40
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw2_ofport,$hv1_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv1_gw2_ofport,$hv1_gw1_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw2_ofport,$hv2_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=39 | \
+grep active_backup | grep members:$hv2_gw2_ofport,$hv2_gw1_ofport \
 | wc -l], [0], [1
 ])
 
 # check that the chassis redirect port has been reclaimed by the gw2 chassis
-#
-# XXX actually it doesn't happen, the test has always been wrong here
-# because the following just checks that "wc -l" succeeds (and it always
-# does):
-#
-#   OVS_WAIT_UNTIL([ovn-sbctl --columns chassis --bare find Port_Binding \
-#   logical_port=cr-outside | grep $gw2_chassis | wc -l], [0],[[1
-#   ]])
-#
-# If it were correct, then the following would be a good substitute:
-#
-#   wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw2_chassis
+wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw2_chassis
 
 # check BFD enablement on tunnel ports from gw1 #########
 as gw1
@@ -13588,11 +13556,11 @@ AT_CHECK([ovs-vsctl --bare --columns bfd find Interface name=ovn-hv1-0],[0],
 ]])
 
 # make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # disconnect GW2 from the network, GW1 should take over
@@ -13603,11 +13571,11 @@ as main ovs-vsctl del-port n1 $port
 bfd_dump
 
 # make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # check that the chassis redirect port has been reclaimed by the gw1 chassis
@@ -13889,6 +13857,133 @@ OVN_CLEANUP([gw1],[gw2],[hv1])
 AT_CLEANUP
 ])
 
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([allow IPv6 RA / NA / MLD by default])
+AT_SKIP_IF([test $HAVE_SCAPY = no])
+ovn_start
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+lsp_mac_prefix=50:64:00:00:00:0
+lsp_ip_prefix=10.0.0.
+lsp_ip6_prefix=aef0::5264:00ff:fe00:000
+
+check ovn-nbctl ls-add ls0
+for i in 1 2; do
+  check ovn-nbctl lsp-add ls0 lsp$i
+  check ovn-nbctl lsp-set-addresses lsp$i \
+    "${lsp_mac_prefix}$i ${lsp_ip_prefix}$i ${lsp_ip6_prefix}$i"
+
+  # forbid all traffic for the ports
+  check ovn-nbctl acl-add ls0 \
+      from-lport 1000 "inport == \"lsp$i\"" drop
+  check ovn-nbctl --apply-after-lb acl-add ls0\
+      from-lport 1000 "inport == \"lsp$i\"" drop
+  check ovn-nbctl acl-add ls0 \
+      to-lport 1000 "outport == \"lsp$i\"" drop
+
+  check ovs-vsctl -- add-port br-int vif$i -- \
+      set interface vif$i external-ids:iface-id=lsp$i \
+      options:tx_pcap=hv1/vif$i-tx.pcap \
+      options:rxq_pcap=hv1/vif$i-rx.pcap
+  : > $i.expected
+done
+
+router_mac=fa:16:3e:00:00:01
+router_prefix=fdad:1234:5678::
+router_ip=${router_prefix}1
+check ovn-nbctl lr-add lr0
+check ovn-nbctl lrp-add lr0 lrp0 ${router_mac} ${router_ip}/64
+check ovn-nbctl set Logical_Router_Port lrp0 ipv6_ra_configs:address_mode="slaac"
+check ovn-nbctl \
+    -- lsp-add ls0 rp0 \
+    -- set Logical_Switch_Port rp0 type=router \
+                     options:router-port=lrp0 \
+                     addresses="\"${router_mac} ${router_ip}\""
+
+wait_for_ports_up
+
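+# Each helper below injects an ND / RA / MLD probe on one vif and appends the
+# frame expected in return to the relevant *.expected file; the point of the
+# test is that these replies still arrive despite the catch-all drop ACLs
+# configured above.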
+test_ns_na() {
+    local inport=$1 src_mac=$2 dst_mac=$3 src_ip=$4 dst_ip=$5
+
+    packet=$(fmt_pkt "
+        Ether(dst='ff:ff:ff:ff:ff:ff', src='${src_mac}') /
+        IPv6(src='${src_ip}', dst='ff02::1:ff00:2') /
+        ICMPv6ND_NS(tgt='${dst_ip}')
+    ")
+    as hv1 ovs-appctl netdev-dummy/receive vif${inport} $packet
+
+    expected_packet=$(fmt_pkt "
+        Ether(dst='${src_mac}', src='${dst_mac}') /
+        IPv6(src='${dst_ip}', dst='${src_ip}') /
+        ICMPv6ND_NA(tgt='${dst_ip}', R=0, S=1) /
+        ICMPv6NDOptDstLLAddr(lladdr='${dst_mac}')
+    ")
+    echo $expected_packet >> $inport.expected
+}
+
+test_rs_ra() {
+    local inport=$1 src_mac=$2 src_ip=$3
+    local router_mac=$4 router_prefix=$5 router_ip=$6
+
+    packet=$(fmt_pkt "
+        Ether(dst='ff:ff:ff:ff:ff:ff', src='${src_mac}') /
+        IPv6(src='${src_ip}', dst='ff02::2') /
+        ICMPv6ND_RS()
+    ")
+    as hv1 ovs-appctl netdev-dummy/receive vif${inport} $packet
+
+    expected_packet=$(fmt_pkt "
+        Ether(dst='${src_mac}', src='${router_mac}') /
+        IPv6(src='${router_ip}', dst='${src_ip}') /
+        ICMPv6ND_RA(chlim=255, prf=0, routerlifetime=65535) /
+        ICMPv6NDOptSrcLLAddr(lladdr='${router_mac}') /
+        ICMPv6NDOptPrefixInfo(prefix='${router_prefix}')
+    ")
+    echo $expected_packet >> $inport.expected
+}
+
+test_mldv2() {
+    local inport=$1 outport=$2 src_mac=$3 src_ip=$4
+
+    packet=$(fmt_pkt "
+        Ether(dst='ff:ff:ff:ff:ff:ff', src='${src_mac}') /
+        IPv6(src='${src_ip}', dst='ff02::2') /
+        ICMPv6MLQuery2()
+    ")
+    as hv1 ovs-appctl netdev-dummy/receive vif${inport} $packet
+
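+    # The query is multicast, so the switch should flood it to the peer port
+    # unchanged even with the drop ACLs in place; expect the injected frame
+    # as-is on the output port.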
+    expected_packet=$packet
+    echo $expected_packet >> $outport.expected
+}
+
+src_mac=${lsp_mac_prefix}1
+dst_mac=${lsp_mac_prefix}2
+src_ip=${lsp_ip6_prefix}1
+dst_ip=${lsp_ip6_prefix}2
+
+as hv1
+test_ns_na 1 $src_mac $dst_mac $src_ip $dst_ip
+
+as hv1
+router_local_ip=fe80::f816:3eff:fe00:1
+test_rs_ra 1 $src_mac $src_ip $router_mac $router_prefix $router_local_ip
+
+as hv1
+src_ip=fe80::1
+test_mldv2 1 2 $src_mac $src_ip
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [1.expected])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [2.expected])
+
+OVN_CLEANUP([hv1])
+
+AT_CLEANUP
+])
+
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([IPv6 Neighbor Solicitation for unknown MAC])
 AT_KEYWORDS([ovn-nd_ns for unknown mac])
@@ -14162,10 +14257,12 @@ wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=lsp0
 wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=lsp0
 wait_column "$hv2_uuid" Port_Binding requested_additional_chassis logical_port=lsp0
 
-# Check ovn-installed updated for main chassis
+# Check ovn-installed updated for both chassis
 wait_for_ports_up
-OVS_WAIT_UNTIL([test `as hv1 ovs-vsctl get Interface lsp0 external_ids:ovn-installed` = '"true"'])
-OVS_WAIT_UNTIL([test x`as hv2 ovs-vsctl get Interface lsp0 external_ids:ovn-installed` = x])
+
+for hv in hv1 hv2; do
+    OVS_WAIT_UNTIL([test `as $hv ovs-vsctl get Interface lsp0 external_ids:ovn-installed` = '"true"'])
+done
 
 # Check that setting iface:encap-ip populates Port_Binding:additional_encap
 wait_row_count Encap 2 chassis_name=hv1
@@ -14192,7 +14289,7 @@ wait_column "$hv2_uuid" Port_Binding requested_chassis logical_port=lsp0
 wait_column "" Port_Binding additional_chassis logical_port=lsp0
 wait_column "" Port_Binding requested_additional_chassis logical_port=lsp0
 
-# Check ovn-installed updated for main chassis and not for other chassis
+# Check ovn-installed updated for main chassis and removed from additional chassis
 wait_for_ports_up
 OVS_WAIT_UNTIL([test `as hv2 ovs-vsctl get Interface lsp0 external_ids:ovn-installed` = '"true"'])
 OVS_WAIT_UNTIL([test x`as hv1 ovs-vsctl get Interface lsp0 external_ids:ovn-installed` = x])
@@ -15071,6 +15168,327 @@ OVN_CLEANUP([hv1],[hv2],[hv3])
 AT_CLEANUP
 ])
 
+m4_define([MULTICHASSIS_PATH_MTU_DISCOVERY_TEST],
+  [OVN_FOR_EACH_NORTHD([
+   AT_SETUP([localnet connectivity with multiple requested-chassis, path mtu discovery (ip=$1, tunnel=$2, mtu=$3)])
+   AT_KEYWORDS([multi-chassis])
+   AT_SKIP_IF([test $HAVE_SCAPY = no])
+
+   ovn_start
+
+   net_add n1
+   for i in 1 2; do
+       sim_add hv$i
+       as hv$i
+       check ovs-vsctl add-br br-phys
+       if test "x$1" = "xipv6"; then
+           ovn_attach n1 br-phys fd00::$i 64 $2
+       else
+           ovn_attach n1 br-phys 192.168.0.$i 24 $2
+       fi
+       check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+   done
+
+   first_mac=00:00:00:00:00:01
+   second_mac=00:00:00:00:00:02
+   multi1_mac=00:00:00:00:00:f0
+   multi2_mac=00:00:00:00:00:f1
+   first_ip=10.0.0.1
+   second_ip=10.0.0.2
+   multi1_ip=10.0.0.10
+   multi2_ip=10.0.0.20
+   first_ip6=abcd::1
+   second_ip6=abcd::2
+   multi1_ip6=abcd::f0
+   multi2_ip6=abcd::f1
+
+   check ovn-nbctl ls-add ls0
+   check ovn-nbctl lsp-add ls0 first
+   check ovn-nbctl lsp-add ls0 second
+   check ovn-nbctl lsp-add ls0 multi1
+   check ovn-nbctl lsp-add ls0 multi2
+   check ovn-nbctl lsp-set-addresses first "${first_mac} ${first_ip} ${first_ip6}"
+   check ovn-nbctl lsp-set-addresses second "${second_mac} ${second_ip} ${second_ip6}"
+   check ovn-nbctl lsp-set-addresses multi1 "${multi1_mac} ${multi1_ip} ${multi1_ip6}"
+   check ovn-nbctl lsp-set-addresses multi2 "${multi2_mac} ${multi2_ip} ${multi2_ip6}"
+
+   check ovn-nbctl lsp-add ls0 public
+   check ovn-nbctl lsp-set-type public localnet
+   check ovn-nbctl lsp-set-addresses public unknown
+   check ovn-nbctl lsp-set-options public network_name=phys
+
+   check ovn-nbctl lsp-set-options first requested-chassis=hv1
+   check ovn-nbctl lsp-set-options second requested-chassis=hv2
+   check ovn-nbctl lsp-set-options multi1 requested-chassis=hv1,hv2
+   check ovn-nbctl lsp-set-options multi2 requested-chassis=hv1,hv2
+
+   as hv1 check ovs-vsctl -- add-port br-int first -- \
+       set Interface first external-ids:iface-id=first \
+       options:tx_pcap=hv1/first-tx.pcap \
+       options:rxq_pcap=hv1/first-rx.pcap \
+       ofport-request=1
+   as hv2 check ovs-vsctl -- add-port br-int second -- \
+       set Interface second external-ids:iface-id=second \
+       options:tx_pcap=hv2/second-tx.pcap \
+       options:rxq_pcap=hv2/second-rx.pcap \
+       ofport-request=2
+
+   # Create interfaces for multichassis ports on both hv1 and hv2
+   for hv in hv1 hv2; do
+       for i in 1 2; do
+           as $hv check ovs-vsctl -- add-port br-int multi${i} -- \
+               set Interface multi${i} external-ids:iface-id=multi${i} \
+               options:tx_pcap=$hv/multi${i}-tx.pcap \
+               options:rxq_pcap=$hv/multi${i}-rx.pcap \
+               ofport-request=${i}00
+       done
+   done
+
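+   # send_ip_packet / send_ip6_packet inject a frame on ${inport} and echo the
+   # frame expected in return: the original packet itself when delivery is
+   # expected (fail=0), or the ICMP "Fragmentation Needed" / "Packet Too Big"
+   # error that should bounce back to the sender otherwise.  Callers append
+   # the echoed frame to the matching *.expected file.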
+   send_ip_packet() {
+       local inport=${1} hv=${2} eth_src=${3} eth_dst=${4} ipv4_src=${5} ipv4_dst=${6} data=${7} fail=${8} mtu=${9:-$3}
+       packet=$(fmt_pkt "
+           Ether(dst='${eth_dst}', src='${eth_src}') /
+           IP(src='${ipv4_src}', dst='${ipv4_dst}') /
+           ICMP(type=8) / bytes.fromhex('${data}')
+       ")
+       as hv${hv} ovs-appctl netdev-dummy/receive ${inport} ${packet}
+       if [[ x"${fail}" != x0 ]]; then
+         original_ip_frame=$(fmt_pkt "
+           IP(src='${ipv4_src}', dst='${ipv4_dst}') /
+           ICMP(type=8) / bytes.fromhex('${data}')
+         ")
+         # IP(flags=2) means DF (Don't Fragment) = 1
+         # ICMP(type=3, code=4) means Destination Unreachable, Fragmentation Needed
+         packet=$(fmt_pkt "
+             Ether(dst='${eth_src}', src='${eth_dst}') /
+             IP(src='${ipv4_dst}', dst='${ipv4_src}', ttl=255, flags=2, id=0) /
+             ICMP(type=3, code=4, nexthopmtu=${mtu}) /
+             bytes.fromhex('${original_ip_frame:0:$((534 * 2))}')
+         ")
+       fi
+       echo ${packet}
+   }
+
+   send_ip6_packet() {
+       local inport=${1} hv=${2} eth_src=${3} eth_dst=${4} ipv6_src=${5} ipv6_dst=${6} data=${7} fail=${8} mtu=${9:-$3}
+       packet=$(fmt_pkt "
+           Ether(dst='${eth_dst}', src='${eth_src}') /
+           IPv6(src='${ipv6_src}', dst='${ipv6_dst}') /
+           ICMPv6EchoRequest() / bytes.fromhex('${data}')
+       ")
+       as hv${hv} ovs-appctl netdev-dummy/receive ${inport} ${packet}
+       if [[ x"${fail}" != x0 ]]; then
+         original_ip_frame=$(fmt_pkt "
+           IPv6(src='${ipv6_src}', dst='${ipv6_dst}') /
+           ICMPv6EchoRequest() / bytes.fromhex('${data}')
+         ")
+         packet=$(fmt_pkt "
+             Ether(dst='${eth_src}', src='${eth_dst}') /
+             IPv6(src='${ipv6_dst}', dst='${ipv6_src}', hlim=255) /
+             ICMPv6PacketTooBig(mtu=${mtu}) /
+             bytes.fromhex('${original_ip_frame:0:$((1218 * 2))}')
+         ")
+       fi
+       echo ${packet}
+   }
+
+   reset_env() {
+       for port in first multi1 multi2; do
+           as hv1 reset_pcap_file $port hv1/$port
+       done
+       for port in second multi1 multi2; do
+           as hv2 reset_pcap_file $port hv2/$port
+       done
+       for port in hv1/multi1 hv2/multi1 hv1/multi2 hv2/multi2 hv1/first hv2/second; do
+           : > $port.expected
+       done
+   }
+
+   check_pkts() {
+       for port in hv1/multi1 hv2/multi1 hv1/multi2 hv2/multi2 hv1/first hv2/second; do
+           OVN_CHECK_PACKETS_REMOVE_BROADCAST([${port}-tx.pcap], [${port}.expected])
+       done
+   }
+
+   payload() {
+       echo $(cat /dev/urandom | tr -cd 'a-f0-9' | head -c ${1})
+   }
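+   # payload N emits N random hex digits, i.e. N/2 bytes once converted with
+   # bytes.fromhex(); len=3000 below therefore yields a 1500-byte ICMP payload,
+   # enough to exceed the tunnelled path MTU, while len=1000 stays well below it.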
+
+   wait_for_ports_up
+   OVN_POPULATE_ARP
+
+   reset_env
+
+   AS_BOX([Packets of proper size are delivered from multichassis to regular ports])
+
+   len=1000
+   packet=$(send_ip_packet multi1 1 $multi1_mac $first_mac $multi1_ip $first_ip $(payload $len) 0)
+   echo $packet >> hv1/first.expected
+
+   packet=$(send_ip_packet multi1 1 $multi1_mac $second_mac $multi1_ip $second_ip $(payload $len) 0)
+   echo $packet >> hv2/second.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $first_mac $multi1_ip6 $first_ip6 $(payload $len) 0)
+   echo $packet >> hv1/first.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $second_mac $multi1_ip6 $second_ip6 $(payload $len) 0)
+   echo $packet >> hv2/second.expected
+
+   check_pkts
+   reset_env
+
+   AS_BOX([Oversized packets are not delivered from multichassis to regular ports])
+
+   len=3000
+   packet=$(send_ip_packet multi1 1 $multi1_mac $first_mac $multi1_ip $first_ip $(payload $len) 1)
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip_packet multi1 1 $multi1_mac $second_mac $multi1_ip $second_ip $(payload $len) 1)
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $first_mac $multi1_ip6 $first_ip6 $(payload $len) 1)
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $second_mac $multi1_ip6 $second_ip6 $(payload $len) 1)
+   echo $packet >> hv1/multi1.expected
+
+   check_pkts
+   reset_env
+
+   AS_BOX([Packets of proper size are delivered from regular to multichassis ports])
+
+   len=1000
+   packet=$(send_ip_packet first 1 $first_mac $multi1_mac $first_ip $multi1_ip $(payload $len) 0)
+   echo $packet >> hv1/multi1.expected
+   echo $packet >> hv2/multi1.expected
+
+   packet=$(send_ip_packet second 2 $second_mac $multi1_mac $second_ip $multi1_ip $(payload $len) 0)
+   echo $packet >> hv1/multi1.expected
+   echo $packet >> hv2/multi1.expected
+
+   packet=$(send_ip6_packet first 1 $first_mac $multi1_mac $first_ip6 $multi1_ip6 $(payload $len) 0)
+   echo $packet >> hv1/multi1.expected
+   echo $packet >> hv2/multi1.expected
+
+   packet=$(send_ip6_packet second 2 $second_mac $multi1_mac $second_ip6 $multi1_ip6 $(payload $len) 0)
+   echo $packet >> hv1/multi1.expected
+   echo $packet >> hv2/multi1.expected
+
+   check_pkts
+   reset_env
+
+   AS_BOX([Oversized packets are not delivered from regular to multichassis ports])
+
+   len=3000
+   packet=$(send_ip_packet first 1 $first_mac $multi1_mac $first_ip $multi1_ip $(payload $len) 1)
+   echo $packet >> hv1/first.expected
+
+   packet=$(send_ip_packet second 2 $second_mac $multi1_mac $second_ip $multi1_ip $(payload $len) 1)
+   echo $packet >> hv2/second.expected
+
+   packet=$(send_ip6_packet first 1 $first_mac $multi1_mac $first_ip6 $multi1_ip6 $(payload $len) 1)
+   echo $packet >> hv1/first.expected
+
+   packet=$(send_ip6_packet second 2 $second_mac $multi1_mac $second_ip6 $multi1_ip6 $(payload $len) 1)
+   echo $packet >> hv2/second.expected
+
+   check_pkts
+   reset_env
+
+   AS_BOX([Packets of proper size are delivered from multichassis to multichassis ports])
+
+   len=1000
+   packet=$(send_ip_packet multi1 1 $multi1_mac $multi2_mac $multi1_ip $multi2_ip $(payload $len) 0)
+   echo $packet >> hv1/multi2.expected
+   echo $packet >> hv2/multi2.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $multi2_mac $multi1_ip6 $multi2_ip6 $(payload $len) 0)
+   echo $packet >> hv1/multi2.expected
+   echo $packet >> hv2/multi2.expected
+
+   check_pkts
+   reset_env
+
+   AS_BOX([Oversized packets are not delivered from multichassis to multichassis ports])
+
+   len=3000
+   packet=$(send_ip_packet multi1 1 $multi1_mac $multi2_mac $multi1_ip $multi2_ip $(payload $len) 1)
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $multi2_mac $multi1_ip6 $multi2_ip6 $(payload $len) 1)
+   echo $packet >> hv1/multi1.expected
+
+   check_pkts
+   reset_env
+
+   AS_BOX([MTU updates are honored in ICMP Path MTU calculation])
+
+   set_mtu() {
+       local hv=${1} iface=${2} new_mtu=${3}
+
+       iface_uuid=$(as ${hv} ovs-vsctl --bare --columns _uuid find Interface name=${iface})
+       check as ${hv} ovs-vsctl set interface ${iface_uuid} mtu_request=${new_mtu}
+   }
+
+   set_mtu_for_all_ports() {
+       for port in multi1 multi2 first; do
+           set_mtu hv1 ${port} ${1}
+       done
+       for port in multi1 multi2 second; do
+           set_mtu hv2 ${port} ${1}
+       done
+   }
+
+   initial_mtu=1500  # all interfaces are 1500 by default
+   new_mtu=1400
+   set_mtu_for_all_ports ${new_mtu}
+   mtu_diff=$((${initial_mtu} - ${new_mtu}))
+
+   len=3000
+   expected_ip_mtu=$(($3 - ${mtu_diff}))
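+   # e.g. for the geneve/IPv4 variant the tunnel MTU argument is 1424, so the
+   # ICMP errors below should now advertise 1424 - (1500 - 1400) = 1324.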
+   packet=$(send_ip_packet first 1 $first_mac $multi1_mac $first_ip $multi1_ip $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/first.expected
+
+   packet=$(send_ip_packet second 2 $second_mac $multi1_mac $second_ip $multi1_ip $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv2/second.expected
+
+   packet=$(send_ip6_packet first 1 $first_mac $multi1_mac $first_ip6 $multi1_ip6 $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/first.expected
+
+   packet=$(send_ip6_packet second 2 $second_mac $multi1_mac $second_ip6 $multi1_ip6 $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv2/second.expected
+
+   packet=$(send_ip_packet multi1 1 $multi1_mac $first_mac $multi1_ip $first_ip $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip_packet multi1 1 $multi1_mac $second_mac $multi1_ip $second_ip $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $first_mac $multi1_ip6 $first_ip6 $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $second_mac $multi1_ip6 $second_ip6 $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip_packet multi1 1 $multi1_mac $multi2_mac $multi1_ip $multi2_ip $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/multi1.expected
+
+   packet=$(send_ip6_packet multi1 1 $multi1_mac $multi2_mac $multi1_ip6 $multi2_ip6 $(payload $len) 1 ${expected_ip_mtu})
+   echo $packet >> hv1/multi1.expected
+
+   check_pkts
+
+   OVN_CLEANUP([hv1],[hv2])
+
+   AT_CLEANUP
+   ])])
+
+# NOTE(ihar) no STT variants because it's not supported by upstream kernels
+MULTICHASSIS_PATH_MTU_DISCOVERY_TEST([ipv4], [geneve], [1424])
+MULTICHASSIS_PATH_MTU_DISCOVERY_TEST([ipv6], [geneve], [1404])
+MULTICHASSIS_PATH_MTU_DISCOVERY_TEST([ipv4], [vxlan], [1432])
+MULTICHASSIS_PATH_MTU_DISCOVERY_TEST([ipv6], [vxlan], [1412])
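+# The MTU arguments reflect the encapsulation overhead of each variant: the
+# IPv6 flavours are 20 bytes lower than their IPv4 counterparts (larger outer
+# IP header), and geneve ends up 8 bytes below vxlan here, presumably due to
+# the geneve option used to carry OVN metadata.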
+
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([options:activation-strategy for logical port])
 AT_KEYWORDS([multi-chassis])
@@ -16278,25 +16696,25 @@ sleep 2
 # Get total number of ipv4 packets that received on ovs
 
 # sender side
-flow=$(as hv1 ovs-ofctl dump-flows br-int table=44 | grep priority=2002|grep ip,metadata=0x1)
+flow=$(as hv1 ovs-ofctl dump-flows br-int table=46 | grep priority=2002|grep ip,metadata=0x1)
 n_pkts="$(echo $flow|awk -F',' '{ print $4 }'|awk -F'=' '{ print $2 }')"
 check test $n_pkts -eq 1
 
 # receiver side
-flow=$(as hv2 ovs-ofctl dump-flows br-int table=44 | grep priority=2002|grep ip,metadata=0x1)
+flow=$(as hv2 ovs-ofctl dump-flows br-int table=46 | grep priority=2002|grep ip,metadata=0x1)
 n_pkts="$(echo $flow|awk -F',' '{ print $4 }'|awk -F'=' '{ print $2 }')"
 check test $n_pkts -eq 1
 
 # Get total number of ipv6 packets that received on ovs
 
 # sender side
-flow=$(as hv1 ovs-ofctl dump-flows br-int table=44 | grep priority=2002|grep ipv6,metadata=0x1)
+flow=$(as hv1 ovs-ofctl dump-flows br-int table=46 | grep priority=2002|grep ipv6,metadata=0x1)
 n_pkts="$(echo $flow|awk -F',' '{ print $4 }'|awk -F'=' '{ print $2 }')"
 check test $n_pkts -eq 1
 
 
 # receiver side
-flow=$(as hv2 ovs-ofctl dump-flows br-int table=44 | grep priority=2002|grep ipv6,metadata=0x1)
+flow=$(as hv2 ovs-ofctl dump-flows br-int table=46 | grep priority=2002|grep ipv6,metadata=0x1)
 n_pkts="$(echo $flow|awk -F',' '{ print $4 }'|awk -F'=' '{ print $2 }')"
 check test $n_pkts -eq 1
 
@@ -17210,7 +17628,7 @@ test_icmp() {
                   icmp4.code==0"
     shift; shift; shift; shift; shift; shift
     hv=hv`vif_to_hv $inport`
-    as $hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+    OVS_WAIT_UNTIL([as $hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
     in_ls=`vif_to_ls $inport`
     in_lrp=`vif_to_lrp $inport`
     for outport; do
@@ -17856,17 +18274,17 @@ check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow
 check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the less restrictive flows should have been installed.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | \
     grep "priority=1003" | \
     sed 's/conjunction([[^)]]*)/conjunction()/g' | \
     sed 's/conj_id=[[0-9]]*,/conj_id=xxx,/g' | sort], [0], [dnl
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
 ])
 
 # Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.3, 10.0.0.4 should be allowed.
@@ -17901,17 +18319,17 @@ check ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1 || ip4.src==10.0.0.1'
 check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the second less restrictive allow ACL should have been installed.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | \
     grep "priority=1003" | \
     sed 's/conjunction([[^)]]*)/conjunction()/g' | \
     sed 's/conj_id=[[0-9]]*,/conj_id=xxx,/g' | sort], [0], [dnl
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
 ])
 
 # Remove the less restrictive allow ACL.
@@ -17919,17 +18337,17 @@ check ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1'
 check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the 10.0.0.1 conjunction should have been reinstalled.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | \
     grep "priority=1003" | \
     sed 's/conjunction([[^)]]*)/conjunction()/g' | \
     sed 's/conj_id=[[0-9]]*,/conj_id=xxx,/g' | sort], [0], [dnl
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
 ])
 
 # Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.3, 10.0.0.4 should be allowed.
@@ -17959,17 +18377,17 @@ check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow
 check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the less restrictive flows should have been installed.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | \
    grep "priority=1003" | \
    sed 's/conjunction([[^)]]*)/conjunction()/g' | \
    sed 's/conj_id=[[0-9]]*,/conj_id=xxx,/g' | sort], [0], [dnl
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
 ])
 
 # Add another ACL that overlaps with the existing less restrictive ones.
@@ -17980,20 +18398,20 @@ check ovn-nbctl --wait=hv sync
 # with an additional conjunction action.
 #
 # New non-conjunctive flows should be added to match on 'udp'.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | \
    grep "priority=1003" | \
    sed 's/conjunction([[^)]]*)/conjunction()/g' | \
    sed 's/conj_id=[[0-9]]*,/conj_id=xxx,/g' | sort], [0], [dnl
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,45)
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction(),conjunction()
- table=44, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
- table=44, priority=1003,udp,metadata=0x1 actions=resubmit(,45)
- table=44, priority=1003,udp6,metadata=0x1 actions=resubmit(,45)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,conj_id=xxx,ip,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,47)
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction(),conjunction()
+ table=46, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
+ table=46, priority=1003,udp,metadata=0x1 actions=resubmit(,47)
+ table=46, priority=1003,udp6,metadata=0x1 actions=resubmit(,47)
 ])
 
 OVN_CLEANUP([hv1])
@@ -18048,17 +18466,17 @@ check ovn-nbctl acl-add pg1 to-lport 100 'outport == @pg1 && ip4.src == $as2' al
 
 wait_for_ports_up
 check ovn-nbctl --wait=hv sync
-ovs-ofctl dump-flows br-int table=44
-AT_CHECK([test `ovs-ofctl dump-flows br-int table=44 | grep -c conj_id` = 2])
+ovs-ofctl dump-flows br-int table=46
+AT_CHECK([test `ovs-ofctl dump-flows br-int table=46 | grep -c conj_id` = 2])
 
 echo -------
 # Add another address in as1, so that the 1st ACL will now generate 2 conjunctions.
 ovn-nbctl set address_set as1 addresses="10.0.0.1,10.0.0.2"
 check ovn-nbctl --wait=hv sync
 
-ovs-ofctl dump-flows br-int table=44
+ovs-ofctl dump-flows br-int table=46
 # There should be 3 conjunctions in total (2 from 1st ACL + 1 from 2nd ACL)
-AT_CHECK([test `ovs-ofctl dump-flows br-int table=44 | grep -c conj_id` = 3])
+AT_CHECK([test `ovs-ofctl dump-flows br-int table=46 | grep -c conj_id` = 3])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -18276,7 +18694,7 @@ AT_SETUP([TTL exceeded])
 AT_KEYWORDS([ttl-exceeded])
 ovn_start
 
-# test_ip_packet INPORT HV ETH_SRC ETH_DST IPV4_SRC IPV4_DST IPV4_ROUTER IP_CHKSUM EXP_IP_CHKSUM EXP_ICMP_CHKSUM
+# test_ip_packet INPORT HV ETH_SRC ETH_DST IPV4_SRC IPV4_DST IPV4_ROUTER IP_CHKSUM EXP_IP_CHKSUM EXP_ICMP_CHKSUM SHOULD_REPLY
 #
 # Causes a packet to be received on INPORT of the hypervisor HV. The packet is an IPv4 packet with
 # ETH_SRC, ETH_DST, IPV4_SRC, IPV4_DST, IP_CHKSUM as specified and TTL set to 1.
@@ -18292,6 +18710,7 @@ test_ip_packet() {
     local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_router=$7 ip_chksum=$8
     local exp_ip_chksum=$9 exp_icmp_chksum=${10}
     shift 10
+    local should_reply=$1
 
     local ip_ttl=01
     local packet=${eth_dst}${eth_src}08004500001400004000${ip_ttl}01${ip_chksum}${ipv4_src}${ipv4_dst}
@@ -18300,27 +18719,31 @@ test_ip_packet() {
     local icmp_type_code_response=0b00
     local icmp_data=00000000
     local reply_icmp_payload=${icmp_type_code_response}${exp_icmp_chksum}${icmp_data}
-    local reply=${eth_src}${eth_dst}08004500003000004000${reply_icmp_ttl}01${exp_ip_chksum}${ip_router}${ipv4_src}${reply_icmp_payload}
-    echo $reply$orig_pkt_in_reply >> vif$inport.expected
+    if test $should_reply == yes; then
+        local reply=${eth_src}${eth_dst}08004500003000004000${reply_icmp_ttl}01${exp_ip_chksum}${ip_router}${ipv4_src}${reply_icmp_payload}
+        echo $reply$orig_pkt_in_reply >> vif$inport.expected
+    fi
 
     as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
 
-# test_ip6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_DST IPV6_ROUTER EXP_ICMP_CHKSUM
+# test_ip6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_DST IPV6_ROUTER EXP_ICMP_CHKSUM SHOULD_REPLY
 #
 # Causes a packet to be received on INPORT of the hypervisor HV. The packet is an IPv6
 # packet with ETH_SRC, ETH_DST, IPV6_SRC and IPV6_DST as specified.
 # IPV6_ROUTER and EXP_ICMP_CHKSUM are the source IP and checksum of the icmpv6 ttl exceeded
 # packet sent by OVN logical router
 test_ip6_packet() {
-    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6 ipv6_router=$7 exp_icmp_chksum=$8
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6 ipv6_router=$7 exp_icmp_chksum=$8 should_reply=$9
     shift 8
 
     local ip6_hdr=6000000000151101${ipv6_src}${ipv6_dst}
     local packet=${eth_dst}${eth_src}86dd${ip6_hdr}dbb8303900155bac6b646f65206676676e6d66720a
 
-    local reply=${eth_src}${eth_dst}86dd6000000000453afe${ipv6_router}${ipv6_src}0300${exp_icmp_chksum}00000000${ip6_hdr}dbb8303900155bac6b646f65206676676e6d66720a
-    echo $reply >> vif$inport.expected
+    if test $should_reply == yes; then
+        local reply=${eth_src}${eth_dst}86dd6000000000453afe${ipv6_router}${ipv6_src}0300${exp_icmp_chksum}00000000${ip6_hdr}dbb8303900155bac6b646f65206676676e6d66720a
+        echo $reply >> vif$inport.expected
+    fi
 
     as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
@@ -18343,6 +18766,8 @@ for i in 1 2; do
             options:tx_pcap=hv$i/vif$i-tx.pcap \
             options:rxq_pcap=hv$i/vif$i-rx.pcap \
             ofport-request=$i
+
+    ovs-appctl -t ovn-controller vlog/set file:dbg:pinctrl
 done
 
 ovn-nbctl lr-add lr0
@@ -18358,10 +18783,22 @@ OVN_POPULATE_ARP
 wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
-test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 1 254) 0000 f87c ea96
-test_ip6_packet 1 1 000000000001 00000000ff01 20010db8000100000000000000000011 20010db8000200000000000000000011 20010db8000100000000000000000001 1c22
+test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 1 254) 0000 f87c ea96 yes
+test_ip6_packet 1 1 000000000001 00000000ff01 20010db8000100000000000000000011 20010db8000200000000000000000011 20010db8000100000000000000000001 1c22 yes
+
+# Should not send ICMP for multicast
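+# (01:00:5e:7f:00:01 and 33:33:00:00:00:01 are the standard multicast MAC
+# mappings for 239.255.0.1 and ff02::1, respectively.)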
+test_ip_packet 1 1 000000000001 01005e7f0001 $(ip_to_hex 192 168 1 1) $(ip_to_hex 239 255 0 1) $(ip_to_hex 192 168 1 254) 0000 0000 0000 no
+test_ip6_packet 1 1 000000000001 333300000001 20010db8000100000000000000000011 ff020000000000000000000000000001 20010db8000100000000000000000001 0000 no
+
 OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [vif1.expected])
 
+# Confirm from the debug log that we only see 2 packet-ins (i.e. none for the
+# multicast packets). This is necessary because the absence of ICMP replies
+# does not prove that no packet-in happened: a packet-in could have been
+# processed and the resulting ICMP message dropped afterwards.
+AT_CHECK([grep -c packet-in hv1/ovn-controller.log], [0], [2
+])
+
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
 ])
@@ -18656,7 +19093,7 @@ packet="inport==\"sw1-p1\" && eth.src==$sw1_p1_mac && eth.dst==$sw1_ro_mac &&
        udp && udp.src==53 && udp.dst==4369"
 
 # Start by Sending the packet and make sure it makes it there as expected
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$sw2_ro_mac && eth.dst==$sw2_p1_mac &&
@@ -18670,7 +19107,7 @@ OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 as hv2 ovs-appctl -t ovn-controller exit
 
 # Now send the packet again. This time, it should not arrive.
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 
@@ -19552,7 +19989,7 @@ packet="inport==\"sw1-p1\" && eth.src==$sw1_p1_mac && eth.dst==$sw1_ro_mac &&
        udp && udp.src==53 && udp.dst==4369"
 
 # Start by Sending the packet and make sure it makes it there as expected
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$sw2_ro_mac && eth.dst==$sw2_p1_mac &&
@@ -19566,7 +20003,7 @@ OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 as hv2 ovs-appctl -t ovn-controller exit --restart
 
 # Now send the packet again. This time, it should still arrive
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 cat expected expected > expected2
 
@@ -19705,7 +20142,7 @@ test_ip_packet_larger() {
     # Set the packet length to 114.
     pkt_len=0072
     packet=${dst_mac}${src_mac}08004500${pkt_len}000000004001c3dd
-    orig_packet_l3=${src_ip}${dst_ip}0304fcfb00000000
+    orig_packet_l3=${src_ip}${dst_ip}0800f7ff00000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
@@ -19729,10 +20166,10 @@ test_ip_packet_larger() {
         # Packet to expect at br-phys.
         src_mac="000020201213"
         dst_mac="00000012af11"
-        src_ip=`ip_to_hex 10 0 0 3`
+        src_ip=`ip_to_hex 172 168 0 100`
         dst_ip=`ip_to_hex 172 168 0 3`
-        expected=${dst_mac}${src_mac}08004500${pkt_len}000000003f01c4dd
-        expected=${expected}${src_ip}${dst_ip}0304fcfb00000000
+        expected=${dst_mac}${src_mac}08004500${pkt_len}000000003f0121d4
+        expected=${expected}${src_ip}${dst_ip}0800f7ff00000000
         expected=${expected}000000000000000000000000000000000000
         expected=${expected}000000000000000000000000000000000000
         expected=${expected}000000000000000000000000000000000000
@@ -19793,7 +20230,7 @@ test_ip_packet_larger_ext() {
     # Set the packet length to 114.
     pkt_len=0072
     packet=${dst_mac}${src_mac}08004500${pkt_len}000000004001${checksum}
-    orig_packet_l3=${src_ip}${dst_ip}0900000000000000
+    orig_packet_l3=${src_ip}${dst_ip}0800f7ff00000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
@@ -19810,7 +20247,7 @@ test_ip_packet_larger_ext() {
     dst_ip=`ip_to_hex 172 168 0 4`
     # pkt len should be 146 (28 (icmp packet) + 118 (orig ip + payload))
     reply_pkt_len=008e
-    ip_csum=f39b
+    ip_csum=$7
     icmp_reply=${src_mac}${dst_mac}08004500${reply_pkt_len}00004000fe01${reply_checksum}
     icmp_reply=${icmp_reply}${src_ip}${dst_ip}0304${ip_csum}0000$(printf "%04x" $mtu)
     icmp_reply=${icmp_reply}4500${pkt_len}000000004001${checksum}
@@ -19985,10 +20422,10 @@ OVS_WAIT_FOR_OUTPUT([
 ])
 
 AS_BOX([testing ingress traffic mtu 100 - IPv4])
-test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20cf 100 22b6
+test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20d3 100 22b6 fc97
 
 AS_BOX([testing ingress traffic mtu 100 - IPv4 FIP])
-test_ip_packet_larger_ext 2 f00000010204 $(ip_to_hex 172 168 0 110) 20c5 100 22ac
+test_ip_packet_larger_ext 2 f00000010204 $(ip_to_hex 172 168 0 110) 20c5 100 22ac fc9b
 
 AS_BOX([testing ingress traffic mtu 100 - IPv6])
 test_ip6_packet_larger_ext 1 000020201213 20000000000000000000000000000001 100 cc7a
@@ -20055,10 +20492,10 @@ OVS_WAIT_FOR_OUTPUT([
 ])
 
 AS_BOX([testing ingress traffic mtu 100 for gw router - IPv4])
-test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20cf 100 22b6
+test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20d3 100 22b6 fc97
 
 AS_BOX([testing ingress traffic mtu 100 for gw router - IPv6])
-test_ip6_packet_larger_ext 1 000020201213 20000000000000000000000000000001 100 cc7a
+test_ip6_packet_larger_ext 1 000020201213 20000000000000000000000000000001 100 cc7a fc9b
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -21012,9 +21449,9 @@ check_virtual_offlows_present() {
     lr0_dp_key=$(printf "%x" $(fetch_column Datapath_Binding tunnel_key external_ids:name=lr0))
     lr0_public_dp_key=$(printf "%x" $(fetch_column Port_Binding tunnel_key logical_port=lr0-public))
 
-    AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | grep "priority=2000"], [0], [dnl
- table=44, priority=2000,ip,metadata=0x$sw0_dp_key actions=resubmit(,45)
- table=44, priority=2000,ipv6,metadata=0x$sw0_dp_key actions=resubmit(,45)
+    AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | grep "priority=2000"], [0], [dnl
+ table=46, priority=2000,ip,metadata=0x$sw0_dp_key actions=resubmit(,47)
+ table=46, priority=2000,ipv6,metadata=0x$sw0_dp_key actions=resubmit(,47)
 ])
 
     AT_CHECK_UNQUOTED([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \
@@ -21025,7 +21462,7 @@ check_virtual_offlows_present() {
 
 check_virtual_offlows_not_present() {
     hv=$1
-    AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | grep "priority=2000"], [1], [dnl
+    AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=47 | ofctl_strip_all | grep "priority=2000"], [1], [dnl
 ])
 
     AT_CHECK([as $hv ovs-ofctl dump-flows br-int table=11 | ofctl_strip_all | \
@@ -21116,7 +21553,7 @@ check_virtual_offlows_not_present hv2
 send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv1_ch_uuid])
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p1])
@@ -21184,7 +21621,7 @@ tpa=$(ip_to_hex 10 0 0 10)
 send_garp 1 2 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv1_ch_uuid])
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p3])
@@ -21217,7 +21654,7 @@ tpa=$(ip_to_hex 10 0 0 10)
 send_garp 2 1 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv2_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv2_ch_uuid])
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p2])
@@ -21249,7 +21686,7 @@ tpa=$(ip_to_hex 10 0 0 4)
 send_arp_reply 1 1 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv1_ch_uuid])
 sleep 1
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
@@ -21275,7 +21712,7 @@ check_virtual_offlows_not_present hv2
 as hv1 ovs-vsctl del-port hv1-vif1
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x], [0], [])
+logical_port=sw0-vir) = x])
 sleep 1
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
@@ -21310,7 +21747,7 @@ send_arp_reply 2 1 $eth_src $eth_dst $spa $tpa
 sleep 1
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv2_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv2_ch_uuid])
 sleep 1
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p2])
@@ -21335,7 +21772,7 @@ check_virtual_offlows_not_present hv1
 ovn-nbctl lsp-del sw0-p2
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x], [0], [])
+logical_port=sw0-vir) = x])
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = x])
 
@@ -21516,7 +21953,7 @@ AT_CAPTURE_FILE([offlows])
 packet0="inport==\"sw0-p11\" && eth.src==00:00:00:00:00:11 && eth.dst==00:00:00:00:00:21 &&
          ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==192.168.1.100 &&
          tcp && tcp.src==10000 && tcp.dst==80"
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet0"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet0"])
 ovn-nbctl --wait=hv
 
 ovn-sbctl list controller_event > events
@@ -21545,7 +21982,7 @@ packet1="inport==\"sw1-p0\" && eth.src==00:00:00:00:00:33 && eth.dst==00:00:00:0
          ip4 && ip.ttl==64 && ip4.src==192.168.2.11 && ip4.dst==192.168.2.100 &&
          tcp && tcp.src==10000 && tcp.dst==80"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet1"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet1"])
 ovn-nbctl --wait=hv
 ovn-sbctl list controller_event
 uuid=$(ovn-sbctl list controller_event | awk '/_uuid/{print $3}')
@@ -21561,7 +21998,7 @@ packet2="inport==\"sw0-p11\" && eth.src==00:00:00:00:00:11 && eth.dst==00:00:00:
          ip6 && ip.ttl==64 && ip6.src==2001::11 && ip6.dst==2001::10 &&
          tcp && tcp.src==10000 && tcp.dst==50051"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet2"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet2"])
 ovn-nbctl --wait=hv
 ovn-sbctl list controller_event
 uuid=$(ovn-sbctl list controller_event | awk '/_uuid/{print $3}')
@@ -23619,7 +24056,7 @@ m4_define([DVR_N_S_PING],
    OVN_CHECK_PACKETS_REMOVE_BROADCAST([hv4/vif-north-tx.pcap], [vif-north.expected])
 
    # Confirm that packets did not go out via tunnel port.
-   AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=38 | grep NXM_NX_TUN_METADATA0 | grep n_packets=0 | wc -l], [0], [[0
+   AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=39 | grep NXM_NX_TUN_METADATA0 | grep n_packets=0 | wc -l], [0], [[0
 ]])
 
    # Confirm that packet went out via localnet port
@@ -23744,7 +24181,7 @@ send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
 wait_row_count MAC_Binding 1
 
-OVS_WAIT_UNTIL([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
+OVS_WAIT_FOR_OUTPUT([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
 list mac_binding], [0], [lr0-sw0
 10.0.0.30
 50:54:00:00:00:03
@@ -23791,7 +24228,7 @@ grep table_id=10 | wc -l`])
 
 check_row_count MAC_Binding 1
 
-OVS_WAIT_UNTIL([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
+OVS_WAIT_FOR_OUTPUT([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
 list mac_binding], [0], [lr0-sw0
 10.0.0.30
 50:54:00:00:00:13
@@ -23820,7 +24257,7 @@ OVS_WAIT_UNTIL(
 | wc -l`]
 )
 
-OVS_WAIT_UNTIL([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
+OVS_WAIT_FOR_OUTPUT([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
 find mac_binding ip=10.0.0.50], [0], [lr0-sw0
 10.0.0.50
 50:54:00:00:00:33
@@ -24377,7 +24814,7 @@ AT_CAPTURE_FILE([sbflows2])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows > sbflows2
    ovn-sbctl dump-flows lr0 | grep ct_lb_mark | grep priority=120 | sed 's/table=..//'], 0,
-  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
+  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
 ])
 
 # get the svc monitor mac.
@@ -24419,8 +24856,7 @@ AT_CHECK(
 AT_CAPTURE_FILE([sbflows4])
 ovn-sbctl dump-flows lr0 > sbflows4
 AT_CHECK([grep lr_in_dnat sbflows4 | grep priority=120 | sed 's/table=..//' | sort], [0], [dnl
-  (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
+  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
 ])
 
 # Delete sw0-p1
@@ -24576,7 +25012,7 @@ AT_CAPTURE_FILE([sbflows2])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows > sbflows2
    ovn-sbctl dump-flows lr0 | grep ct_lb_mark | grep priority=120 | sed 's/table=..//'], 0,
-  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && xxreg0 == 2001::a && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=[[2001::3]]:80,[[2002::3]]:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
+  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && ip6.dst == 2001::a && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=[[2001::3]]:80,[[2002::3]]:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
 ])
 
 # get the svc monitor mac.
@@ -24618,8 +25054,7 @@ AT_CHECK(
 AT_CAPTURE_FILE([sbflows4])
 ovn-sbctl dump-flows lr0 > sbflows4
 AT_CHECK([grep lr_in_dnat sbflows4 | grep priority=120 | sed 's/table=..//' | sort], [0], [dnl
-  (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip6 && xxreg0 == 2001::a && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && xxreg0 == 2001::a && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
+  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && ip6.dst == 2001::a && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
 ])
 
 # Delete sw0-p1
@@ -25447,7 +25882,7 @@ for s_az in $(seq 1 $n_az); do
                     udp && udp.src==53 && udp.dst==4369"
             echo "sending: $packet"
             AT_CHECK([ovn_trace --ovs "$packet" > ${s_az}-${d_az}-$i.ovn-trace])
-            AT_CHECK([ovs-appctl -t ovn-controller inject-pkt "$packet"])
+            OVS_WAIT_UNTIL([ovs-appctl -t ovn-controller inject-pkt "$packet"])
             ovs_inport=$(ovs-vsctl --bare --columns=ofport find Interface external-ids:iface-id="$ovn_inport")
 
             ovs_packet=$(echo $packet | ovstest test-ovn expr-to-packets)
@@ -26002,7 +26437,7 @@ for i in $(seq 5001 5010); do
     packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
             ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==10.0.0.123 &&
             tcp && tcp.src==$i && tcp.dst==80"
-    AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+    OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
     for j in 1 2; do
         # Assume all packets go to lsp2${j}.
@@ -26121,7 +26556,7 @@ wait_for_ports_up
 # Test 1
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==2.2.2.2 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume no packets go to either lsp21 or lsp22.
 > expected_lsp21
@@ -26151,7 +26586,7 @@ done
 # Test 2
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==1.1.1.1 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp22.
 exp_packet="eth.src==00:00:00:01:02:01 && eth.dst==f0:00:00:00:02:22 &&
@@ -26181,7 +26616,7 @@ done
 # Test 3
 packet="inport==\"lsp21\" && eth.src==f0:00:00:00:02:21 && eth.dst==00:00:00:01:02:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.2.21 && ip4.dst==2.2.2.2 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp21.
 exp_packet="eth.src==00:00:00:01:02:01 && eth.dst==f0:00:00:00:02:21 &&
@@ -26278,7 +26713,7 @@ wait_for_ports_up
 # test 1
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==192.168.2.21 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp21.
 exp_packet="eth.src==00:00:00:01:02:01 && eth.dst==f0:00:00:00:02:21 && ip4 &&
@@ -26312,7 +26747,7 @@ ovs-vsctl set interface hv1-vif2 options:tx_pcap=hv1/vif2-tx.pcap
 # test 2
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==192.168.2.200 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp11.
 exp_packet="eth.src==00:00:00:01:01:01 && eth.dst==f0:00:00:00:01:11 && ip4 &&
@@ -26417,7 +26852,7 @@ for i in $(seq 1 2); do
     packet="inport==\"lsp${i}1\" && eth.src==f0:00:00:00:0${i}:1${i} &&
             eth.dst==00:00:00:01:0${i}:01 && ip4 && ip.ttl==64 &&
             ip4.src==192.168.${i}.${i}1 && ip4.dst==10.0.0.1 && icmp"
-    AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+    OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
     # Assume all packets go to lsp${di}1.
     exp_packet="eth.src==00:00:00:01:0${di}:01 && eth.dst==f0:00:00:00:0${di}:1${di} &&
@@ -26530,7 +26965,7 @@ for i in $(seq 1 2); do
     packet="inport==\"lsp${i}1\" && eth.src==f0:00:00:00:0${i}:1${i} &&
             eth.dst==00:00:00:01:0${i}:01 && ip6 && ip.ttl==64 &&
             ip6.src==2001:db8:${i}::${i}1 && ip6.dst==2001:db8:2000::1 && icmp6"
-    AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+    OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
     # Assume all packets go to lsp${di}1.
     exp_packet="eth.src==00:00:00:01:0${di}:01 && eth.dst==f0:00:00:00:0${di}:1${di} && ip6 &&
@@ -26650,7 +27085,7 @@ dst_ip=172.16.1.11
 packet="inport==\"lsp11\" && eth.src==$src_mac && eth.dst==$dst_mac &&
         ip4 && ip.ttl==64 && ip4.src==$src_ip && ip4.dst==$dst_ip &&
         udp && udp.src==53 && udp.dst==4369"
-check as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if the packet hit the forwarding group policy
 AT_CAPTURE_FILE([offlows2])
@@ -27173,7 +27608,7 @@ ovn_attach n1 br-phys 192.168.0.1
 
 # Chassis hv1 should add flows for the ls1 datapath in table 8 (ls_in_port_sec_l2).
 dp_key=$(ovn-sbctl --bare --columns tunnel_key list Datapath_Binding ls1)
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=8.*metadata=0x${dp_key}"], [0], [ignore])
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=8.*metadata=0x${dp_key}"])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -27199,7 +27634,7 @@ ovs-vsctl add-br br-phys
 ovn_attach n1 br-phys 192.168.0.1
 
 # Port_Binding should be released.
-OVS_WAIT_UNTIL([test 0 = $(ovn-sbctl show | grep Port_Binding -c)], [0])
+OVS_WAIT_UNTIL([test 0 = $(ovn-sbctl show | grep Port_Binding -c)])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -27332,22 +27767,24 @@ sleep 5
 send_ipv4_pkt() {
     local hv=$1 inport=$2 eth_src=$3 eth_dst=$4
     local ip_src=$5 ip_dst=$6
-    packet=${eth_dst}${eth_src}08004500001c0000000040110000${ip_src}${ip_dst}0035111100080000
+    local ip_cksum=$7 tcp_cksum=$8
+    packet=${eth_dst}${eth_src}080045000028000000004006${ip_cksum}${ip_src}${ip_dst}0035111112345678000000005002faf0${tcp_cksum}0000
     tcpdump_hex $packet
     as $hv ovs-appctl netdev-dummy/receive ${inport} ${packet}
 }
 
 send_icmp6_packet() {
-    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6
+    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6 tcp_cksum=$7
 
-    local ip6_hdr=6000000000083aff${ipv6_src}${ipv6_dst}
-    local packet=${eth_dst}${eth_src}86dd${ip6_hdr}8000dcb662f00001
+    local ip6_hdr=60000000001406ff${ipv6_src}${ipv6_dst}
+    local packet=${eth_dst}${eth_src}86dd${ip6_hdr}0035111112345678000000005002faf0${tcp_cksum}0000
 
     as $hv ovs-appctl netdev-dummy/receive ${inport} ${packet}
 }
 
 send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
-    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120) \
+    c3ad 83dc
 
 AT_CAPTURE_FILE([offlows2])
 OVS_WAIT_UNTIL([
@@ -27364,7 +27801,8 @@ AT_CHECK([
 
 # Send the pkt from sw0-port2. Packet should not be marked.
 send_ipv4_pkt hv1 hv1-vif2 505400000004 00000000ff01 \
-    $(ip_to_hex 10 0 0 4) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 4) $(ip_to_hex 172 168 0 120) \
+    c3ac 83db
 
 AT_CHECK([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27398,7 +27836,8 @@ AT_CHECK([
 
 ovn-nbctl set logical_router_policy $pol1 options:pkt_mark=2
 send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
-    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120) \
+    c3ad 83dc
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=23 | \
@@ -27431,7 +27870,8 @@ AT_CHECK([
 # Send with src ip 10.0.0.5. The reroute policy should be hit
 # and the packet should be marked with 5.
 send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
-    $(ip_to_hex 10 0 0 5) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 5) $(ip_to_hex 172 168 0 120) \
+    c3ab 83da
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27443,7 +27883,7 @@ OVS_WAIT_UNTIL([
 src_ip6=aef00000000000000000000000000004
 dst_ip6=bef00000000000000000000000000004
 
-send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6}
+send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6} cd16
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27463,7 +27903,7 @@ AT_CHECK([
 src_ip6=aef00000000000000000000000000004
 dst_ip6=bef00000000000000000000000000005
 
-send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6}
+send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6} cd15
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27970,22 +28410,22 @@ AT_CHECK([test ! -z $p1_zoneid])
 p2_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p2 | sed 's/"//g')
 AT_CHECK([test ! -z $p2_zoneid])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep "load:0x${p1_zoneid}->NXM_NX_REG13" | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw1_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw1_dpkey},\
 reg15=0x${p2_dpkey} | grep REG13 | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw1_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw1_dpkey},\
 reg15=0x${p2_dpkey} | grep "load:0x${p2_zoneid}->NXM_NX_REG13" | wc -l) -eq 1])
 
 ovs-vsctl set interface hv1-vif1 external_ids:iface-id=foo
 OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xdown])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 0])
 
 p1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p1 | sed 's/"//g')
@@ -27997,16 +28437,16 @@ OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xup])
 p1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p1 | sed 's/"//g')
 AT_CHECK([test ! -z $p1_zoneid])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep "load:0x${p1_zoneid}->NXM_NX_REG13" | wc -l) -eq 1])
 
 ovs-vsctl del-port hv1-vif2
 OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xdown])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p2_dpkey} | grep REG13 | wc -l) -eq 0])
 
 p2_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p2 | sed 's/"//g')
@@ -28014,7 +28454,7 @@ AT_CHECK([test -z $p2_zoneid])
 
 ovn-nbctl lsp-del sw0-p1
 
-OVS_WAIT_UNTIL([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
+OVS_WAIT_UNTIL([test $(ovs-ofctl dump-flows br-int table=40,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 0])
 
 p1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p1 | sed 's/"//g')
@@ -28737,7 +29177,7 @@ src_mac="f00000000102"
 dst_mac="000000000101"
 src_ip=`ip_to_hex 10 0 1 2`
 dst_ip=`ip_to_hex 10 0 1 1`
-packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
+packet=${dst_mac}${src_mac}08004500001c00000000401164cf${src_ip}${dst_ip}0035111100080000
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
 
 # Even after configuring a router owned IP for SNAT, no packet-ins should
@@ -28763,7 +29203,7 @@ src_mac="f00000000202"
 dst_mac="000000000201"
 src_ip=`ip_to_hex 10 0 2 2`
 dst_ip=`ip_to_hex 10 0 1 1`
-packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
+packet=${dst_mac}${src_mac}08004500001c00000000401163cf${src_ip}${dst_ip}0035111100080000
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif2 $packet
 
 # Still no packet-ins should reach ovn-controller.
@@ -29548,7 +29988,9 @@ OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw1-p1) = xup])
 
 check ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp
 check ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
+check ovn-nbctl lb-add lb-ipv4 88.88.88.89 42.42.42.2
 check ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
+check ovn-nbctl lb-add lb-ipv6 8800::0089 4200::2
 check ovn-nbctl --wait=hv lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
 
 AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST], [1], [dnl
@@ -29839,6 +30281,119 @@ AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -
  table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
 ])
 
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4
+OVS_WAIT_UNTIL(
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 7]
+)
+
+OVS_WAIT_UNTIL(
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 7]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6
+OVS_WAIT_UNTIL(
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 8]
+)
+
+OVS_WAIT_UNTIL(
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 8]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89,ipv6_src=4200::2,ipv6_dst=4200::2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+ table=70, priority=90,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::89))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89,ipv6_src=4200::2,ipv6_dst=4200::2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+ table=70, priority=90,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::89))
+])
+
+check ovn-nbctl --wait=hv ls-lb-del sw0 lb-ipv4
+check ovn-nbctl --wait=hv ls-lb-del sw0 lb-ipv6
+
 # Check backwards compatibility with ovn-northd versions that don't store the
 # original destination tuple.
 #
@@ -30354,46 +30909,46 @@ AT_CHECK([kill -0 $(cat hv1/ovn-controller.pid)])
 check ovn-nbctl --wait=hv sync
 
 # Check OVS flows are installed properly.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=44 | ofctl_strip_all | \
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=46 | ofctl_strip_all | \
     grep "priority=2002" | grep conjunction | \
     sed 's/conjunction([[^)]]*)/conjunction()/g' | \
     sed 's/reg15=0x[[1-9]]/reg15=0xN/g' | sort], [0], [dnl
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x10/0xfff0 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x100/0xff00 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x1000/0xf000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2/0xfffe actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x20/0xffe0 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x200/0xfe00 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2000/0xe000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4/0xfffc actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x40/0xffc0 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x400/0xfc00 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4000/0xc000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8/0xfff8 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x80/0xff80 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x800/0xf800 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8000/0x8000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=1 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x100/0x100,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x10/0xfff0 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x100/0xff00 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x1000/0xf000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2/0xfffe actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x20/0xffe0 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x200/0xfe00 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2000/0xe000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4/0xfffc actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x40/0xffc0 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x400/0xfc00 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4000/0xc000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8/0xfff8 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x80/0xff80 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x800/0xf800 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8000/0x8000 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=1 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
- table=44, priority=2002,udp,reg0=0x80/0x80,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x10/0xfff0 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x100/0xff00 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x1000/0xf000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2/0xfffe actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x20/0xffe0 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x200/0xfe00 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2000/0xe000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4/0xfffc actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x40/0xffc0 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x400/0xfc00 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4000/0xc000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8/0xfff8 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x80/0xff80 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x800/0xf800 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8000/0x8000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.4,tp_dst=1 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x100/0x100,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x10/0xfff0 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x100/0xff00 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x1000/0xf000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2/0xfffe actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x20/0xffe0 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x200/0xfe00 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x2000/0xe000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4/0xfffc actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x40/0xffc0 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x400/0xfc00 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x4000/0xc000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8/0xfff8 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x80/0xff80 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x800/0xf800 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=0x8000/0x8000 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.4,tp_dst=1 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
+ table=46, priority=2002,udp,reg0=0x80/0x80,reg15=0xN,metadata=0x1,nw_src=192.168.47.4 actions=conjunction()
 ])
 
 OVN_CLEANUP([hv1])
@@ -31568,7 +32123,7 @@ ovs-vsctl add-port br-int lsp0-0 -- set interface lsp0-0 external_ids:iface-id=l
 ovs-vsctl add-port br-int lsp0-1 -- set interface lsp0-1 external_ids:iface-id=lsp0-1
 
 check ovn-nbctl --wait=hv sync
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 22])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 22])
 
 # Save the current lflow_run counter
 lflow_run=$(ovn-appctl -t ovn-controller coverage/read-counter lflow_run)
@@ -31578,7 +32133,7 @@ lflow_run=$(ovn-appctl -t ovn-controller coverage/read-counter lflow_run)
 # 1. Remove half of the ports from pg1. The expected conjunction flows should be:
 #    2 + 10 = 12
 check ovn-nbctl --wait=hv pg-set-ports pg1 $(for i in 0 1 2 3 4; do for j in 0 1; do echo lsp${i}-${j}; done; done)
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 12])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 12])
 
 # 2. Unbind lsp0-0. Then there shouldn't be any conjunction flows because the
 #    port group const set should have only one member (lsp0-1). And the total
@@ -31586,25 +32141,25 @@ AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l
 #    10.
 ovs-vsctl del-port br-int lsp0-0
 check ovn-nbctl --wait=hv sync
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 0])
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep 192.168 | wc -l) == 10])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 0])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep 192.168 | wc -l) == 10])
 
 # 3. Rebind lsp0-0. The expected conjunction flows are back to 12.
 ovs-vsctl add-port br-int lsp0-0 -- set interface lsp0-0 external_ids:iface-id=lsp0-0
 check ovn-nbctl --wait=hv sync
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 12])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 12])
 
 # 4. Bind a lsp (lsp9-0) that doesn't belong to pg1, should not see any change.
 ovs-vsctl add-port br-int lsp9-0 -- set interface lsp9-0 external_ids:iface-id=lsp9-0
 check ovn-nbctl --wait=hv sync
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 12])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 12])
 
 # 5. Bind another 2 lsps (lsp1-0 lsp1-1) that belong to pg1 and on a different
 #    LS (ls1), should see conjunction flows doubled (12 x 2 = 24)
 ovs-vsctl add-port br-int lsp1-0 -- set interface lsp1-0 external_ids:iface-id=lsp1-0
 ovs-vsctl add-port br-int lsp1-1 -- set interface lsp1-1 external_ids:iface-id=lsp1-1
 check ovn-nbctl --wait=hv sync
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 24])
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 24])
 
 # 6. Simulate a SB port-group "del and add" notification to ovn-controller in the
 #    same IDL iteration. ovn-controller should still program the same flows. In
@@ -31629,7 +32184,7 @@ for i in $(seq 1 10); do
     check ovn-nbctl --wait=hv sync
 
     # Finally check flow count is the same as before.
-    AT_CHECK([test $(ovs-ofctl dump-flows br-int table=44 | grep conjunction | wc -l) == 24])
+    AT_CHECK([test $(ovs-ofctl dump-flows br-int table=46 | grep conjunction | wc -l) == 24])
 done
 
 # Make sure all the above was performed with I-P (no recompute)
@@ -31743,7 +32298,7 @@ packet="inport==\"sw1-lp1\" && eth.src==00:00:04:01:02:03 &&
        ip4.src==10.0.0.100 && ip4.dst==20.0.0.200 &&
        udp && udp.src==53 && udp.dst==4369"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop rule
 AT_CHECK([ovs-ofctl dump-flows br-int | grep "nw_dst=20.0.0.0/24" | \
@@ -31770,7 +32325,7 @@ packet="inport==\"sw1-lp1\" && eth.src==00:00:04:01:02:03 &&
        ip4.src==10.0.0.100 && ip4.dst==20.0.0.200 &&
        udp && udp.src==53 && udp.dst==4369"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop rule
 AT_CHECK([ovs-ofctl dump-flows br-int "nw_src=10.0.0.0/24" | \
@@ -31857,7 +32412,7 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$bcast_mac &&
        arp.op==1 && arp.sha==$ls1_p1_mac && arp.spa==$ls1_p1_ip &&
        arp.tha==$bcast_mac && arp.tpa==$proxy_ip1"
 
-as hv1 ovn-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovn-appctl -t ovn-controller inject-pkt "$packet"])
 
 as hv1 ovs-ofctl dump-flows br-int| grep 169.254.239.254 | grep priority=50 > debug1
 AT_CAPTURE_FILE([debug1])
@@ -31916,8 +32471,8 @@ check ovn-nbctl acl-add lsw0 to-lport 1002 'outport == "lp2" && ip4.src == 10.0.
 
 # The first ACL should be programmed, but the second one shouldn't.
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.111], [0], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.122], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.111], [0], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.122], [1], [ignore])
 
 # Now create the lport lp2.
 check ovn-nbctl lsp-add lsw0 lp2 \
@@ -31925,12 +32480,12 @@ check ovn-nbctl lsp-add lsw0 lp2 \
 
 check ovn-nbctl --wait=hv sync
 # Now the second ACL should be programmed.
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.122], [0], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.122], [0], [ignore])
 
 # Remove the lport lp2 again, the OVS flow for the second ACL should be
 # removed.
 check ovn-nbctl --wait=hv lsp-del lp2
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.122], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.122], [1], [ignore])
 
 # Test a similar scenario, but where the referenced lport is not bound locally.
 
@@ -31944,8 +32499,8 @@ check ovn-nbctl acl-add lsw0 to-lport 1002 'inport == "lp4" && ip4.dst == 10.0.0
 
 # The ACL for lp3 should be programmed, but the one for lp4 shouldn't.
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.133], [0], [ignore])
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.144], [1], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.133], [0], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.144], [1], [ignore])
 
 # Now create the lport lp4.
 check ovn-nbctl lsp-add lsw0 lp4 \
@@ -31953,7 +32508,7 @@ check ovn-nbctl lsp-add lsw0 lp4 \
 
 # Now the ACL for lp4 should be programmed.
 check ovn-nbctl --wait=hv sync
-AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep 10.0.0.144], [0], [ignore])
+AT_CHECK([ovs-ofctl dump-flows br-int table=46 | grep 10.0.0.144], [0], [ignore])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -32108,7 +32663,6 @@ ovn-nbctl lrp-set-gateway-chassis DR-S3 hv4
 
 ovn-nbctl --wait=sb sync
 OVN_POPULATE_ARP
-
 vif_to_ls () {
     case ${1} in dnl (
         vif?[[11]]) echo ls ;; dnl (
@@ -32222,6 +32776,9 @@ echo "Send Dummy ARP"
 sip=`ip_to_hex 172 16 1 10`
 tip=`ip_to_hex 172 16 1 50`
 test_arp vif-north1 f0f000000011 $sip $tip
+OVS_WAIT_UNTIL(
+    [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=f0:f0:00:00:00:11 | wc -l`]
+)
 
 echo "Send traffic North to South"
 sip=`ip_to_hex 172 16 1 10`
@@ -32242,6 +32799,9 @@ echo "Send Dummy ARP"
 sip=`ip_to_hex 10 0 0 10`
 tip=`ip_to_hex 10 0 0 50`
 test_arp vif-north2 f0f000000022 $sip $tip
+OVS_WAIT_UNTIL(
+    [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=f0:f0:00:00:00:22 | wc -l`]
+)
 
 echo "Send traffic South to North2"
 sip=`ip_to_hex 20 0 0 10`
@@ -32255,6 +32815,9 @@ echo "Send Dummy ARP"
 sip=`ip_to_hex 192 168 0 10`
 tip=`ip_to_hex 192 168 0 50`
 test_arp vif-north3 f0f000000033 $sip $tip
+OVS_WAIT_UNTIL(
+    [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=f0:f0:00:00:00:33 | wc -l`]
+)
 
 echo "Send traffic South to North3"
 sip=`ip_to_hex 20 0 0 10`
@@ -33384,7 +33947,7 @@ check ovn-nbctl --wait=hv sync
 # Use constants so that if tables or registers change, this test can
 # be updated easily.
 DNAT_TABLE=15
-SNAT_TABLE=43
+SNAT_TABLE=45
 DNAT_ZONE_REG="NXM_NX_REG11[[0..15]]"
 SNAT_ZONE_REG="NXM_NX_REG12[[0..15]]"
 
@@ -33929,6 +34492,7 @@ m4_define([MULTIPLE_OVS_INT],
        ovs-ofctl dump-flows br-int | grep $cookie |
            sed -e 's/duration=[[0-9.]]*s, //g' |
            sed -e 's/idle_age=[[0-9]]*, //g' |
+           sed -e 's/hard_age=[[0-9]]*, //g' |
            sed -e 's/n_packets=[[0-9]]*, //g' |
            sed -e 's/n_bytes=[[0-9]]*, //g'
    }
@@ -34926,7 +35490,8 @@ check ovs-vsctl add-port br-int p1 -- set interface p1 external_ids:iface-id=lsp
 wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
-check ovn-nbctl lb-add lb1 "192.168.0.10" "192.168.10.10,192.168.10.20"
+check ovn-nbctl lb-add lb1 "192.168.0.10" "192.168.10.10,192.168.10.20" \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add sw lb1
 
 # Remove a single backend
@@ -34949,7 +35514,8 @@ AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.0.10:0, backend=192.168.
 AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.0.10:0, backend=192.168.10.30:0, protocol=0" hv1/ovn-controller.log], [0])
 
 # Check flush for LB with port and protocol
-check ovn-nbctl lb-add lb1 "192.168.30.10:80" "192.168.40.10:8080,192.168.40.20:8090" udp
+check ovn-nbctl lb-add lb1 "192.168.30.10:80" "192.168.40.10:8080,192.168.40.20:8090" udp \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add sw lb1
 check ovn-nbctl lb-del lb1
 check ovn-nbctl --wait=hv sync
@@ -34958,7 +35524,8 @@ AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.30.10:80, backend=192.16
 AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.30.10:80, backend=192.168.40.20:8090, protocol=17" hv1/ovn-controller.log], [0])
 
 # Check recompute when LB is no longer local
-check ovn-nbctl lb-add lb1 "192.168.50.10:80" "192.168.60.10:8080"
+check ovn-nbctl lb-add lb1 "192.168.50.10:80" "192.168.60.10:8080" \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add sw lb1
 check ovs-vsctl remove interface p1 external_ids iface-id
 check ovn-appctl inc-engine/recompute
@@ -34968,6 +35535,193 @@ AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.50.10:80, backend=192.16
 
 AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
 
+# Check if CT flush is disabled by default
+check ovn-nbctl lb-del lb1
+check ovn-nbctl lb-add lb1 "192.168.70.10:80" "192.168.80.10:8080,192.168.90.10:8080"
+check ovn-nbctl ls-lb-add sw lb1
+check ovs-vsctl set interface p1 external_ids:iface-id=lsp1
+check ovn-nbctl --wait=hv sync
+
+AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
+
+# Remove one backend
+check ovn-nbctl --wait=hv set load_balancer lb1 vips='"192.168.70.10:80"="192.168.80.10:8080"'
+
+AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.70.10:80, backend=192.168.90.10:8080, protocol=6" hv1/ovn-controller.log], [1])
+AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
+
+check ovn-nbctl --wait=hv lb-del lb1
+AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.70.10:80, backend=192.168.80.10:8080, protocol=6" hv1/ovn-controller.log], [1])
+AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([Re-create encap tunnels during integration bridge migration])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.2
+
+check ovn-nbctl --wait=hv sync
+
+check_tunnel_port() {
+    local hv=$1
+    local br=$2
+    local id=$3
+
+    as $hv
+    OVS_WAIT_UNTIL([
+        test "$(ovs-vsctl --format=table --no-headings find port external_ids:ovn-chassis-id="$id" | wc -l)" = "1"
+    ])
+    local tunnel_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="$id")
+    AT_CHECK([ovs-vsctl --bare --columns ports find bridge name="$br" | grep -q "$tunnel_id"])
+}
+
+# Check that both chassis have a tunnel
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+check_tunnel_port hv2 br-int hv1@192.168.0.1
+
+# Stop ovn-controller on hv1
+check as hv1 ovn-appctl -t ovn-controller exit --restart
+
+# The tunnel should remain intact
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+
+# Change the bridge to br-int1 on hv1
+as hv1
+check ovs-vsctl add-br br-int1
+check ovs-vsctl set open . external_ids:ovn-bridge="br-int1"
+start_daemon ovn-controller --verbose="encaps:dbg"
+check ovn-nbctl --wait=hv sync
+
+# Check that the tunnel was created on br-int1 instead
+check_tunnel_port hv1 br-int1 hv2@192.168.0.2
+check grep -q "Clearing old tunnel port \"ovn-hv2-0\" (hv2@192.168.0.2) from bridge \"br-int\"" hv1/ovn-controller.log
+
+# Change the bridge to br-int1 on hv2
+as hv2
+check ovn-appctl vlog/set encaps:dbg
+check ovs-vsctl add-br br-int1
+check ovs-vsctl set open . external_ids:ovn-bridge="br-int1"
+check ovn-nbctl --wait=hv sync
+
+
+# Check that the tunnel was created on br-int1 instead
+check_tunnel_port hv2 br-int1 hv1@192.168.0.1
+check grep -q "Clearing old tunnel port \"ovn-hv1-0\" (hv1@192.168.0.1) from bridge \"br-int\"" hv2/ovn-controller.log
+
+# Stop ovn-controller on hv1
+check as hv1 ovn-appctl -t ovn-controller exit --restart
+
+# The tunnel should remain intact
+check_tunnel_port hv1 br-int1 hv2@192.168.0.2
+prev_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# Start the controller again
+start_daemon ovn-controller --verbose="encaps:dbg"
+check ovn-nbctl --wait=hv sync
+check_tunnel_port hv1 br-int1 hv2@192.168.0.2
+current_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# The tunnel should be the same after restart
+check test "$current_id" = "$prev_id"
+
+OVN_CLEANUP([hv1],[hv2])
+AT_CLEANUP
+])
+
+# NOTE: This test case runs two ovn-controllers inside the same sandbox (hv1).
+# Each controller uses a unique chassis name - hv1 and hv2 - and manages
+# different bridges with different ports.
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([Encaps tunnel cleanup does not interfere with multiple controllers on the same host])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys-1
+ovn_attach n1 br-phys-1 192.168.0.1 24
+
+
+# now start the second virtual controller
+ovs-vsctl add-br br-phys-2
+
+
+# the file is read once at startup so it's safe to write it
+# here after the first ovn-controller has started
+echo hv2 > ${OVN_SYSCONFDIR}/system-id-override
+
+# for some reason SSL ovsdb configuration overrides CLI, so
+# delete ssl config from ovsdb to give CLI arguments priority
+ovs-vsctl del-ssl
+
+start_virtual_controller n1 br-phys-2 br-int-2 192.168.0.2 24 geneve,vxlan hv2 \
+    --pidfile=${OVS_RUNDIR}/ovn-controller-2.pid \
+    --log-file=${OVS_RUNDIR}/ovn-controller-2.log \
+    -p $PKIDIR/testpki-hv2-privkey.pem \
+    -c $PKIDIR/testpki-hv2-cert.pem \
+    -C $PKIDIR/testpki-cacert.pem
+pidfile="$OVS_RUNDIR"/ovn-controller-2.pid
+on_exit "test -e \"$pidfile\" && kill \`cat \"$pidfile\"\`"
+
+ovn-nbctl --wait=hv sync
+
+check_tunnel_port() {
+    local hv=$1
+    local br=$2
+    local id=$3
+
+    as $hv
+    OVS_WAIT_UNTIL([
+        test "$(ovs-vsctl --format=table --no-headings find port external_ids:ovn-chassis-id="$id" | wc -l)" = "1"
+    ])
+    local tunnel_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="$id")
+    AT_CHECK([ovs-vsctl --bare --columns ports find bridge name="$br" | grep -q "$tunnel_id"])
+}
+
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+check_tunnel_port hv1 br-int-2 hv1@192.168.0.1
+prev_id1=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv1@192.168.0.1")
+prev_id2=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# The hv2 controller is running, so we can remove the override file
+rm -f ${OVN_SYSCONFDIR}/system-id-override
+
+check ovn-appctl -t ovn-controller exit --restart
+
+# for some reason SSL ovsdb configuration overrides CLI, so
+# delete ssl config from ovsdb to give CLI arguments priority
+ovs-vsctl del-ssl
+
+start_daemon ovn-controller --verbose="encaps:dbg" \
+    -p $PKIDIR/testpki-hv1-privkey.pem \
+    -c $PKIDIR/testpki-hv1-cert.pem \
+    -C $PKIDIR/testpki-cacert.pem
+
+check ovn-nbctl --wait=hv sync
+
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+check_tunnel_port hv1 br-int-2 hv1@192.168.0.1
+current_id1=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv1@192.168.0.1")
+current_id2=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# Check that restart of hv1 ovn-controller did not interfere with hv2
+AT_CHECK([grep -q "Clearing old tunnel port \"ovn0-hv1-0\" (hv1@192.168.0.1) from bridge \"br-int-2\"" hv1/ovn-controller.log], [1])
+check test "$current_id1" = "$prev_id1"
+check test "$current_id2" = "$prev_id2"
+
 OVN_CLEANUP([hv1])
 AT_CLEANUP
 ])
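
A note on the ct_flush option exercised by the load-balancer hunks above: flushing conntrack
entries when a backend is removed is now opt-in per load balancer, which is why the test sets
the option explicitly and then verifies that no "Flushing CT for 5-tuple" messages appear for
an LB created without it. A minimal sketch of enabling it, reusing the lb1/sw names from the
test (the commands mirror what the hunks above already run; the final set is illustrative):

    # Create the LB with CT flushing enabled and attach it to the switch.
    ovn-nbctl lb-add lb1 "192.168.0.10" "192.168.10.10,192.168.10.20" \
        -- set load_balancer lb1 options:ct_flush="true"
    ovn-nbctl ls-lb-add sw lb1
    # Dropping a backend (or the whole LB) now logs
    # "Flushing CT for 5-tuple: ..." in ovn-controller.log; without
    # options:ct_flush the conntrack entries are kept, which is the new default.
    ovn-nbctl set load_balancer lb1 vips='"192.168.0.10"="192.168.10.10"'
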
diff --git a/tests/ovs-macros.at b/tests/ovs-macros.at
index 36b58b5ae..cc5f6e3b1 100644
--- a/tests/ovs-macros.at
+++ b/tests/ovs-macros.at
@@ -256,6 +256,13 @@ ovs_wait () {
     ovs_wait_failed
     AT_FAIL_IF([:])
 }
+
+check_ovs_wait_until_args() {
+   AT_FAIL_IF([test $1 -ge 3])
+   dnl The second argument should not be a number (likely confused with AT_CHECK arguments).
+   AT_FAIL_IF([test $1 -eq 2 && test "$2" -eq "$2" 2>/dev/null])
+}
+
 OVS_END_SHELL_HELPERS
 m4_define([OVS_WAIT], [dnl
 ovs_wait_cond () {
@@ -276,7 +283,8 @@ dnl zero code within reasonable time limit, then
 dnl the test fails.  In that case, runs IF-FAILED
 dnl before aborting.
 m4_define([OVS_WAIT_UNTIL],
-  [OVS_WAIT([$1], [$2], [AT_LINE], [until $1])])
+  [check_ovs_wait_until_args "$#" "$2"
+   OVS_WAIT([$1], [$2], [AT_LINE], [until $1])])
 
 dnl OVS_WAIT_FOR_OUTPUT(COMMAND, EXIT-STATUS, STDOUT, STDERR)
 dnl OVS_WAIT_FOR_OUTPUT_UNQUOTED(COMMAND, EXIT-STATUS, STDOUT, STDERR)
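
For context on the new guard: OVS_WAIT_UNTIL takes a condition and an optional IF-FAILED
command, not the EXIT-STATUS/STDOUT arguments of AT_CHECK, and several call sites converted
earlier in this patch had mixed the two forms up. A sketch of what the check accepts and
rejects when the test runs (the commands inside the brackets are illustrative only):

    dnl Accepted: a single condition, retried until it exits 0.
    OVS_WAIT_UNTIL([ovn-sbctl show | grep -q Port_Binding])
    dnl Accepted: condition plus an IF-FAILED command run before aborting.
    OVS_WAIT_UNTIL([test -e ovn-controller.pid], [cat ovn-controller.log])
    dnl Rejected: AT_CHECK-style extra arguments ([0], [ignore]), previously
    dnl ignored silently, now make check_ovs_wait_until_args fail the test.
    OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep table=8], [0], [ignore])
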
diff --git a/tests/system-common-macros.at b/tests/system-common-macros.at
index d65f359a6..b8c5ae9ad 100644
--- a/tests/system-common-macros.at
+++ b/tests/system-common-macros.at
@@ -44,43 +44,8 @@ m4_define([NS_CHECK_EXEC],
 # appropriate type, and allows additional arguments to be passed.
 m4_define([ADD_BR], [ovs-vsctl _ADD_BR([$1]) -- $2])
 
-# ADD_INT([port], [namespace], [ovs-br], [ip_addr] [ip6_addr])
-#
-# Add an internal port to 'ovs-br', then shift it into 'namespace' and
-# configure it with 'ip_addr' (specified in CIDR notation).
-# Optionally add an ipv6 address
-m4_define([ADD_INT],
-    [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal])
-      AT_CHECK([ip link set $1 netns $2])
-      NS_CHECK_EXEC([$2], [ip addr add $4 dev $1])
-      NS_CHECK_EXEC([$2], [ip link set dev $1 up])
-      if test -n "$5"; then
-        NS_CHECK_EXEC([$2], [ip -6 addr add $5 dev $1])
-      fi
-    ]
-)
-
-# NS_ADD_INT([port], [namespace], [ovs-br], [ip_addr] [mac_addr] [ip6_addr] [default_gw] [default_ipv6_gw])
-# Create a namespace
-# Add an internal port to 'ovs-br', then shift it into 'namespace'.
-# Configure it with 'ip_addr' (specified in CIDR notation) and ip6_addr.
-# Set mac_addr
-# Add default gw for ipv4 and ipv6
-m4_define([NS_ADD_INT],
-    [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal  external_ids:iface-id=$1])
-      ADD_NAMESPACES($2)
-      AT_CHECK([ip link set $1 netns $2])
-      NS_CHECK_EXEC([$2], [ip link set $1 address $5])
-      NS_CHECK_EXEC([$2], [ip link set dev $1 up])
-      NS_CHECK_EXEC([$2], [ip addr add $4 dev $1])
-      NS_CHECK_EXEC([$2], [ip addr add $6 dev $1])
-      NS_CHECK_EXEC([$2], [ip route add default via $7 dev $1])
-      NS_CHECK_EXEC([$2], [ip -6 route add default via $8 dev $1])
-    ]
-)
-
 # ADD_VETH([port], [namespace], [ovs-br], [ip_addr] [mac_addr], [gateway],
-#          [ip_addr_flags])
+#          [ip_addr_flags] [ip6_addr] [gateway6])
 #
 # Add a pair of veth ports. 'port' will be added to name space 'namespace',
 # and "ovs-'port'" will be added to ovs bridge 'ovs-br'.
@@ -108,6 +73,12 @@ m4_define([ADD_VETH],
       if test -n "$6"; then
         NS_CHECK_EXEC([$2], [ip route add default via $6])
       fi
+      if test -n "$8"; then
+        NS_CHECK_EXEC([$2], [ip addr add $8 dev $1])
+      fi
+      if test -n "$9"; then
+        NS_CHECK_EXEC([$2], [ip route add default via $9])
+      fi
       on_exit "ip link del ovs-$1"
     ]
 )
@@ -263,7 +234,7 @@ m4_define([STRIP_MONITOR_CSUM], [grep "csum:" | sed 's/csum:.*/csum: <skip>/'])
 # and limit the output to the rows containing 'ip-addr'.
 #
 m4_define([FORMAT_CT],
-    [[grep -F "dst=$1" | sed -e 's/port=[0-9]*/port=<cleared>/g' -e 's/id=[0-9]*/id=<cleared>/g' -e 's/state=[0-9_A-Z]*/state=<cleared>/g' | sort | uniq]])
+    [[grep -F "dst=$1," | sed -e 's/port=[0-9]*/port=<cleared>/g' -e 's/id=[0-9]*/id=<cleared>/g' -e 's/state=[0-9_A-Z]*/state=<cleared>/g' | sort | uniq]])
 
 # NETNS_DAEMONIZE([namespace], [command], [pidfile])
 #
diff --git a/tests/system-ovn-kmod.at b/tests/system-ovn-kmod.at
index dd4996041..a1aee3313 100644
--- a/tests/system-ovn-kmod.at
+++ b/tests/system-ovn-kmod.at
@@ -172,7 +172,7 @@ ovn-nbctl set load_balancer $uuid vips:'"30.0.0.2:8000"'='"192.168.1.2:12345,192
 
 ovn-nbctl list load_balancer
 ovn-sbctl dump-flows R2
-OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=41 | \
+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=43 | \
 grep 'nat(src=20.0.0.2)'])
 
 dnl Test load-balancing that includes L4 ports in NAT.
@@ -215,3 +215,139 @@ as
 OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([LB correctly de-fragments traffic])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+AT_SKIP_IF([test $HAVE_SCAPY = no])
+
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-ext])
+
+# Logical network:
+# 2 logical switches "public" (192.168.1.0/24) and "internal" (172.16.1.0/24)
+# connected to a router lr.
+# internal has a server.
+# client is connected through localnet.
+#
+# Load balancer: UDP VIP 192.168.1.20:4242 -> backend 172.16.1.2:4242.
+
+check ovs-ofctl add-flow br-ext action=normal
+# Set external-ids in br-int needed for ovn-controller
+check ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true \
+        -- set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext
+
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add lr
+check ovn-nbctl ls-add internal
+check ovn-nbctl ls-add public
+
+check ovn-nbctl lrp-add lr lr-pub 00:00:01:01:02:03 192.168.1.1/24
+check ovn-nbctl lsp-add  public pub-lr -- set Logical_Switch_Port pub-lr \
+    type=router options:router-port=lr-pub addresses=\"00:00:01:01:02:03\"
+
+check ovn-nbctl lrp-add lr lr-internal 00:00:01:01:02:04 172.16.1.1/24
+check ovn-nbctl lsp-add internal internal-lr -- set Logical_Switch_Port internal-lr \
+    type=router options:router-port=lr-internal addresses=\"00:00:01:01:02:04\"
+
+ovn-nbctl lsp-add public ln_port \
+                -- lsp-set-addresses ln_port unknown \
+                -- lsp-set-type ln_port localnet \
+                -- lsp-set-options ln_port network_name=phynet
+
+ADD_NAMESPACES(client)
+ADD_VETH(client, client, br-ext, "192.168.1.2/24", "f0:00:00:01:02:03", \
+         "192.168.1.1")
+
+ADD_NAMESPACES(server)
+ADD_VETH(server, server, br-int, "172.16.1.2/24", "f0:00:0f:01:02:03", \
+         "172.16.1.1")
+check ovn-nbctl lsp-add internal server \
+-- lsp-set-addresses server "f0:00:0f:01:02:03 172.16.1.2"
+
+# Config OVN load-balancer with a VIP.
+check ovn-nbctl lb-add lb1 192.168.1.20:4242 172.16.1.2:4242 udp
+check ovn-nbctl lr-lb-add lr lb1
+check ovn-nbctl set logical_router lr options:chassis=hv1
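+# Force a small MTU on the internal router port so that the 1000 byte UDP
+# datagrams sent below trigger an ICMP 'fragmentation needed' reply and get
+# fragmented when the client sends again.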
+check ovn-nbctl set logical_router_port lr-internal options:gateway_mtu=800
+
+ovn-nbctl --wait=hv sync
+
+NETNS_DAEMONIZE([server], [nc -l -u 172.16.1.2 4242 > /dev/null], [server.pid])
+
+# Collect ICMP packets on client side
+NETNS_DAEMONIZE([client], [tcpdump -l -U -i client -vnne \
+icmp > client.pcap 2>client_err], [tcpdump0.pid])
+OVS_WAIT_UNTIL([grep "listening" client_err])
+
+# Collect UDP packets on server side
+NETNS_DAEMONIZE([server], [tcpdump -l -U -i server -vnne \
+'udp and ip[[6:2]] > 0 and not ip[[6]] = 64' > server.pcap 2>server_err], [tcpdump1.pid])
+OVS_WAIT_UNTIL([grep "listening" server_err])
+
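+# Send an oversized UDP datagram to the VIP; once the ICMP 'need to frag'
+# reply shows up in the client capture, send it again so that it reaches the
+# backend as fragments.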
+check ip netns exec client python3 << EOF
+import os
+import socket
+import sys
+import time
+
+FILE="client.pcap"
+
+
+def contains_string(file, str):
+    file = open(file, "r")
+    for line in file.readlines():
+        if str in line:
+            return True
+    return False
+
+
+def need_frag_received():
+    for _ in range(20):
+        if os.path.getsize(FILE) and contains_string(FILE, "need to frag"):
+            return True
+        time.sleep(0.5)
+    return False
+
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(b"x" * 1000, ("192.168.1.20", 4242))
+if need_frag_received():
+    sock.sendto(b"x" * 1000, ("192.168.1.20", 4242))
+else:
+    print("Missing need frag")
+    sys.exit(1)
+EOF
+
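+# Wait for the fragments of the second datagram to show up in the server side
+# capture (the tcpdump filter above only matches fragmented packets).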
+OVS_WAIT_UNTIL([test "$(cat server.pcap | wc -l)" = "4"])
+
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index 84a459d6a..0b6e9f602 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -1569,7 +1569,6 @@ bar3_ct=$(ovs-appctl dpctl/dump-conntrack | grep 30.0.0.2 | grep 172.16.1.4 -c)
 AT_CHECK([test $(ovs-appctl dpctl/dump-conntrack | grep 30.0.0.2 | grep 172.16.1 -c) -ne 0])
 
 if [[ "$bar1_ct" == "20" ]]; then
-    AT_CHECK([test $bar1_ct -eq 20])
     AT_CHECK([test $bar2_ct -eq 0])
     AT_CHECK([test $bar3_ct -eq 0])
 else
@@ -1577,17 +1576,15 @@ else
 fi
 
 if [[ "$bar2_ct" == "20" ]]; then
-    AT_CHECK([test $bar1_ct -eq 20])
-    AT_CHECK([test $bar2_ct -eq 0])
+    AT_CHECK([test $bar1_ct -eq 0])
     AT_CHECK([test $bar3_ct -eq 0])
 else
     AT_CHECK([test $bar2_ct -eq 0])
 fi
 
 if [[ "$bar3_ct" == "20" ]]; then
-    AT_CHECK([test $bar1_ct -eq 20])
+    AT_CHECK([test $bar1_ct -eq 0])
     AT_CHECK([test $bar2_ct -eq 0])
-    AT_CHECK([test $bar3_ct -eq 0])
 else
     AT_CHECK([test $bar3_ct -eq 0])
 fi
@@ -2246,7 +2243,7 @@ ovn-nbctl set load_balancer $uuid vips:'"30.0.0.2:8000"'='"192.168.1.2:80,192.16
 
 ovn-nbctl list load_balancer
 ovn-sbctl dump-flows R2
-OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=43 | \
+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=45 | \
 grep 'nat(src=20.0.0.2)'])
 
 check ovs-appctl dpctl/flush-conntrack
@@ -2285,7 +2282,7 @@ ovn-nbctl set load_balancer $uuid vips:'"30.0.0.2:8000"'='"192.168.1.2:80,192.16
 
 ovn-nbctl list load_balancer
 ovn-sbctl dump-flows R2
-OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=43 | \
+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=45 | \
 grep 'nat(src=20.0.0.2)'])
 
 rm -f wget*.log
@@ -4850,9 +4847,9 @@ NS_CHECK_EXEC([lsp], [tcpdump -l -nn -c 3 -i lsp ${filter} > lsp.pcap 2>tcpdump_
 OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
 
 # Generate IPv4 UDP hairpin traffic.
-NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.88 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.89 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.90 2021 &], [0])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.88 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.89 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.90 2021], [ignore], [ignore], [ignore])
 
 # Check hairpin traffic.
 OVS_WAIT_UNTIL([
@@ -4949,9 +4946,9 @@ NS_CHECK_EXEC([lsp], [tcpdump -l -nn -c 3 -i lsp $filter > lsp.pcap 2>tcpdump_er
 OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
 
 # Generate IPv6 UDP hairpin traffic.
-NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0088 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0089 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0090 2021 &], [0])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0088 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0089 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0090 2021], [ignore], [ignore], [ignore])
 
 # Check hairpin traffic.
 OVS_WAIT_UNTIL([
@@ -5084,7 +5081,7 @@ OVS_WAIT_UNTIL([
 ])
 
 OVS_WAIT_UNTIL([
-    n_pkt=$(ovs-ofctl dump-flows br-int table=44 | grep -v n_packets=0 | \
+    n_pkt=$(ovs-ofctl dump-flows br-int table=46 | grep -v n_packets=0 | \
 grep controller | grep tp_dst=84 -c)
     test $n_pkt -eq 1
 ])
@@ -5334,7 +5331,7 @@ OVS_WAIT_UNTIL([
 ])
 
 OVS_WAIT_UNTIL([
-    n_pkt=$(ovs-ofctl dump-flows br-int table=44 | grep -v n_packets=0 | \
+    n_pkt=$(ovs-ofctl dump-flows br-int table=46 | grep -v n_packets=0 | \
 grep controller | grep tp_dst=84 -c)
     test $n_pkt -eq 1
 ])
@@ -7190,7 +7187,7 @@ NS_EXEC([sw01], [tcpdump -l -n -i sw01 icmp -Q in > reject.pcap &])
 check ovn-nbctl --may-exist meter-add acl-meter drop 10 pktps 0
 ip netns exec sw01 scapy -H <<-EOF
 p = IP(src="192.168.1.2", dst="192.168.1.1") / UDP(dport = 12345) / Raw(b"X"*64)
-send (p, iface='sw01', loop = 0, verbose = 0, count = 100)
+send (p, iface='sw01', loop = 0, verbose = 0, count = 40)
 EOF
 
 # 10pps
@@ -8482,11 +8479,18 @@ check ovn-nbctl lsp-set-addresses ln unknown
 check ovn-nbctl lr-nat-add lr1 snat 172.16.1.10 192.168.1.0/24
 check ovn-nbctl lr-nat-add lr1 snat 1711::10 2001::/64
 
-NS_ADD_INT(ls1p1, ls1p1, br-int, "192.168.1.1/24", "00:00:00:01:01:01", "2001::1/64", "192.168.1.254", "2001::a" )
-NS_ADD_INT(ls1p2, ls1p2, br-int, "192.168.1.2/24", "00:00:00:01:01:02", "2001::2/64", "192.168.1.254", "2001::a" )
+ADD_NAMESPACES(ls1p1)
+ADD_VETH(ls1p1, ls1p1, br-int, "192.168.1.1/24", "00:00:00:01:01:01", \
+         "192.168.1.254", , "2001::1/64", "2001::a")
+
+ADD_NAMESPACES(ls1p2)
+ADD_VETH(ls1p2, ls1p2, br-int, "192.168.1.2/24", "00:00:00:01:01:02", \
+         "192.168.1.254", , "2001::2/64", "2001::a")
 
 ADD_NAMESPACES(ext1)
-ADD_INT(ext1, ext1, br0, 172.16.1.1/24, 1711::1/64)
+ADD_VETH(ext1, ext1, br0, "172.16.1.1/24", "00:ee:00:01:01:01", \
+         "172.16.1.254", , "1711::1/64", "1711::a")
+
 check ovn-nbctl --wait=hv sync
 wait_for_ports_up
 OVS_WAIT_UNTIL([test "$(ip netns exec ls1p1 ip a | grep 2001::1 | grep tentative)" = ""])
@@ -8548,25 +8552,17 @@ wait_igmp_flows_installed()
 }
 
 ADD_NAMESPACES(vm1)
-ADD_INT([vm1], [vm1], [br-int], [42.42.42.1/24])
-NS_CHECK_EXEC([vm1], [ip link set vm1 address 00:00:00:00:00:01], [0])
-NS_CHECK_EXEC([vm1], [ip route add default via 42.42.42.5], [0])
-check ovs-vsctl set Interface vm1 external_ids:iface-id=vm1
+ADD_VETH(vm1, vm1, br-int, "42.42.42.1/24", "00:00:00:00:00:01", \
+         "42.42.42.5")
 
 ADD_NAMESPACES(vm2)
-ADD_INT([vm2], [vm2], [br-int], [42.42.42.2/24])
-NS_CHECK_EXEC([vm2], [ip link set vm2 address 00:00:00:00:00:02], [0])
-NS_CHECK_EXEC([vm2], [ip link set lo up], [0])
-check ovs-vsctl set Interface vm2 external_ids:iface-id=vm2
+ADD_VETH(vm2, vm2, br-int, "42.42.42.2/24", "00:00:00:00:00:02")
 
 ADD_NAMESPACES(vm3)
 NETNS_DAEMONIZE([vm3], [tcpdump -n -i any -nnleX > vm3.pcap 2>/dev/null], [tcpdump3.pid])
 
-ADD_INT([vm3], [vm3], [br-int], [42.42.42.3/24])
-NS_CHECK_EXEC([vm3], [ip link set vm3 address 00:00:00:00:00:03], [0])
-NS_CHECK_EXEC([vm3], [ip link set lo up], [0])
-NS_CHECK_EXEC([vm3], [ip route add default via 42.42.42.5], [0])
-check ovs-vsctl set Interface vm3 external_ids:iface-id=vm3
+ADD_VETH(vm3, vm3, br-int, "42.42.42.3/24", "00:00:00:00:00:03", \
+         "42.42.42.5")
 
 NS_CHECK_EXEC([vm2], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore])
@@ -9639,7 +9635,7 @@ start_daemon ovn-controller
 #         |
 # VM2 ----+
 #
-# Two templated load balancer applied on LS1 and GW-Router with
+# Four templated load balancers applied on LS1 and GW-Router with
 # VM1 as backend.  The VIPs should be accessible from both VM2 and VM3.
 
 check ovn-nbctl                                                   \
@@ -9667,7 +9663,7 @@ check ovn-nbctl                                                   \
 # VIP=66.66.66.66:777 backends=42.42.42.2:4343 proto=udp
 
 AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" \
-    variables="{vip=66.66.66.66,vport1=666,backends1=\"42.42.42.2:4242\",vport2=777,backends2=\"42.42.42.2:4343\"}"],
+    variables="{vip=66.66.66.66,vport1=666,backends1=\"42.42.42.2:4242\",vport2=777,backends2=\"42.42.42.2:4343\",vport3=888,vport4=999}"],
          [0], [ignore])
 
 check ovn-nbctl --template lb-add lb-test-tcp "^vip:^vport1" "^backends1" tcp \
@@ -9678,6 +9674,18 @@ check ovn-nbctl --template lb-add lb-test-udp "^vip:^vport2" "^backends2" udp \
     -- ls-lb-add ls1 lb-test-udp                                              \
     -- lr-lb-add rtr lb-test-udp
 
+# Add a TCP template LB with explicit backends that eventually expands to:
+# VIP=66.66.66.66:888 backends=42.42.42.2:4242 proto=tcp
+# And a UDP template LB that eventually expands to:
+# VIP=66.66.66.66:999 backends=42.42.42.2:4343 proto=udp
+check ovn-nbctl --template lb-add lb-test-tcp2 "^vip:^vport3" "42.42.42.2:4242" tcp ipv4 \
+    -- ls-lb-add ls1 lb-test-tcp2                                                        \
+    -- lr-lb-add rtr lb-test-tcp2
+
+check ovn-nbctl --template lb-add lb-test-udp2 "^vip:^vport4" "42.42.42.2:4343" udp ipv4 \
+    -- ls-lb-add ls1 lb-test-udp2                                                        \
+    -- lr-lb-add rtr lb-test-udp2
+
 ADD_NAMESPACES(vm1)
 ADD_VETH(vm1, vm1, br-int, "42.42.42.2/24", "00:00:00:00:00:01", "42.42.42.1")
 
@@ -9698,13 +9706,15 @@ name: 'backends2' value: '42.42.42.2:4343'
 name: 'vip' value: '66.66.66.66'
 name: 'vport1' value: '666'
 name: 'vport2' value: '777'
+name: 'vport3' value: '888'
+name: 'vport4' value: '999'
 ])
 
 # Start IPv4 TCP server on vm1.
 NETNS_DAEMONIZE([vm1], [nc -k -l 42.42.42.2 4242], [nc-vm1.pid])
 
 NETNS_DAEMONIZE([vm1],
-    [tcpdump -n -i vm1 -nnleX -c3 udp and dst 42.42.42.2 and dst port 4343 > vm1.pcap 2>/dev/null],
+    [tcpdump -n -i vm1 -nnleX -c6 udp and dst 42.42.42.2 and dst port 4343 > vm1.pcap 2>/dev/null],
     [tcpdump1.pid])
 
 # Make sure connecting to the VIP works (hairpin, via ls and via lr).
@@ -9712,13 +9722,21 @@ NS_CHECK_EXEC([vm1], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 
-NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 777 &], [0])
-NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 777 &], [0])
-NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 777 &], [0])
+NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 777], [ignore], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [nc 66.66.66.66 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [nc 66.66.66.66 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [nc 66.66.66.66 888 -z], [0], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 999], [ignore], [ignore], [ignore])
 
 OVS_WAIT_UNTIL([
     requests=`grep "UDP" -c vm1.pcap`
-    test "${requests}" -ge "3"
+    test "${requests}" -ge "6"
 ])
 
 AT_CLEANUP
@@ -9753,7 +9771,7 @@ start_daemon ovn-controller
 #         |
 # VM2 ----+
 #
-# Two templated load balancer applied on LS1 and GW-Router with
+# Four templated load balancers applied on LS1 and GW-Router with
 # VM1 as backend.  The VIPs should be accessible from both VM2 and VM3.
 
 check ovn-nbctl                                                   \
@@ -9781,7 +9799,7 @@ check ovn-nbctl                                                   \
 # VIP=[6666::1]:777 backends=[4242::2]:4343 proto=udp
 
 AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" \
-    variables="{vip=\"6666::1\",vport1=666,backends1=\"[[4242::2]]:4242\",vport2=777,backends2=\"[[4242::2]]:4343\"}"],
+    variables="{vip=\"6666::1\",vport1=666,backends1=\"[[4242::2]]:4242\",vport2=777,backends2=\"[[4242::2]]:4343\",vport3=888,vport4=999}"],
          [0], [ignore])
 
 check ovn-nbctl --template lb-add lb-test-tcp "^vip:^vport1" "^backends1" tcp ipv6 \
@@ -9792,6 +9810,18 @@ check ovn-nbctl --template lb-add lb-test-udp "^vip:^vport2" "^backends2" udp ip
     -- ls-lb-add ls1 lb-test-udp                                                   \
     -- lr-lb-add rtr lb-test-udp
 
+# Add a TCP template LB with explicit backends that eventually expands to:
+# VIP=[6666::1]:888 backends=[4242::2]:4242 proto=tcp
+# And a UDP template LB that eventually expands to:
+# VIP=[6666::1]:999 backends=[4242::2]:4343 proto=udp
+check ovn-nbctl --template lb-add lb-test-tcp2 "^vip:^vport3" "[[4242::2]]:4242" tcp ipv6 \
+    -- ls-lb-add ls1 lb-test-tcp2                                                         \
+    -- lr-lb-add rtr lb-test-tcp2
+
+check ovn-nbctl --template lb-add lb-test-udp2 "^vip:^vport4" "[[4242::2]]:4343" udp ipv6 \
+    -- ls-lb-add ls1 lb-test-udp2                                                         \
+    -- lr-lb-add rtr lb-test-udp2
+
 ADD_NAMESPACES(vm1)
 ADD_VETH(vm1, vm1, br-int, "4242::2/64", "00:00:00:00:00:01", "4242::1")
 OVS_WAIT_UNTIL([test "$(ip netns exec vm1 ip a | grep 4242::2 | grep tentative)" = ""])
@@ -9815,13 +9845,15 @@ name: 'backends2' value: '[[4242::2]]:4343'
 name: 'vip' value: '6666::1'
 name: 'vport1' value: '666'
 name: 'vport2' value: '777'
+name: 'vport3' value: '888'
+name: 'vport4' value: '999'
 ])
 
 # Start IPv6 TCP server on vm1.
 NETNS_DAEMONIZE([vm1], [nc -k -l 4242::2 4242], [nc-vm1.pid])
 
 NETNS_DAEMONIZE([vm1],
-    [tcpdump -n -i vm1 -nnleX -c3 udp and dst 4242::2 and dst port 4343 > vm1.pcap 2>/dev/null],
+    [tcpdump -n -i vm1 -nnleX -c6 udp and dst 4242::2 and dst port 4343 > vm1.pcap 2>/dev/null],
     [tcpdump1.pid])
 
 # Make sure connecting to the VIP works (hairpin, via ls and via lr).
@@ -9829,13 +9861,21 @@ NS_CHECK_EXEC([vm1], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm2], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 
-NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 777 &], [0])
-NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 777 &], [0])
-NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 777 &], [0])
+NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 777], [ignore], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [nc 6666::1 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [nc 6666::1 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [nc 6666::1 888 -z], [0], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 999], [ignore], [ignore], [ignore])
 
 OVS_WAIT_UNTIL([
     requests=`grep "UDP" -c vm1.pcap`
-    test "${requests}" -ge "3"
+    test "${requests}" -ge "6"
 ])
 
 AT_CLEANUP
@@ -10587,11 +10627,13 @@ check ovn-nbctl lsp-add bar bar3 \
 -- lsp-set-addresses bar3 "f0:00:0f:01:02:05 172.16.1.4"
 
 # Config OVN load-balancer with a VIP.
-check ovn-nbctl lb-add lb1 30.0.0.1 "172.16.1.2,172.16.1.3,172.16.1.4"
+check ovn-nbctl lb-add lb1 30.0.0.1 "172.16.1.2,172.16.1.3,172.16.1.4" \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add foo lb1
 
 # Create another load-balancer with another VIP.
 lb2_uuid=`ovn-nbctl create load_balancer name=lb2 vips:30.0.0.3="172.16.1.2,172.16.1.3,172.16.1.4"`
+check ovn-nbctl set load_balancer lb2 options:ct_flush="true"
 check ovn-nbctl ls-lb-add foo lb2
 
 # Config OVN load-balancer with another VIP (this time with ports).
@@ -10607,16 +10649,18 @@ OVS_START_L7([bar1], [http])
 OVS_START_L7([bar2], [http])
 OVS_START_L7([bar3], [http])
 
-OVS_WAIT_FOR_OUTPUT([
-    for i in `seq 1 20`; do
-        ip netns exec foo1 wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
-    done
-    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+m4_define([LB1_CT_ENTRIES], [dnl
 tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
 tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.1.3,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
 tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.1.4,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
 ])
 
+OVS_WAIT_FOR_OUTPUT([
+    for i in `seq 1 20`; do
+        ip netns exec foo1 wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
+    done
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
 OVS_WAIT_FOR_OUTPUT([
     for i in `seq 1 20`; do
         ip netns exec foo1 wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
@@ -10690,6 +10734,535 @@ check ovn-nbctl lb-del lb2
 
 OVS_WAIT_UNTIL([test "$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.3) | wc -l)" = "0"])
 
+# Check that LB has CT flush disabled by default
+check ovn-nbctl lb-add lb1 30.0.0.1 "172.16.1.2,172.16.1.3,172.16.1.4"
+check ovn-nbctl ls-lb-add foo lb1
+
+OVS_WAIT_FOR_OUTPUT([
+    for i in `seq 1 20`; do
+        ip netns exec foo1 wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
+    done
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
+# Remove one backend
+check ovn-nbctl --wait=hv set load_balancer lb1 vips='"30.0.0.1"="172.16.1.2,172.16.1.3"'
+
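+# With ct_flush disabled (the default) the conntrack entries created for the
+# removed backend must still be there.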
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
+# Remove the whole LB; its conntrack entries must remain as well
+check ovn-nbctl --wait=hv lb-del lb1
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ACL and committing to conntrack])
+AT_KEYWORDS([acl])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add r1
+check ovn-nbctl lrp-add r1 r1_s1 00:de:ad:fe:00:01 173.0.1.1/24
+check ovn-nbctl lrp-add r1 r1_s2 00:de:ad:fe:00:02 173.0.2.1/24
+
+check ovn-nbctl ls-add s1
+check ovn-nbctl lsp-add s1 s1_r1
+check ovn-nbctl lsp-set-type s1_r1 router
+check ovn-nbctl lsp-set-addresses s1_r1 router
+check ovn-nbctl lsp-set-options s1_r1 router-port=r1_s1
+
+check ovn-nbctl ls-add s2
+check ovn-nbctl lsp-add s2 s2_r1
+check ovn-nbctl lsp-set-type s2_r1 router
+check ovn-nbctl lsp-set-addresses s2_r1 router
+check ovn-nbctl lsp-set-options s2_r1 router-port=r1_s2
+
+check ovn-nbctl lsp-add s1 vm1
+check ovn-nbctl lsp-set-addresses vm1 "00:de:ad:01:00:01 173.0.1.2"
+
+check ovn-nbctl lsp-add s2 vm2
+check ovn-nbctl lsp-set-addresses vm2 "00:de:ad:01:00:02 173.0.2.2"
+
+check ovn-nbctl lsp-add s2 vm3
+check ovn-nbctl lsp-set-addresses vm3 "00:de:ad:01:00:03 173.0.2.3"
+
+check ovn-nbctl lb-add lb1 30.0.0.1:80 173.0.2.2:80 udp
+check ovn-nbctl lb-add lb2 20.0.0.1:80 173.0.1.2:80 udp
+check ovn-nbctl lb-add lb1 30.0.0.1 173.0.2.2
+check ovn-nbctl lb-add lb2 173.0.2.250 173.0.1.3
+check ovn-nbctl ls-lb-add s1 lb1
+check ovn-nbctl ls-lb-add s2 lb2
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "173.0.1.2/24", "00:de:ad:01:00:01", \
+         "173.0.1.1")
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "173.0.2.2/24", "00:de:ad:01:00:02", \
+         "173.0.2.1")
+ADD_NAMESPACES(vm3)
+ADD_VETH(vm3, vm3, br-int, "173.0.2.250/24", "00:de:ad:01:00:03", \
+         "173.0.2.1")
+
+check ovn-nbctl acl-add s1 from-lport 1001 "ip" allow
+check ovn-nbctl acl-add s1 to-lport 1002 "ip" allow
+check ovn-nbctl acl-add s2 from-lport 1003 "ip" allow
+check ovn-nbctl acl-add s2 to-lport 1004 "ip" allow
+check ovn-nbctl --wait=hv sync
+AS_BOX([initial ping])
+# Send ping in background. Same ping, same flow throughout the test
+on_exit 'kill $(pidof ping)'
+NS_EXEC([vm1], [ping -c 10000 -i 0.1 30.0.0.1 > icmp.txt &])
+
+# Check for conntrack entries
+OVS_WAIT_FOR_OUTPUT([
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(173.0.1.2) | \
+      sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=173.0.2.2,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=2
+])
+
+# Now check that there are no duplicate ct commits (exactly one for vm1's zone)
+ovs-appctl dpctl/dump-flows > dp_flows
+zone_id=$(ovn-appctl -t ovn-controller ct-zone-list | grep vm1 | cut -d ' ' -f2)
+AT_CHECK([test 1 = `cat dp_flows | grep "commit,zone=$zone_id" | wc -l`])
+
+check ovn-nbctl acl-del s1 from-lport 1001 "ip"
+check ovn-nbctl acl-del s1 to-lport 1002 "ip"
+check ovn-nbctl acl-del s2 from-lport 1003 "ip"
+check ovn-nbctl acl-del s2 to-lport 1004 "ip"
+
+AS_BOX([acl drop echo request])
+check ovn-nbctl --log --severity=alert --name=drop-flow-s1 acl-add s1 to-lport 2001 icmp4 drop
+# The to-lport drop ACL on s1 applies to traffic delivered to vm1 and to r1.
+check ovn-nbctl --wait=hv sync
+
+# Check that traffic is blocked
+# Wait for some packets to hit the rule to avoid potential race conditions. Then count packets.
+OVS_WAIT_UNTIL([test `cat ovn-controller.log | grep acl_log | grep -c drop-flow-s1` -gt "0"])
+total_icmp_pkts=$(cat icmp.txt | grep ttl | wc -l)
+
+# Wait some time and check whether packets went through. In the worst race condition, the sleep is too short
+# and this test will still succeed.
+sleep 1
+OVS_WAIT_UNTIL([
+        total_icmp1_pkts=$(cat icmp.txt | grep ttl | wc -l)
+        test "${total_icmp1_pkts}" -eq "${total_icmp_pkts}"
+])
+
+AS_BOX([acl allow-related echo request])
+check ovn-nbctl acl-add s1 to-lport 2002 "icmp4 && ip4.src == 173.0.1.2" allow-related
+# This rule has higher priority than to-lport 2001 icmp4 drop.
+# So traffic from vm1 (src=173.0.1.2) towards r1 should be accepted, and the
+# reply traffic delivered back to vm1 should be accepted as return traffic.
+check ovn-nbctl --wait=hv sync
+OVS_WAIT_UNTIL([
+        total_icmp1_pkts=$(cat icmp.txt | grep ttl | wc -l)
+        test "${total_icmp1_pkts}" -gt "${total_icmp_pkts}"
+])
+
+# Check we did not break handling acl-drop for existing flows
+AS_BOX([acl drop echo request in s2])
+check ovn-nbctl acl-del s1 to-lport 2001 icmp4
+check ovn-nbctl --log --severity=alert --name=drop-flow-s2 acl-add s2 to-lport 2001 icmp4 drop
+check ovn-nbctl --wait=hv sync
+
+OVS_WAIT_UNTIL([test `cat ovn-controller.log | grep acl_log | grep -c drop-flow-s2` -gt "0"])
+
+OVS_WAIT_FOR_OUTPUT([
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
+      sed -e 's/zone=[[0-9]]*/zone=<cleared>/' | \
+      sed -e 's/mark=[[0-9]]*/mark=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=<cleared>
+])
+total_icmp_pkts=$(cat icmp.txt | grep ttl | wc -l)
+
+# Allow ping again
+AS_BOX([acl allow echo request in s2])
+check ovn-nbctl acl-add s2 to-lport 2005 icmp4 allow
+check ovn-nbctl --wait=hv sync
+OVS_WAIT_FOR_OUTPUT([
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
+      sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=2
+])
+OVS_WAIT_UNTIL([
+        total_icmp1_pkts=$(cat icmp.txt | grep ttl | wc -l)
+        test "${total_icmp1_pkts}" -gt "${total_icmp_pkts}"
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+# This tests port->up/down and ovn-installed after adding and removing Ports and Interfaces.
+# 3 Conditions x 3 tests:
+# - 3 Conditions:
+#   - In normal conditions
+#   - Remove interface while starting and stopping SB and Controller
+#   - Remove and add back interface while starting and stopping SB and Controller
+# - 3 tests:
+#   - Add/Remove Logical Port
+#   - Add/Remove iface-id
+#   - Add/Remove Interface
+# Each test/condition checks for
+# - Port_binding->chassis
+# - Port up or down
+# - ovn-installed
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-install on slow ovsdb])
+AT_KEYWORDS([ovn-install])
+
+OVS_TRAFFIC_VSWITCHD_START()
+# Restart ovsdb-server, this time with tcp
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+start_daemon ovsdb-server --remote=punix:"$OVS_RUNDIR"/db.sock --remote=ptcp:0:127.0.0.1
+
+ovn_start
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+PARSE_LISTENING_PORT([$ovs_base/ovsdb-server.log], [TCP_PORT])
+start_daemon ovn-controller tcp:127.0.0.1:$TCP_PORT
+
+check ovn-nbctl ls-add ls1
+check ovn-nbctl set Logical_Switch ls1 other_config:subnet=10.1.0.0/16
+
+check ovn-nbctl --wait=hv sync
+
+add_logical_ports() {
+  echo Adding logical ports
+  check ovn-nbctl lsp-add ls1 lsp1
+  check ovn-nbctl lsp-add ls1 lsp2
+}
+
+remove_logical_ports() {
+  echo Removing logical ports
+  check ovn-nbctl lsp-del lsp1
+  check ovn-nbctl lsp-del lsp2
+}
+
+add_ovs_interface() {
+  echo Adding interface $1 $2
+  ovs-vsctl --no-wait -- add-port br-int $1 \
+                      -- set Interface $1 external_ids:iface-id=$2 \
+                      -- set Interface $1 type=internal
+}
+add_ovs_interfaces() {
+  add_ovs_interface vif1 lsp1
+  add_ovs_interface vif2 lsp2
+}
+remove_ovs_interface() {
+  echo Removing interface $1
+  check ovs-vsctl --no-wait -- del-port $1
+}
+remove_ovs_interfaces() {
+  remove_ovs_interface vif1
+  remove_ovs_interface vif2
+}
+add_iface_ids() {
+  echo Adding iface-id vif1 lsp1
+  ovs-vsctl --no-wait -- set Interface vif1 external_ids:iface-id=lsp1
+  echo Adding iface-id vif2 lsp2
+  ovs-vsctl --no-wait -- set Interface vif2 external_ids:iface-id=lsp2
+}
+remove_iface_id() {
+  echo Removing iface-id $1
+  check ovs-vsctl remove Interface $1 external_ids iface-id
+}
+remove_iface_ids() {
+  remove_iface_id vif1
+  remove_iface_id vif2
+}
+wait_for_local_bindings() {
+  OVS_WAIT_UNTIL(
+      [test `ovs-appctl -t ovn-controller debug/dump-local-bindings | grep interface | wc -l` -eq 2],
+      [kill -CONT $(cat ovn-sb/ovsdb-server.pid)]
+  )
+}
+sleep_sb() {
+  echo SB going to sleep
+  AT_CHECK([kill -STOP $(cat ovn-sb/ovsdb-server.pid)])
+}
+wake_up_sb() {
+  echo SB waking up
+  AT_CHECK([kill -CONT $(cat ovn-sb/ovsdb-server.pid)])
+}
+sleep_controller() {
+  echo Controller going to sleep
+  ovn-appctl debug/pause
+  OVS_WAIT_UNTIL([test x$(ovn-appctl -t ovn-controller debug/status) = "xpaused"])
+}
+
+stop_ovsdb_controller_updates() {
+  TCP_PORT=$1
+  echo Stopping updates from ovn-controller to ovsdb using port $TCP_PORT
+  on_exit 'iptables -C INPUT -p tcp --destination-port $TCP_PORT -j DROP 2>/dev/null && iptables -D INPUT -p tcp --destination-port $TCP_PORT -j DROP'
+  iptables -A INPUT -p tcp --destination-port $TCP_PORT -j DROP
+}
+restart_ovsdb_controller_updates() {
+  TCP_PORT=$1
+  echo Restarting updates from ovn-controller to ovsdb
+  iptables -D INPUT -p tcp --destination-port $TCP_PORT  -j DROP
+}
+wake_up_controller() {
+  echo Controller waking up
+  ovn-appctl debug/resume
+}
+ensure_controller_run() {
+# We want to make sure the controller ran at least one full loop.
+# We can't use --wait=hv as the SB might be sleeping.
+# Use 2 ovn-appctl calls to guarantee that ovn-controller runs the full loop, and not just the unixctl handling
+  OVS_WAIT_UNTIL([test x$(ovn-appctl -t ovn-controller debug/status) = "xrunning"])
+  OVS_WAIT_UNTIL([test x$(ovn-appctl -t ovn-controller debug/status) = "xrunning"])
+}
+sleep_ovsdb() {
+  echo OVSDB going to sleep
+  AT_CHECK([kill -STOP $(cat ovsdb-server.pid)])
+}
+wake_up_ovsdb() {
+  echo OVSDB waking up
+  AT_CHECK([kill -CONT $(cat ovsdb-server.pid)])
+}
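+# Helpers that wait for the expected ovn-installed external-id, Port_Binding
+# up state and Port_Binding chassis (un)binding for lsp1 and lsp2.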
+check_ovn_installed() {
+  OVS_WAIT_UNTIL([test `ovs-vsctl get Interface vif1 external_ids:ovn-installed` = '"true"'])
+  OVS_WAIT_UNTIL([test `ovs-vsctl get Interface vif2 external_ids:ovn-installed` = '"true"'])
+}
+check_ovn_uninstalled() {
+  OVS_WAIT_UNTIL([test x`ovs-vsctl get Interface vif2 external_ids:ovn-installed` = x])
+  OVS_WAIT_UNTIL([test x`ovs-vsctl get Interface vif1 external_ids:ovn-installed` = x])
+}
+check_ports_up() {
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp1 up` = 'true'])
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp2 up` = 'true'])
+}
+check_ports_down() {
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp1 up` = 'false'])
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp2 up` = 'false'])
+}
+
+check_ports_bound() {
+  ch=$(fetch_column Chassis _uuid name=hv1)
+  wait_row_count Port_Binding 1 logical_port=lsp1 chassis=$ch
+  wait_row_count Port_Binding 1 logical_port=lsp2 chassis=$ch
+}
+check_ports_unbound() {
+  wait_column "" Port_Binding chassis logical_port=lsp1
+  wait_column "" Port_Binding chassis logical_port=lsp2
+}
+add_logical_ports
+add_ovs_interfaces
+wait_for_local_bindings
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+############################################################
+########## Remove interface while removing iface-id ########
+############################################################
+AS_BOX(["Remove interface while removing iface-id"])
+stop_ovsdb_controller_updates $TCP_PORT
+remove_iface_id vif1
+ensure_controller_run
+# OVSDB should be seen as read-only now
+remove_iface_id vif2
+ensure_controller_run
+# Controller delays removing ovn-installed from vif2 because OVSDB is read-only
+sleep_controller
+restart_ovsdb_controller_updates $TCP_PORT
+remove_ovs_interface vif2
+# vif2, from which we want to remove ovn-installed, is deleted
+wake_up_controller
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_ovs_interface vif2 lsp2
+add_iface_ids
+check_ovn_installed
+check_ports_up
+check_ports_bound
+############################################################
+################### Add/Remove iface-id ####################
+############################################################
+AS_BOX(["iface-id removal and added back (no sleeping sb or controller)"])
+remove_iface_ids
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_iface_ids
+check_ovn_installed
+check_ports_up
+check_ports_bound
+
+AS_BOX(["iface-id removal"])
+sleep_sb
+remove_iface_ids
+ensure_controller_run
+sleep_controller
+wake_up_sb
+wake_up_controller
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_iface_ids
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["iface-id removal 2"])
+# Block IDL from ovn-controller to OVSDB
+stop_ovsdb_controller_updates $TCP_PORT
+remove_iface_id vif2
+ensure_controller_run
+
+# OVSDB should now be seen as read-only by ovn-controller
+remove_iface_id vif1
+ensure_controller_run
+
+# Restart connection from ovn-controller to OVSDB
+restart_ovsdb_controller_updates $TCP_PORT
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+
+add_iface_ids
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["iface-id removal and added back"])
+sleep_sb
+remove_iface_ids
+ensure_controller_run
+sleep_controller
+add_iface_ids
+wake_up_sb
+wake_up_controller
+check_ovn_installed
+check_ports_up
+check_ports_bound
+############################################################
+###################### Add/Remove Interface ################
+############################################################
+AS_BOX(["Interface removal and added back (no sleeping sb or controller)"])
+remove_ovs_interfaces
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_ovs_interfaces
+check_ovn_installed
+check_ports_up
+check_ports_bound
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Interface removal"])
+sleep_sb
+remove_ovs_interfaces
+ensure_controller_run
+sleep_controller
+wake_up_sb
+wake_up_controller
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_ovs_interfaces
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Interface removal and added back"])
+sleep_sb
+remove_ovs_interfaces
+ensure_controller_run
+sleep_controller
+add_ovs_interfaces
+wake_up_sb
+wake_up_controller
+check_ovn_installed
+check_ports_up
+check_ports_bound
+check ovn-nbctl --wait=hv sync
+############################################################
+###################### Add/Remove Logical Port #############
+############################################################
+AS_BOX(["Logical port removal and added back (no sleeping sb or controller)"])
+remove_logical_ports
+check_ovn_uninstalled
+check_ports_unbound
+sleep_ovsdb
+add_logical_ports
+ensure_controller_run
+wake_up_ovsdb
+check_ovn_installed
+check_ports_up
+check_ports_bound
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Logical port removal"])
+sleep_sb
+remove_logical_ports
+ensure_controller_run
+sleep_controller
+wake_up_sb
+wake_up_controller
+check_ovn_uninstalled
+check_ports_unbound
+add_logical_ports
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Logical port removal and added back"])
+sleep_sb
+remove_logical_ports
+ensure_controller_run
+sleep_controller
+add_logical_ports
+wake_up_sb
+wake_up_controller
+check_ovn_installed
+check_ports_up
+check_ports_bound
+
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
 
 as ovn-sb
@@ -10706,3 +11279,338 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
 ])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn mirroring])
+AT_KEYWORDS([mirror])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-mirror])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+ovs-ofctl add-flow br-mirror action=normal
+
+ovn-nbctl create Logical_Router name=R1 options:chassis=hv1
+
+ovn-nbctl ls-add foo
+ovn-nbctl ls-add bar
+
+# Connect foo to R1
+ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 192.168.1.1/24 2001::1/64
+ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \
+    type=router options:router-port=foo addresses=\"00:00:01:01:02:03\"
+
+# Connect bar to R1
+ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 192.168.2.1/24 2002::1/64
+ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \
+    type=router options:router-port=bar addresses=\"00:00:01:01:02:04\"
+
+# Logical port 'foo1' in switch 'foo'.
+ADD_NAMESPACES(foo1)
+ADD_VETH(foo1, foo1, br-int, "2001::2/64", "f0:00:00:01:02:03", \
+         "2001::1", "nodad", "192.168.1.2/24", "192.168.1.1")
+ovn-nbctl lsp-add foo foo1 \
+-- lsp-set-addresses foo1 "f0:00:00:01:02:03 192.168.1.2 2001::2"
+
+# Logical port 'bar1' in switch 'bar'.
+ADD_NAMESPACES(bar1)
+ADD_VETH(bar1, bar1, br-int, "2002::2/64", "f0:00:00:01:02:05", \
+         "2002::1", "nodad", "192.168.2.2/24", "192.168.2.1")
+ovn-nbctl lsp-add bar bar1 \
+-- lsp-set-addresses bar1 "f0:00:00:01:02:05 192.168.2.2 2002::2"
+
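+# Mirror the traffic delivered to bar1 (to-lport) through a GRE tunnel with
+# index 1 towards the collector at 172.16.0.100.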
+ovn-nbctl mirror-add mirror0 gre 1 to-lport 172.16.0.100
+ovn-nbctl lsp-attach-mirror bar1 mirror0
+
+OVN_POPULATE_ARP
+check ovn-nbctl --wait=hv sync
+
+ADD_NAMESPACES(mirror)
+ADD_VETH(mirror, mirror, br-mirror, "2003::b/64", "f0:00:00:01:07:06", \
+         "2003::1", "nodad", "172.16.0.100/24", "172.16.0.1")
+AT_CHECK([ip addr add 172.16.0.101/24 dev br-mirror])
+AT_CHECK([ip addr add 2003::a/64 dev br-mirror nodad])
+AT_CHECK([ip link set dev br-mirror up])
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror proto GRE > gre_mirror4.pcap 2>gre_mirror4_error &])
+OVS_WAIT_UNTIL([grep "listening" gre_mirror4_error])
+
+NS_CHECK_EXEC([foo1], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "GRE" -c gre_mirror4.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+ovn-nbctl mirror-del mirror0
+ovn-nbctl mirror-add mirror1 gre 2 to-lport 2003::b
+
+ovn-nbctl --wait=hv lsp-attach-mirror bar1 mirror1
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror proto GRE > gre_mirror6.pcap 2>gre_mirror6_error &])
+OVS_WAIT_UNTIL([grep "listening" gre_mirror6_error])
+
+NS_CHECK_EXEC([foo1], [ping6 -q -c 3 -i 0.3 -w 2 2002::2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "GRE" -c gre_mirror6.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+ovn-nbctl mirror-del mirror1
+ovn-nbctl mirror-add mirror2 erspan 3 to-lport 172.16.0.100
+ovn-nbctl --wait=hv lsp-attach-mirror bar1 mirror2
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror ip[[22:2]]=0x88be > erspan_mirror4.pcap 2>erspan_mirror4_error &])
+OVS_WAIT_UNTIL([grep "listening" erspan_mirror4_error])
+
+NS_CHECK_EXEC([foo1], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "gre-proto-0x88be" -c erspan_mirror4.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+ovn-nbctl mirror-del mirror2
+ovn-nbctl mirror-add mirror3 erspan 4 to-lport 2003::b
+ovn-nbctl --wait=hv lsp-attach-mirror bar1 mirror3
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror ip6[[42:2]]=0x88be > erspan_mirror6.pcap 2>erspan_mirror6_error &])
+OVS_WAIT_UNTIL([grep "listening" erspan_mirror6_error])
+
+NS_CHECK_EXEC([foo1], [ping6 -q -c 3 -i 0.3 -w 2 2002::2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "gre-proto-0x88be" -c erspan_mirror6.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
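+# Switch mirror3 from ERSPAN to plain GRE in place and verify mirroring still
+# works.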
+uuid=$(fetch_column nb:mirror _uuid name="mirror3")
+ovn-nbctl --wait=hv set mirror $uuid type=gre
+
+NS_CHECK_EXEC([mirror], [tcpdump -c 3 -l -neei mirror proto GRE > gre_mirror6.pcap 2>gre_mirror6_error &])
+OVS_WAIT_UNTIL([grep "listening" gre_mirror6_error])
+
+NS_CHECK_EXEC([foo1], [ping6 -q -c 3 -i 0.3 -w 2 2002::2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "GRE" -c gre_mirror6.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([load balancer with localnet port])
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-phys], [set Bridge br-phys fail-mode=standalone])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add ro
+check ovn-nbctl lrp-add ro ro-sw 00:00:00:00:00:01 192.168.0.1/24
+check ovn-nbctl lrp-add ro ro-pub 00:00:00:00:01:01 10.0.0.1/24
+
+check ovn-nbctl ls-add sw
+check ovn-nbctl lsp-add sw sw-vm1 \
+    -- lsp-set-addresses sw-vm1 "00:00:00:00:00:02 192.168.0.2"
+check ovn-nbctl lsp-add sw sw-ro \
+    -- lsp-set-type sw-ro router \
+    -- lsp-set-addresses sw-ro router \
+    -- lsp-set-options sw-ro router-port=ro-sw
+
+check ovn-nbctl ls-add pub
+check ovn-nbctl lsp-add pub sw-ln \
+    -- lsp-set-type sw-ln localnet \
+    -- lsp-set-addresses sw-ln unknown \
+    -- lsp-set-options sw-ln network_name=phys
+check ovn-nbctl lsp-add pub pub-ro \
+    -- lsp-set-type pub-ro router \
+    -- lsp-set-addresses pub-ro router \
+    -- lsp-set-options pub-ro router-port=ro-pub
+
+check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+
+ADD_NAMESPACES(sw-vm1)
+ADD_VETH(sw-vm1, sw-vm1, br-int, "192.168.0.2/24", "00:00:00:00:00:02", \
+         "192.168.0.1")
+
+ADD_NAMESPACES(ln)
+ADD_VETH(ln, ln, br-phys, "10.0.0.2/24", "00:00:00:00:01:02", \
+         "10.0.0.1")
+
+# We have the basic network set up. Now let's add a load balancer
+# on the "pub" logical switch.
+
+check ovn-nbctl lb-add ln-lb 172.16.0.1:80 192.168.0.2:80 tcp
+check ovn-nbctl ls-lb-add pub ln-lb
+check ovn-nbctl --wait=hv sync
+
+# Add a route so that the localnet port can reach the load balancer
+# VIP.
+NS_CHECK_EXEC([ln], [ip route add 172.16.0.1 via 10.0.0.1])
+NS_CHECK_EXEC([ln], [ip route add 192.168.0.0/24 via 10.0.0.1])
+
+OVS_START_L7([sw-vm1], [http])
+
+NS_CHECK_EXEC([ln], [wget 172.16.0.1 -t 5 -T 1 --retry-connrefused -v -o wget.log])
+
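+# The connection from the localnet port to the VIP should have been committed
+# to conntrack, DNATted to the backend 192.168.0.2.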
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.1) | \
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=10.0.0.2,dst=172.16.0.1,sport=<cleared>,dport=<cleared>),reply=(src=192.168.0.2,dst=10.0.0.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([Traffic to router port via LLA])
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-phys], [set Bridge br-phys fail-mode=standalone])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add lr0
+check ovn-nbctl lrp-add lr0 lr0-ls0 00:00:00:00:00:01 fd00::1/64
+
+check ovn-nbctl ls-add ls0
+check ovn-nbctl lsp-add ls0 vif0 \
+    -- lsp-set-addresses vif0 "00:00:00:00:00:02 fd00::2"
+check ovn-nbctl lsp-add ls0 ls0-lr0 \
+    -- lsp-set-type ls0-lr0 router \
+    -- lsp-set-addresses ls0-lr0 router \
+    -- lsp-set-options ls0-lr0 router-port=lr0-ls0
+
+ADD_NAMESPACES(vif0)
+ADD_VETH(vif0, vif0, br-int, "fd00::2/64", "00:00:00:00:00:02", "fd00::1")
+OVS_WAIT_UNTIL([test "$(ip netns exec vif0 ip a | grep fe80:: | grep tentative)" = ""])
+
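+# With always_learn_from_arp_request=false the router should still learn a MAC
+# binding for vif0 when vif0 pings the router's link-local address, since the
+# ND requests target one of the router's own addresses.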
+check ovn-nbctl set logical_router lr0 options:always_learn_from_arp_request=false
+
+OVN_POPULATE_ARP
+wait_for_ports_up
+check ovn-nbctl --wait=sb sync
+
+NS_CHECK_EXEC([vif0], [ping -q -c 3 -i 0.3 -w 2 fe80::200:ff:fe00:1 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+check_row_count mac_binding 1 mac=\"00:00:00:00:00:02\"
+ovn-sbctl --all destroy mac_binding
+
+ovn-nbctl --wait=hv set logical_router lr0 options:always_learn_from_arp_request=true
+
+NS_CHECK_EXEC([vif0], [ping -q -c 3 -i 0.3 -w 2 fe80::200:ff:fe00:1 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+check_row_count mac_binding 1 mac=\"00:00:00:00:00:02\"
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+
+AT_CLEANUP
+])
diff --git a/utilities/containers/py-requirements.txt b/utilities/containers/py-requirements.txt
index d7bd21e0d..0d90765c9 100644
--- a/utilities/containers/py-requirements.txt
+++ b/utilities/containers/py-requirements.txt
@@ -1,5 +1,6 @@
 flake8
 hacking>=3.0
+scapy
 sphinx
 setuptools
 pyelftools
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index 45572fd30..9399f9462 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -3033,7 +3033,7 @@ nbctl_lb_add(struct ctl_context *ctx)
     }
 
     ovn_lb_vip_format(&lb_vip_parsed, &lb_vip_normalized, template);
-    ovn_lb_vip_backends_format(&lb_vip_parsed, &lb_ips_new, template);
+    ovn_lb_vip_backends_format(&lb_vip_parsed, &lb_ips_new);
     ovn_lb_vip_destroy(&lb_vip_parsed);
 
     const struct nbrec_load_balancer *lb = NULL;
@@ -4204,8 +4204,7 @@ print_routing_policy(const struct nbrec_logical_router_policy *policy,
                       policy->match, policy->action);
         for (int i = 0; i < policy->n_nexthops; i++) {
             char *next_hop = normalize_prefix_str(policy->nexthops[i]);
-            char *fmt = i ? ", %s" : " %25s";
-            ds_put_format(s, fmt, next_hop);
+            ds_put_format(s, i ? ", %s" : " %25s", next_hop ? next_hop : "");
             free(next_hop);
         }
     } else {
@@ -6586,18 +6585,17 @@ print_route(const struct nbrec_logical_router_static_route *route,
 {
 
     char *prefix = normalize_prefix_str(route->ip_prefix);
-    char *next_hop = "";
+    char *next_hop = NULL;
 
     if (!strcmp(route->nexthop, "discard")) {
         next_hop = xasprintf("discard");
     } else if (route->nexthop[0]) {
         next_hop = normalize_prefix_str(route->nexthop);
     }
-    ds_put_format(s, "%25s %25s", prefix, next_hop);
+    ds_put_format(s, "%25s %25s", prefix ? prefix : "",
+                  next_hop ? next_hop : "");
     free(prefix);
-    if (next_hop[0]) {
-        free(next_hop);
-    }
+    free(next_hop);
 
     if (route->policy) {
         ds_put_format(s, " %s", route->policy);