diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0f8d9d193..edf4fb2fd 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -17,7 +17,8 @@ jobs:
       dependencies: |
         automake libtool gcc bc libjemalloc2 libjemalloc-dev    \
         libssl-dev llvm-dev libelf-dev libnuma-dev libpcap-dev  \
-        selinux-policy-dev ncat python3-scapy isc-dhcp-server
+        selinux-policy-dev ncat python3-scapy isc-dhcp-server \
+        iputils-arping
       m32_dependecies: gcc-multilib
       ARCH:        ${{ matrix.cfg.arch }}
       CC:          ${{ matrix.cfg.compiler }}
diff --git a/NEWS b/NEWS
index 5e8aed06d..60c460a05 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,11 @@
+OVN v23.03.1 - xx xxx xxxx
+--------------------------
+  - CT entries are not flushed by default anymore whenever a load balancer
+    backend is removed.  A new, per-LB, option 'ct_flush' can be used to
+    restore the previous behavior.  Disabled by default.
+  - Always allow IPv6 Router Discovery, Neighbor Discovery, and Multicast
+    Listener Discovery protocols, regardless of ACLs defined.
+
 OVN v23.03.0 - 03 Mar 2023
 --------------------------
   - ovn-controller: Experimental support for co-hosting multiple controller
diff --git a/configure.ac b/configure.ac
index b51d0f01e..0ba9e8d7e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 AC_PREREQ(2.63)
-AC_INIT(ovn, 23.03.0, bugs@openvswitch.org)
+AC_INIT(ovn, 23.03.1, bugs@openvswitch.org)
 AC_CONFIG_MACRO_DIR([m4])
 AC_CONFIG_AUX_DIR([build-aux])
 AC_CONFIG_HEADERS([config.h])
diff --git a/controller/binding.c b/controller/binding.c
index 5df62baef..bd810f669 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -746,6 +746,19 @@ local_binding_get_lport_ofport(const struct shash *local_bindings,
             u16_to_ofp(lbinding->iface->ofport[0]) : 0;
 }
 
+bool
+local_binding_is_ovn_installed(struct shash *local_bindings,
+                               const char *pb_name)
+{
+    struct local_binding *lbinding =
+        local_binding_find(local_bindings, pb_name);
+    if (lbinding && lbinding->iface) {
+        return smap_get_bool(&lbinding->iface->external_ids,
+                             OVN_INSTALLED_EXT_ID, false);
+    }
+    return false;
+}
+
 bool
 local_binding_is_up(struct shash *local_bindings, const char *pb_name,
                     const struct sbrec_chassis *chassis_rec)
@@ -783,6 +796,7 @@ local_binding_is_down(struct shash *local_bindings, const char *pb_name,
         } else if (b_lport->pb->chassis) {
             VLOG_DBG("lport %s already claimed by other chassis",
                      b_lport->pb->logical_port);
+            return true;
         }
     }
 
@@ -834,6 +848,38 @@ local_binding_set_up(struct shash *local_bindings, const char *pb_name,
     }
 }
 
+void
+local_binding_remove_ovn_installed(
+        struct shash *local_bindings,
+        const struct ovsrec_interface_table *iface_table,
+        const char *pb_name, bool ovs_readonly)
+{
+    if (ovs_readonly) {
+        return;
+    }
+    struct local_binding *lbinding =
+        local_binding_find(local_bindings, pb_name);
+    if (lbinding && lbinding->iface) {
+        const struct uuid *iface_uuid = &lbinding->iface->header_.uuid;
+        remove_ovn_installed_for_uuid(iface_table, iface_uuid);
+    }
+}
+
+void
+remove_ovn_installed_for_uuid(const struct ovsrec_interface_table *iface_table,
+                              const struct uuid *iface_uuid)
+{
+    const struct ovsrec_interface *iface_rec =
+        ovsrec_interface_table_get_for_uuid(iface_table, iface_uuid);
+    if (iface_rec && smap_get_bool(&iface_rec->external_ids,
+                                   OVN_INSTALLED_EXT_ID, false)) {
+        VLOG_INFO("Removing iface %s ovn-installed in OVS",
+                  iface_rec->name);
+        ovsrec_interface_update_external_ids_delkey(iface_rec,
+                                                    OVN_INSTALLED_EXT_ID);
+    }
+}
+
 void
 local_binding_set_down(struct shash *local_bindings, const char *pb_name,
                        const struct sbrec_chassis *chassis_rec,
@@ -853,7 +899,6 @@ local_binding_set_down(struct shash *local_bindings, const char *pb_name,
 
     if (!sb_readonly && b_lport && b_lport->pb->n_up && b_lport->pb->up[0] &&
             (!b_lport->pb->chassis || b_lport->pb->chassis == chassis_rec)) {
-        VLOG_INFO("Setting lport %s down in Southbound", pb_name);
         binding_lport_set_down(b_lport, sb_readonly);
         LIST_FOR_EACH (b_lport, list_node, &lbinding->binding_lports) {
             binding_lport_set_down(b_lport, sb_readonly);
@@ -1239,7 +1284,9 @@ claim_lport(const struct sbrec_port_binding *pb,
                     return false;
                 }
             } else {
-                if (pb->n_up && !pb->up[0]) {
+                if ((pb->n_up && !pb->up[0]) ||
+                    !smap_get_bool(&iface_rec->external_ids,
+                                   OVN_INSTALLED_EXT_ID, false)) {
                     if_status_mgr_claim_iface(if_mgr, pb, chassis_rec,
                                               sb_readonly);
                 }
@@ -1464,9 +1511,11 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
             const char *requested_chassis_option = smap_get(
                 &pb->options, "requested-chassis");
             VLOG_INFO_RL(&rl,
-                "Not claiming lport %s, chassis %s requested-chassis %s",
+                "Not claiming lport %s, chassis %s requested-chassis %s "
+                "pb->chassis %s",
                 pb->logical_port, b_ctx_in->chassis_rec->name,
-                requested_chassis_option ? requested_chassis_option : "[]");
+                requested_chassis_option ? requested_chassis_option : "[]",
+                pb->chassis ? pb->chassis->name: "");
         }
     }
 
@@ -2288,6 +2337,11 @@ consider_iface_release(const struct ovsrec_interface *iface_rec,
                 return false;
             }
         }
+        if (lbinding->iface && lbinding->iface->name) {
+            if_status_mgr_remove_ovn_installed(b_ctx_out->if_mgr,
+                                               lbinding->iface->name,
+                                               &lbinding->iface->header_.uuid);
+        }
 
     } else if (lbinding && b_lport && b_lport->type == LP_LOCALPORT) {
         /* lbinding is associated with a localport.  Remove it from the
@@ -2558,6 +2612,7 @@ handle_deleted_lport(const struct sbrec_port_binding *pb,
     if (ld) {
         remove_pb_from_local_datapath(pb,
                                       b_ctx_out, ld);
+        if_status_mgr_release_iface(b_ctx_out->if_mgr, pb->logical_port);
         return;
     }
 
@@ -2581,6 +2636,7 @@ handle_deleted_lport(const struct sbrec_port_binding *pb,
             remove_pb_from_local_datapath(pb, b_ctx_out,
                                           ld);
         }
+        if_status_mgr_release_iface(b_ctx_out->if_mgr, pb->logical_port);
     }
 }
 
@@ -2627,6 +2683,11 @@ handle_deleted_vif_lport(const struct sbrec_port_binding *pb,
     }
 
     handle_deleted_lport(pb, b_ctx_in, b_ctx_out);
+    if (lbinding && lbinding->iface && lbinding->iface->name) {
+        if_status_mgr_remove_ovn_installed(b_ctx_out->if_mgr,
+                                           lbinding->iface->name,
+                                           &lbinding->iface->header_.uuid);
+    }
     return true;
 }
 
@@ -3314,6 +3375,24 @@ binding_lport_delete(struct shash *binding_lports,
     binding_lport_destroy(b_lport);
 }
 
+void
+port_binding_set_down(const struct sbrec_chassis *chassis_rec,
+                      const struct sbrec_port_binding_table *pb_table,
+                      const char *iface_id,
+                      const struct uuid *pb_uuid)
+{
+    const struct sbrec_port_binding *pb =
+        sbrec_port_binding_table_get_for_uuid(pb_table, pb_uuid);
+    if (!pb) {
+        VLOG_DBG("port_binding already deleted for %s", iface_id);
+    } else if (pb->n_up && pb->up[0]) {
+        bool up = false;
+        sbrec_port_binding_set_up(pb, &up, 1);
+        VLOG_INFO("Setting lport %s down in Southbound", pb->logical_port);
+        set_pb_chassis_in_sbrec(pb, chassis_rec, false);
+    }
+}
+
 static void
 binding_lport_set_up(struct binding_lport *b_lport, bool sb_readonly)
 {
@@ -3331,6 +3410,7 @@ binding_lport_set_down(struct binding_lport *b_lport, bool sb_readonly)
     if (sb_readonly || !b_lport || !b_lport->pb->n_up || !b_lport->pb->up[0]) {
         return;
     }
+    VLOG_INFO("Setting lport %s down in Southbound", b_lport->name);
 
     bool up = false;
     sbrec_port_binding_set_up(b_lport->pb, &up, 1);
diff --git a/controller/binding.h b/controller/binding.h
index 6c3a98b02..5b73c6a4b 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -159,6 +159,14 @@ bool local_binding_is_up(struct shash *local_bindings, const char *pb_name,
 bool local_binding_is_down(struct shash *local_bindings, const char *pb_name,
                            const struct sbrec_chassis *);
 
+bool local_binding_is_ovn_installed(struct shash *local_bindings,
+                                    const char *pb_name);
+void local_binding_remove_ovn_installed(
+        struct shash *local_bindings,
+        const struct ovsrec_interface_table *iface_table,
+        const char *pb_name,
+        bool ovs_readonly);
+
 void local_binding_set_up(struct shash *local_bindings, const char *pb_name,
                           const struct sbrec_chassis *chassis_rec,
                           const char *ts_now_str, bool sb_readonly,
@@ -195,6 +203,14 @@ void set_pb_chassis_in_sbrec(const struct sbrec_port_binding *pb,
                              const struct sbrec_chassis *chassis_rec,
                              bool is_set);
 
+void remove_ovn_installed_for_uuid(const struct ovsrec_interface_table *,
+                                   const struct uuid *);
+
+void port_binding_set_down(const struct sbrec_chassis *chassis_rec,
+                           const struct sbrec_port_binding_table *pb_table,
+                           const char *iface_id,
+                           const struct uuid *pb_uuid);
+
 /* Corresponds to each Port_Binding.type. */
 enum en_lport_type {
     LP_UNKNOWN,
diff --git a/controller/encaps.c b/controller/encaps.c
index 2662eaf98..b69d72584 100644
--- a/controller/encaps.c
+++ b/controller/encaps.c
@@ -36,6 +36,8 @@ VLOG_DEFINE_THIS_MODULE(encaps);
  */
 #define	OVN_MVTEP_CHASSISID_DELIM '@'
 
+static char *current_br_int_name = NULL;
+
 void
 encaps_register_ovs_idl(struct ovsdb_idl *ovs_idl)
 {
@@ -386,6 +388,21 @@ chassis_tzones_overlap(const struct sset *transport_zones,
     return false;
 }
 
+static void
+clear_old_tunnels(const struct ovsrec_bridge *old_br_int, const char *prefix,
+                  size_t prefix_len)
+{
+    for (size_t i = 0; i < old_br_int->n_ports; i++) {
+        const struct ovsrec_port *port = old_br_int->ports[i];
+        const char *id = smap_get(&port->external_ids, "ovn-chassis-id");
+        if (id && !strncmp(port->name, prefix, prefix_len)) {
+            VLOG_DBG("Clearing old tunnel port \"%s\" (%s) from bridge "
+                     "\"%s\".", port->name, id, old_br_int->name);
+            ovsrec_bridge_update_ports_delvalue(old_br_int, port);
+        }
+    }
+}
+
 void
 encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
            const struct ovsrec_bridge *br_int,
@@ -393,12 +410,42 @@ encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
            const struct sbrec_chassis *this_chassis,
            const struct sbrec_sb_global *sbg,
            const struct ovsrec_open_vswitch_table *ovs_table,
-           const struct sset *transport_zones)
+           const struct sset *transport_zones,
+           const struct ovsrec_bridge_table *bridge_table)
 {
     if (!ovs_idl_txn || !br_int) {
         return;
     }
 
+    if (!current_br_int_name) {
+        /* The controller has just started, we need to look through all
+         * bridges for old tunnel ports. */
+        char *tunnel_prefix = xasprintf("ovn%s-", get_chassis_idx(ovs_table));
+        size_t prefix_len = strlen(tunnel_prefix);
+
+        const struct ovsrec_bridge *br;
+        OVSREC_BRIDGE_TABLE_FOR_EACH (br, bridge_table) {
+            if (!strcmp(br->name, br_int->name)) {
+                continue;
+            }
+            clear_old_tunnels(br, tunnel_prefix, prefix_len);
+        }
+
+        free(tunnel_prefix);
+        current_br_int_name = xstrdup(br_int->name);
+    } else if (strcmp(current_br_int_name, br_int->name)) {
+        /* The integration bridge was changed, clear tunnel ports from
+         * the old one. */
+        const struct ovsrec_bridge *old_br_int =
+            get_bridge(bridge_table, current_br_int_name);
+        if (old_br_int) {
+            clear_old_tunnels(old_br_int, "", 0);
+        }
+
+        free(current_br_int_name);
+        current_br_int_name = xstrdup(br_int->name);
+    }
+
     const struct sbrec_chassis *chassis_rec;
 
     struct tunnel_ctx tc = {
@@ -511,3 +558,9 @@ encaps_cleanup(struct ovsdb_idl_txn *ovs_idl_txn,
 
     return !any_changes;
 }
+
+void
+encaps_destroy(void)
+{
+    free(current_br_int_name);
+}
diff --git a/controller/encaps.h b/controller/encaps.h
index 867c6f28c..3e58b3c82 100644
--- a/controller/encaps.h
+++ b/controller/encaps.h
@@ -35,7 +35,8 @@ void encaps_run(struct ovsdb_idl_txn *ovs_idl_txn,
                 const struct sbrec_chassis *,
                 const struct sbrec_sb_global *,
                 const struct ovsrec_open_vswitch_table *,
-                const struct sset *transport_zones);
+                const struct sset *transport_zones,
+                const struct ovsrec_bridge_table *bridge_table);
 
 bool encaps_cleanup(struct ovsdb_idl_txn *ovs_idl_txn,
                     const struct ovsrec_bridge *br_int);
@@ -46,4 +47,6 @@ bool  encaps_tunnel_id_parse(const char *tunnel_id, char **chassis_id,
 bool  encaps_tunnel_id_match(const char *tunnel_id, const char *chassis_id,
                              const char *encap_ip);
 
+void encaps_destroy(void);
+
 #endif /* controller/encaps.h */
diff --git a/controller/if-status.c b/controller/if-status.c
index d1c14ac30..8503e5daa 100644
--- a/controller/if-status.c
+++ b/controller/if-status.c
@@ -54,44 +54,54 @@ VLOG_DEFINE_THIS_MODULE(if_status);
  */
 
 enum if_state {
-    OIF_CLAIMED,       /* Newly claimed interface. pb->chassis update not yet
-                          initiated. */
-    OIF_INSTALL_FLOWS, /* Claimed interface with pb->chassis update sent to
-                        * SB (but update notification not confirmed, so the
-                        * update may be resent in any of the following states)
-                        * and for which flows are still being installed.
-                        */
-    OIF_MARK_UP,       /* Interface with flows successfully installed in OVS
-                        * but not yet marked "up" in the binding module (in
-                        * SB and OVS databases).
-                        */
-    OIF_MARK_DOWN,     /* Released interface but not yet marked "down" in the
-                        * binding module (in SB and/or OVS databases).
-                        */
-    OIF_INSTALLED,     /* Interface flows programmed in OVS and binding marked
-                        * "up" in the binding module.
-                        */
+    OIF_CLAIMED,          /* Newly claimed interface. pb->chassis update not
+                             yet initiated. */
+    OIF_INSTALL_FLOWS,    /* Claimed interface with pb->chassis update sent to
+                           * SB (but update notification not confirmed, so the
+                           * update may be resent in any of the following
+                           * states) and for which flows are still being
+                           * installed.
+                           */
+    OIF_REM_OLD_OVN_INST, /* Interface with flows successfully installed in OVS
+                           * but with ovn-installed still in OVSDB.
+                           */
+    OIF_MARK_UP,          /* Interface with flows successfully installed in OVS
+                           * but not yet marked "up" in the binding module (in
+                           * SB and OVS databases).
+                           */
+    OIF_MARK_DOWN,        /* Released interface but not yet marked "down" in
+                           * the binding module (in SB and/or OVS databases).
+                           */
+    OIF_INSTALLED,        /* Interface flows programmed in OVS and binding
+                           * marked "up" in the binding module.
+                           */
+    OIF_UPDATE_PORT,      /* Logical ports need to be set down, and pb->chassis
+                           * removed.
+                           */
     OIF_MAX,
 };
 
 static const char *if_state_names[] = {
-    [OIF_CLAIMED]       = "CLAIMED",
-    [OIF_INSTALL_FLOWS] = "INSTALL_FLOWS",
-    [OIF_MARK_UP]       = "MARK_UP",
-    [OIF_MARK_DOWN]     = "MARK_DOWN",
-    [OIF_INSTALLED]     = "INSTALLED",
+    [OIF_CLAIMED]          = "CLAIMED",
+    [OIF_INSTALL_FLOWS]    = "INSTALL_FLOWS",
+    [OIF_REM_OLD_OVN_INST] = "REM_OLD_OVN_INST",
+    [OIF_MARK_UP]          = "MARK_UP",
+    [OIF_MARK_DOWN]        = "MARK_DOWN",
+    [OIF_INSTALLED]        = "INSTALLED",
+    [OIF_UPDATE_PORT]      = "UPDATE_PORT",
 };
 
 /*
  *       +----------------------+
  * +---> |                      |
- * | +-> |         NULL         | <--------------------------------------+++-+
- * | |   +----------------------+                                            |
- * | |     ^ release_iface   | claim_iface()                                 |
- * | |     |                 V - sbrec_update_chassis(if sb is rw)           |
- * | |   +----------------------+                                            |
- * | |   |                      | <----------------------------------------+ |
- * | |   |       CLAIMED        | <--------------------------------------+ | |
+ * | +-> |         NULL         |
+ * | |   +----------------------+
+ * | |     ^ release_iface   | claim_iface()
+ * | |     |                 V - sbrec_update_chassis(if sb is rw)
+ * | |   +----------------------+
+ * | |   |                      | <------------------------------------------+
+ * | |   |       CLAIMED        | <----------------------------------------+ |
+ * | |   |                      | <--------------------------------------+ | |
  * | |   +----------------------+                                        | | |
  * | |                 |  V  ^                                           | | |
  * | |                 |  |  | handle_claims()                           | | |
@@ -109,38 +119,63 @@ static const char *if_state_names[] = {
  * |     |                      |   - remove ovn-installed from ovsdb    | | |
  * |     |                      |  mgr_update()                          | | |
  * |     +----------------------+   - sbrec_update_chassis if needed     | | |
- * |                    |                                                | | |
- * |                    |  mgr_run(seqno rcvd)                           | | |
- * |                    |  - set port up in sb                           | | |
- * | release_iface      |  - set ovn-installed in ovs                    | | |
- * |                    V                                                | | |
+ * |        |            |                                               | | |
+ * |        |            +----------------------------------------+      | | |
+ * |        |                                                     |      | | |
+ * |        | mgr_run(seqno rcvd, ovn-installed present)          |      | | |
+ * |        V                                                     |      | | |
+ * |    +--------------------+                                    |      | | |
+ * |    |                    |  mgr_run()                         |      | | |
+ * +--- | REM_OLD_OVN_INST   |  - remove ovn-installed in ovs     |      | | |
+ * |    +--------------------+                                    |      | | |
+ * |               |                                              |      | | |
+ * |               |                                              |      | | |
+ * |               | mgr_update( ovn_installed not present)       |      | | |
+ * |               |                                              |      | | |
+ * |               |  +-------------------------------------------+      | | |
+ * |               |  |                                                  | | |
+ * |               |  |  mgr_run(seqno rcvd, ovn-installed not present)  | | |
+ * |               |  |  - set port up in sb                             | | |
+ * |               |  |  - set ovn-installed in ovs                      | | |
+ * |release_iface  |  |                                                  | | |
+ * |               V  V                                                  | | |
  * |   +----------------------+                                          | | |
  * |   |                      |  mgr_run()                               | | |
- * +-- |       MARK_UP        |  - set port up in sb                     | | |
- *     |                      |  - set ovn-installed in ovs              | | |
- *     |                      |  mgr_update()                            | | |
- *     +----------------------+  - sbrec_update_chassis if needed        | | |
- *              |                                                        | | |
- *              | mgr_update(rcvd port up / ovn_installed & chassis set) | | |
- *              V                                                        | | |
- *     +----------------------+                                          | | |
- *     |      INSTALLED       | ------------> claim_iface ---------------+ | |
- *     +----------------------+                                            | |
- *              |                                                          | |
- *              | release_iface                                            | |
- *              V                                                          | |
- *     +----------------------+                                            | |
- *     |                      | ------------> claim_iface -----------------+ |
- *     |      MARK_DOWN       | ------> mgr_update(rcvd port down) ----------+
- *     |                      | mgr_run()
- *     |                      | - set port down in sb
- *     |                      | mgr_update()
+ * +---|       MARK_UP        |  - set port up in sb                     | | |
+ * |   |                      |  - set ovn-installed in ovs              | | |
+ * |   |                      |  mgr_update()                            | | |
+ * |   +----------------------+  - sbrec_update_chassis if needed        | | |
+ * |            |                                                        | | |
+ * |            | mgr_update(rcvd port up / ovn_installed & chassis set) | | |
+ * |            V                                                        | | |
+ * |   +----------------------+                                          | | |
+ * |   |      INSTALLED       | ------------> claim_iface ---------------+ | |
+ * |   +----------------------+                                            | |
+ * |                  |                                                    | |
+ * |                  | release_iface                                      | |
+ * |mgr_update(       |                                                    | |
+ * |  rcvd port down) |                                                    | |
+ * |                  V                                                    | |
+ * |   +----------------------+                                            | |
+ * |   |                      | ------------> claim_iface -----------------+ |
+ * +---+      MARK_DOWN       | mgr_run()                                    |
+ * |   |                      | - set port down in sb                        |
+ * |   |                      | mgr_update(sb is rw)                         |
+ * |   +----------------------+ - sbrec_update_chassis(NULL)                 |
+ * |                  |                                                      |
+ * |                  | mgr_update(local binding not found)                  |
+ * |                  |                                                      |
+ * |                  V                                                      |
+ * |   +----------------------+                                              |
+ * |   |                      | ------------> claim_iface -------------------+
+ * +---+      UPDATE_PORT     | mgr_run()
  *     +----------------------+ - sbrec_update_chassis(NULL)
  */
 
 
 struct ovs_iface {
     char *id;               /* Extracted from OVS external_ids.iface_id. */
+    struct uuid pb_uuid;    /* Port_binding uuid */
     enum if_state state;    /* State of the interface in the state machine. */
     uint32_t install_seqno; /* Seqno at which this interface is expected to
                              * be fully programmed in OVS.  Only used in state
@@ -155,6 +190,9 @@ struct if_status_mgr {
     /* All local interfaces, mapping from 'iface-id' to 'struct ovs_iface'. */
     struct shash ifaces;
 
+    /* Local interfaces which need ovn-installed removal. */
+    struct shash ovn_uninstall_hash;
+
     /* All local interfaces, stored per state. */
     struct hmapx ifaces_per_state[OIF_MAX];
 
@@ -170,15 +208,20 @@ struct if_status_mgr {
 static struct ovs_iface *ovs_iface_create(struct if_status_mgr *,
                                           const char *iface_id,
                                           enum if_state );
+static void add_to_ovn_uninstall_hash(struct if_status_mgr *, const char *,
+                                      const struct uuid *);
 static void ovs_iface_destroy(struct if_status_mgr *, struct ovs_iface *);
+static void ovn_uninstall_hash_destroy(struct if_status_mgr *mgr, char *name);
 static void ovs_iface_set_state(struct if_status_mgr *, struct ovs_iface *,
                                 enum if_state);
 
 static void if_status_mgr_update_bindings(
     struct if_status_mgr *mgr, struct local_binding_data *binding_data,
     const struct sbrec_chassis *,
+    const struct ovsrec_interface_table *iface_table,
     bool sb_readonly, bool ovs_readonly);
 
+static void ovn_uninstall_hash_account_mem(const char *name, bool erase);
 struct if_status_mgr *
 if_status_mgr_create(void)
 {
@@ -189,6 +232,7 @@ if_status_mgr_create(void)
         hmapx_init(&mgr->ifaces_per_state[i]);
     }
     shash_init(&mgr->ifaces);
+    shash_init(&mgr->ovn_uninstall_hash);
     return mgr;
 }
 
@@ -202,6 +246,11 @@ if_status_mgr_clear(struct if_status_mgr *mgr)
     }
     ovs_assert(shash_is_empty(&mgr->ifaces));
 
+    SHASH_FOR_EACH_SAFE (node, &mgr->ovn_uninstall_hash) {
+        ovn_uninstall_hash_destroy(mgr, node->data);
+    }
+    ovs_assert(shash_is_empty(&mgr->ovn_uninstall_hash));
+
     for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) {
         ovs_assert(hmapx_is_empty(&mgr->ifaces_per_state[i]));
     }
@@ -212,6 +261,7 @@ if_status_mgr_destroy(struct if_status_mgr *mgr)
 {
     if_status_mgr_clear(mgr);
     shash_destroy(&mgr->ifaces);
+    shash_destroy(&mgr->ovn_uninstall_hash);
     for (size_t i = 0; i < ARRAY_SIZE(mgr->ifaces_per_state); i++) {
         hmapx_destroy(&mgr->ifaces_per_state[i]);
     }
@@ -231,6 +281,7 @@ if_status_mgr_claim_iface(struct if_status_mgr *mgr,
         iface = ovs_iface_create(mgr, iface_id, OIF_CLAIMED);
     }
 
+    memcpy(&iface->pb_uuid, &pb->header_.uuid, sizeof(iface->pb_uuid));
     if (!sb_readonly) {
         set_pb_chassis_in_sbrec(pb, chassis_rec, true);
     }
@@ -238,11 +289,13 @@ if_status_mgr_claim_iface(struct if_status_mgr *mgr,
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
+    case OIF_REM_OLD_OVN_INST:
     case OIF_MARK_UP:
         /* Nothing to do here. */
         break;
     case OIF_INSTALLED:
     case OIF_MARK_DOWN:
+    case OIF_UPDATE_PORT:
         ovs_iface_set_state(mgr, iface, OIF_CLAIMED);
         break;
     case OIF_MAX:
@@ -271,9 +324,10 @@ if_status_mgr_release_iface(struct if_status_mgr *mgr, const char *iface_id)
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
-        /* Not yet fully installed interfaces can be safely deleted. */
-        ovs_iface_destroy(mgr, iface);
-        break;
+        /* Not yet fully installed interfaces:
+         * pb->chassis still needs to be deleted.
+         */
+    case OIF_REM_OLD_OVN_INST:
     case OIF_MARK_UP:
     case OIF_INSTALLED:
         /* Properly mark interfaces "down" if their flows were already
@@ -282,6 +336,7 @@ if_status_mgr_release_iface(struct if_status_mgr *mgr, const char *iface_id)
         ovs_iface_set_state(mgr, iface, OIF_MARK_DOWN);
         break;
     case OIF_MARK_DOWN:
+    case OIF_UPDATE_PORT:
         /* Nothing to do here. */
         break;
     case OIF_MAX:
@@ -302,9 +357,10 @@ if_status_mgr_delete_iface(struct if_status_mgr *mgr, const char *iface_id)
     switch (iface->state) {
     case OIF_CLAIMED:
     case OIF_INSTALL_FLOWS:
-        /* Not yet fully installed interfaces can be safely deleted. */
-        ovs_iface_destroy(mgr, iface);
-        break;
+        /* Not yet fully installed interfaces:
+         * pb->chassis still needs to be deleted.
+         */
+    case OIF_REM_OLD_OVN_INST:
     case OIF_MARK_UP:
     case OIF_INSTALLED:
         /* Properly mark interfaces "down" if their flows were already
@@ -313,6 +369,7 @@ if_status_mgr_delete_iface(struct if_status_mgr *mgr, const char *iface_id)
         ovs_iface_set_state(mgr, iface, OIF_MARK_DOWN);
         break;
     case OIF_MARK_DOWN:
+    case OIF_UPDATE_PORT:
         /* Nothing to do here. */
         break;
     case OIF_MAX:
@@ -346,12 +403,34 @@ if_status_handle_claims(struct if_status_mgr *mgr,
     return rc;
 }
 
+static void
+clean_ovn_installed(struct if_status_mgr *mgr,
+                    const struct ovsrec_interface_table *iface_table)
+{
+    struct shash_node *node;
+
+    SHASH_FOR_EACH_SAFE (node, &mgr->ovn_uninstall_hash) {
+        const struct uuid *iface_uuid = node->data;
+        remove_ovn_installed_for_uuid(iface_table, iface_uuid);
+        free(node->data);
+        char *node_name = shash_steal(&mgr->ovn_uninstall_hash, node);
+        ovn_uninstall_hash_account_mem(node_name, true);
+        free(node_name);
+    }
+}
+
 void
 if_status_mgr_update(struct if_status_mgr *mgr,
                      struct local_binding_data *binding_data,
                      const struct sbrec_chassis *chassis_rec,
+                     const struct ovsrec_interface_table *iface_table,
+                     const struct sbrec_port_binding_table *pb_table,
+                     bool ovs_readonly,
                      bool sb_readonly)
 {
+    if (!ovs_readonly) {
+        clean_ovn_installed(mgr, iface_table);
+    }
     if (!binding_data) {
         return;
     }
@@ -359,6 +438,17 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     struct shash *bindings = &binding_data->bindings;
     struct hmapx_node *node;
 
+    /* Move all interfaces that have been confirmed without ovn-installed,
+     * from OIF_REM_OLD_OVN_INST to OIF_MARK_UP.
+     */
+    HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_REM_OLD_OVN_INST]) {
+        struct ovs_iface *iface = node->data;
+
+        if (!local_binding_is_ovn_installed(bindings, iface->id)) {
+            ovs_iface_set_state(mgr, iface, OIF_MARK_UP);
+        }
+    }
+
     /* Interfaces in OIF_MARK_UP/INSTALL_FLOWS state have already set their
      * pb->chassis. However, the update might still be in fly (confirmation
      * not received yet) or pb->chassis was overwitten by another chassis.
@@ -390,6 +480,10 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_MARK_DOWN]) {
         struct ovs_iface *iface = node->data;
 
+        if (!local_binding_find(bindings, iface->id)) {
+            ovs_iface_set_state(mgr, iface, OIF_UPDATE_PORT);
+            continue;
+        }
         if (!sb_readonly) {
             local_binding_set_pb(bindings, iface->id, chassis_rec,
                                  NULL, false);
@@ -437,6 +531,21 @@ if_status_mgr_update(struct if_status_mgr *mgr,
         }
     }
 
+    if (!sb_readonly) {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_UPDATE_PORT]) {
+            struct ovs_iface *iface = node->data;
+            port_binding_set_down(chassis_rec, pb_table, iface->id,
+                                  &iface->pb_uuid);
+            ovs_iface_destroy(mgr, node->data);
+        }
+    } else {
+        HMAPX_FOR_EACH_SAFE (node, &mgr->ifaces_per_state[OIF_UPDATE_PORT]) {
+            struct ovs_iface *iface = node->data;
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_INFO_RL(&rl, "Not setting lport %s down as sb is readonly",
+                         iface->id);
+        }
+    }
     /* Register for a notification about flows being installed in OVS for all
      * newly claimed interfaces for which pb->chassis has been updated.
      * Request a seqno update when the flows for new interfaces have been
@@ -450,10 +559,23 @@ if_status_mgr_update(struct if_status_mgr *mgr,
     }
 }
 
+void
+if_status_mgr_remove_ovn_installed(struct if_status_mgr *mgr,
+                                   const char *name,
+                                   const struct uuid *uuid)
+{
+    VLOG_DBG("Adding %s to list of interfaces for which to remove "
+              "ovn-installed", name);
+    if (!shash_find_data(&mgr->ovn_uninstall_hash, name)) {
+        add_to_ovn_uninstall_hash(mgr, name, uuid);
+    }
+}
+
 void
 if_status_mgr_run(struct if_status_mgr *mgr,
                   struct local_binding_data *binding_data,
                   const struct sbrec_chassis *chassis_rec,
+                  const struct ovsrec_interface_table *iface_table,
                   bool sb_readonly, bool ovs_readonly)
 {
     struct ofctrl_acked_seqnos *acked_seqnos =
@@ -471,12 +593,25 @@ if_status_mgr_run(struct if_status_mgr *mgr,
                                           iface->install_seqno)) {
             continue;
         }
-        ovs_iface_set_state(mgr, iface, OIF_MARK_UP);
+        /* Wait for ovn-installed to be absent before moving to MARK_UP state.
+         * Most of the time ovn-installed is already absent and hence we will
+         * not have to wait.
+         * If there is no binding_data, we can't determine if ovn-installed is
+         * present or not; hence also go to the OIF_REM_OLD_OVN_INST state.
+         */
+        if (!binding_data ||
+            local_binding_is_ovn_installed(&binding_data->bindings,
+                                           iface->id)) {
+            ovs_iface_set_state(mgr, iface, OIF_REM_OLD_OVN_INST);
+        } else {
+            ovs_iface_set_state(mgr, iface, OIF_MARK_UP);
+        }
     }
     ofctrl_acked_seqnos_destroy(acked_seqnos);
 
     /* Update binding states. */
     if_status_mgr_update_bindings(mgr, binding_data, chassis_rec,
+                                  iface_table,
                                   sb_readonly, ovs_readonly);
 }
 
@@ -492,6 +627,18 @@ ovs_iface_account_mem(const char *iface_id, bool erase)
     }
 }
 
+static void
+ovn_uninstall_hash_account_mem(const char *name, bool erase)
+{
+    uint32_t size = (strlen(name) + sizeof(struct uuid) +
+                     sizeof(struct shash_node));
+    if (erase) {
+        ifaces_usage -= size;
+    } else {
+        ifaces_usage += size;
+    }
+}
+
 static struct ovs_iface *
 ovs_iface_create(struct if_status_mgr *mgr, const char *iface_id,
                  enum if_state state)
@@ -506,6 +653,16 @@ ovs_iface_create(struct if_status_mgr *mgr, const char *iface_id,
     return iface;
 }
 
+static void
+add_to_ovn_uninstall_hash(struct if_status_mgr *mgr, const char *name,
+                          const struct uuid *uuid)
+{
+    struct uuid *new_uuid = xzalloc(sizeof *new_uuid);
+    memcpy(new_uuid, uuid, sizeof(*new_uuid));
+    shash_add(&mgr->ovn_uninstall_hash, name, new_uuid);
+    ovn_uninstall_hash_account_mem(name, false);
+}
+
 static void
 ovs_iface_destroy(struct if_status_mgr *mgr, struct ovs_iface *iface)
 {
@@ -521,6 +678,23 @@ ovs_iface_destroy(struct if_status_mgr *mgr, struct ovs_iface *iface)
     free(iface);
 }
 
+static void
+ovn_uninstall_hash_destroy(struct if_status_mgr *mgr, char *name)
+{
+    struct shash_node *node = shash_find(&mgr->ovn_uninstall_hash, name);
+    char *node_name = NULL;
+    if (node) {
+        free(node->data);
+        VLOG_DBG("Interface name %s destroyed", name);
+        node_name = shash_steal(&mgr->ovn_uninstall_hash, node);
+        ovn_uninstall_hash_account_mem(name, true);
+        free(node_name);
+    } else {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "Interface name %s not found", name);
+    }
+}
+
 static void
 ovs_iface_set_state(struct if_status_mgr *mgr, struct ovs_iface *iface,
                     enum if_state state)
@@ -539,6 +713,7 @@ static void
 if_status_mgr_update_bindings(struct if_status_mgr *mgr,
                               struct local_binding_data *binding_data,
                               const struct sbrec_chassis *chassis_rec,
+                              const struct ovsrec_interface_table *iface_table,
                               bool sb_readonly, bool ovs_readonly)
 {
     if (!binding_data) {
@@ -558,7 +733,17 @@ if_status_mgr_update_bindings(struct if_status_mgr *mgr,
                                sb_readonly, ovs_readonly);
     }
 
-    /* Notifiy the binding module to set "up" all bindings that have had
+    /* Notify the binding module to remove "ovn-installed" for all bindings
+     * in the OIF_REM_OLD_OVN_INST state.
+     */
+    HMAPX_FOR_EACH (node, &mgr->ifaces_per_state[OIF_REM_OLD_OVN_INST]) {
+        struct ovs_iface *iface = node->data;
+
+        local_binding_remove_ovn_installed(bindings, iface_table, iface->id,
+                                           ovs_readonly);
+    }
+
+    /* Notify the binding module to set "up" all bindings that have had
      * their flows installed but are not yet marked "up" in the binding
      * module.
      */
diff --git a/controller/if-status.h b/controller/if-status.h
index 5bd187a25..8ba80acd9 100644
--- a/controller/if-status.h
+++ b/controller/if-status.h
@@ -17,6 +17,7 @@
 #define IF_STATUS_H 1
 
 #include "openvswitch/shash.h"
+#include "lib/vswitch-idl.h"
 
 #include "binding.h"
 
@@ -35,9 +36,13 @@ void if_status_mgr_delete_iface(struct if_status_mgr *, const char *iface_id);
 
 void if_status_mgr_update(struct if_status_mgr *, struct local_binding_data *,
                           const struct sbrec_chassis *chassis,
+                          const struct ovsrec_interface_table *iface_table,
+                          const struct sbrec_port_binding_table *pb_table,
+                          bool ovs_readonly,
                           bool sb_readonly);
 void if_status_mgr_run(struct if_status_mgr *mgr, struct local_binding_data *,
                        const struct sbrec_chassis *,
+                       const struct ovsrec_interface_table *iface_table,
                        bool sb_readonly, bool ovs_readonly);
 void if_status_mgr_get_memory_usage(struct if_status_mgr *mgr,
                                     struct simap *usage);
@@ -48,5 +53,8 @@ bool if_status_handle_claims(struct if_status_mgr *mgr,
                              const struct sbrec_chassis *chassis_rec,
                              struct hmap *tracked_datapath,
                              bool sb_readonly);
+void if_status_mgr_remove_ovn_installed(struct if_status_mgr *mgr,
+                                        const char *name,
+                                        const struct uuid *uuid);
 
 # endif /* controller/if-status.h */
diff --git a/controller/lflow.c b/controller/lflow.c
index 6a98b19e1..0b071138d 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -1729,6 +1729,7 @@ add_lb_vip_hairpin_flows(const struct ovn_controller_lb *lb,
 
 static void
 add_lb_ct_snat_hairpin_for_dp(const struct ovn_controller_lb *lb,
+                              bool has_vip_port,
                               const struct sbrec_datapath_binding *datapath,
                               const struct hmap *local_datapaths,
                               struct match *dp_match,
@@ -1742,15 +1743,21 @@ add_lb_ct_snat_hairpin_for_dp(const struct ovn_controller_lb *lb,
         match_set_metadata(dp_match, htonll(datapath->tunnel_key));
     }
 
+    uint16_t priority = datapath ? 200 : 100;
+    if (!has_vip_port) {
+        /* If L4 ports are not specified for the current LB, we will decrease
+         * the flow priority in order to not collide with other LBs with more
+         * fine-grained configuration.
+         */
+        priority -= 10;
+    }
     /* A flow added for the "hairpin_snat_ip" case will have an extra
      * datapath match, but it will also match on the less restrictive
      * general case.  Therefore, we set the priority in the
      * "hairpin_snat_ip" case to be higher than the general case. */
-    ofctrl_add_or_append_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN,
-                              datapath ? 200 : 100,
-                              lb->slb->header_.uuid.parts[0],
-                              dp_match, dp_acts, &lb->slb->header_.uuid,
-                              NX_CTLR_NO_METER, NULL);
+    ofctrl_add_flow(flow_table, OFTABLE_CT_SNAT_HAIRPIN,
+                    priority, lb->slb->header_.uuid.parts[0],
+                    dp_match, dp_acts, &lb->slb->header_.uuid);
 }
 
 /* Add a ct_snat flow for each VIP of the LB.  If this LB does not use
@@ -1836,8 +1843,8 @@ add_lb_ct_snat_hairpin_vip_flow(const struct ovn_controller_lb *lb,
         }
     }
 
-    match_set_nw_proto(&match, lb->proto);
     if (lb_vip->vip_port) {
+        match_set_nw_proto(&match, lb->proto);
         if (!lb->hairpin_orig_tuple) {
             match_set_ct_nw_proto(&match, lb->proto);
             match_set_ct_tp_dst(&match, htons(lb_vip->vip_port));
@@ -1854,18 +1861,20 @@ add_lb_ct_snat_hairpin_vip_flow(const struct ovn_controller_lb *lb,
     }
 
     if (!use_hairpin_snat_ip) {
-        add_lb_ct_snat_hairpin_for_dp(lb, NULL, NULL,
+        add_lb_ct_snat_hairpin_for_dp(lb, !!lb_vip->vip_port, NULL, NULL,
                                       &match, &ofpacts, flow_table);
     } else {
         for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
-            add_lb_ct_snat_hairpin_for_dp(lb, lb->slb->datapaths[i],
-                                          local_datapaths,
-                                          &match, &ofpacts, flow_table);
+            add_lb_ct_snat_hairpin_for_dp(lb, !!lb_vip->vip_port,
+                                          lb->slb->datapaths[i],
+                                          local_datapaths, &match,
+                                          &ofpacts, flow_table);
         }
         if (lb->slb->datapath_group) {
             for (size_t i = 0; i < lb->slb->datapath_group->n_datapaths; i++) {
                 add_lb_ct_snat_hairpin_for_dp(
-                    lb, lb->slb->datapath_group->datapaths[i],
+                    lb, !!lb_vip->vip_port,
+                    lb->slb->datapath_group->datapaths[i],
                     local_datapaths, &match, &ofpacts, flow_table);
             }
         }
diff --git a/controller/mirror.c b/controller/mirror.c
index 665736966..0e5885e9b 100644
--- a/controller/mirror.c
+++ b/controller/mirror.c
@@ -22,6 +22,7 @@
 
 /* OVS includes. */
 #include "lib/vswitch-idl.h"
+#include "lib/socket-util.h"
 #include "include/openvswitch/shash.h"
 #include "openvswitch/vlog.h"
 
@@ -69,6 +70,7 @@ static void set_mirror_iface_options(struct ovsrec_interface *,
 static const struct ovsrec_port *get_iface_port(
     const struct ovsrec_interface *, const struct ovsrec_bridge *);
 
+char *get_mirror_tunnel_type(const struct sbrec_mirror *);
 
 void
 mirror_register_ovs_idl(struct ovsdb_idl *ovs_idl)
@@ -244,24 +246,26 @@ set_mirror_iface_options(struct ovsrec_interface *iface,
     smap_destroy(&options);
 }
 
+char *
+get_mirror_tunnel_type(const struct sbrec_mirror *sb_mirror)
+{
+    bool is_ipv6 = addr_is_ipv6(sb_mirror->sink);
+
+    return xasprintf(is_ipv6 ? "ip6%s" : "%s", sb_mirror->type);
+}
+
 static void
 check_and_update_interface_table(const struct sbrec_mirror *sb_mirror,
                                  const struct ovsrec_mirror *ovs_mirror)
 {
-    char *type;
-    struct ovsrec_interface *iface =
-                          ovs_mirror->output_port->interfaces[0];
-    struct smap *opts = &iface->options;
-    const char *erspan_ver = smap_get(opts, "erspan_ver");
-    if (erspan_ver) {
-        type = "erspan";
-    } else {
-        type = "gre";
-    }
-    if (strcmp(type, sb_mirror->type)) {
-        ovsrec_interface_set_type(iface, sb_mirror->type);
+    struct ovsrec_interface *iface = ovs_mirror->output_port->interfaces[0];
+    char *type = get_mirror_tunnel_type(sb_mirror);
+
+    if (strcmp(type, iface->type)) {
+        ovsrec_interface_set_type(iface, type);
     }
     set_mirror_iface_options(iface, sb_mirror);
+    free(type);
 }
 
 static void
@@ -327,8 +331,11 @@ create_ovs_mirror(struct ovn_mirror *m, struct ovsdb_idl_txn *ovs_idl_txn,
     char *port_name = xasprintf("ovn-%s", m->name);
 
     ovsrec_interface_set_name(iface, port_name);
-    ovsrec_interface_set_type(iface, m->sb_mirror->type);
+
+    char *type = get_mirror_tunnel_type(m->sb_mirror);
+    ovsrec_interface_set_type(iface, type);
     set_mirror_iface_options(iface, m->sb_mirror);
+    free(type);
 
     struct ovsrec_port *port = ovsrec_port_insert(ovs_idl_txn);
     ovsrec_port_set_name(port, port_name);
diff --git a/controller/ovn-controller.8.xml b/controller/ovn-controller.8.xml
index ab52e2d34..f61f43008 100644
--- a/controller/ovn-controller.8.xml
+++ b/controller/ovn-controller.8.xml
@@ -121,11 +121,11 @@
           that is needed in the current chassis.
         </p>
         <p>
-          It is more optimal to set it to <code>true</code> in use cases when
-          the chassis would anyway need to monitor most of the records in
-          <var>ovs-database</var>, which would save the overhead of conditions
-          processing, especially for server side.  Typically, set it to
-          <code>true</code> for environments that all workloads need to be
+          It is more efficient to set it to <code>true</code> in use cases
+          where the chassis would anyway need to monitor most of the records in
+          <var>OVN Southbound</var> database, which would save the overhead of
+          conditions processing, especially for server side.  Typically, set it
+          to <code>true</code> for environments that all workloads need to be
           reachable from each other.
         </p>
         <p>
@@ -171,16 +171,14 @@
         </p>
 
         <p>
-          Supported tunnel types for connecting hypervisors
-          are <code>geneve</code> and <code>stt</code>.  Gateways may
-          use <code>geneve</code>, <code>vxlan</code>, or
-          <code>stt</code>.
+          Supported tunnel types for connecting hypervisors and gateways
+          are <code>geneve</code>, <code>vxlan</code>, and <code>stt</code>.
         </p>
 
         <p>
           Due to the limited amount of metadata in <code>vxlan</code>,
-          the capabilities and performance of connected gateways will be
-          reduced versus other tunnel formats.
+          the capabilities and performance of connected gateways and
+          hypervisors will be reduced versus other tunnel formats.
         </p>
       </dd>
 
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 2d18bbfca..44a4518b9 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -712,7 +712,7 @@ get_snat_ct_zone(const struct sbrec_datapath_binding *dp)
 }
 
 static void
-update_ct_zones(const struct shash *binding_lports,
+update_ct_zones(const struct sset *local_lports,
                 const struct hmap *local_datapaths,
                 struct simap *ct_zones, unsigned long *ct_zone_bitmap,
                 struct shash *pending_ct_zones)
@@ -725,9 +725,9 @@ update_ct_zones(const struct shash *binding_lports,
     unsigned long unreq_snat_zones_map[BITMAP_N_LONGS(MAX_CT_ZONES)];
     struct simap unreq_snat_zones = SIMAP_INITIALIZER(&unreq_snat_zones);
 
-    struct shash_node *shash_node;
-    SHASH_FOR_EACH (shash_node, binding_lports) {
-        sset_add(&all_users, shash_node->name);
+    const char *local_lport;
+    SSET_FOR_EACH (local_lport, local_lports) {
+        sset_add(&all_users, local_lport);
     }
 
     /* Local patched datapath (gateway routers) need zones assigned. */
@@ -2010,7 +2010,11 @@ addr_sets_update(const struct sbrec_address_set_table *address_set_table,
         if (sbrec_address_set_is_deleted(as)) {
             expr_const_sets_remove(addr_sets, as->name);
             sset_add(deleted, as->name);
-        } else {
+        }
+    }
+
+    SBREC_ADDRESS_SET_TABLE_FOR_EACH_TRACKED (as, address_set_table) {
+        if (!sbrec_address_set_is_deleted(as)) {
             struct expr_constant_set *cs_old = shash_find_data(addr_sets,
                                                                as->name);
             if (!cs_old) {
@@ -2381,7 +2385,7 @@ en_ct_zones_run(struct engine_node *node, void *data)
         EN_OVSDB_GET(engine_get_input("OVS_bridge", node));
 
     restore_ct_zones(bridge_table, ovs_table, ct_zones_data);
-    update_ct_zones(&rt_data->lbinding_data.lports, &rt_data->local_datapaths,
+    update_ct_zones(&rt_data->local_lports, &rt_data->local_datapaths,
                     &ct_zones_data->current, ct_zones_data->bitmap,
                     &ct_zones_data->pending);
 
@@ -2471,8 +2475,10 @@ ct_zones_runtime_data_handler(struct engine_node *node, void *data)
         SHASH_FOR_EACH (shash_node, &tdp->lports) {
             struct tracked_lport *t_lport = shash_node->data;
             if (strcmp(t_lport->pb->type, "")
-                && strcmp(t_lport->pb->type, "localport")) {
-                /* We allocate zone-id's only to VIF and localport lports. */
+                && strcmp(t_lport->pb->type, "localport")
+                && strcmp(t_lport->pb->type, "localnet")) {
+                /* We allocate zone-id's only to VIF, localport, and localnet
+                 * lports. */
                 continue;
             }
 
@@ -2697,7 +2703,8 @@ static void
 lb_data_removed_five_tuples_add(struct ed_type_lb_data *lb_data,
                                 const struct ovn_controller_lb *lb)
 {
-    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT)) {
+    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT) ||
+        !lb->ct_flush) {
         return;
     }
 
@@ -2716,7 +2723,8 @@ static void
 lb_data_removed_five_tuples_remove(struct ed_type_lb_data *lb_data,
                                    const struct ovn_controller_lb *lb)
 {
-    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT)) {
+    if (!ovs_feature_is_supported(OVS_CT_TUPLE_FLUSH_SUPPORT) ||
+        !lb->ct_flush) {
         return;
     }
 
@@ -5071,7 +5079,8 @@ main(int argc, char *argv[])
                                chassis,
                                sbrec_sb_global_first(ovnsb_idl_loop.idl),
                                ovs_table,
-                               &transport_zones);
+                               &transport_zones,
+                               bridge_table);
 
                     stopwatch_start(CONTROLLER_LOOP_STOPWATCH_NAME,
                                     time_msec());
@@ -5225,6 +5234,11 @@ main(int argc, char *argv[])
                     stopwatch_start(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME,
                                     time_msec());
                     if_status_mgr_update(if_mgr, binding_data, chassis,
+                                         ovsrec_interface_table_get(
+                                                    ovs_idl_loop.idl),
+                                         sbrec_port_binding_table_get(
+                                                    ovnsb_idl_loop.idl),
+                                         !ovs_idl_txn,
                                          !ovnsb_idl_txn);
                     stopwatch_stop(IF_STATUS_MGR_UPDATE_STOPWATCH_NAME,
                                    time_msec());
@@ -5254,11 +5268,12 @@ main(int argc, char *argv[])
                     stopwatch_start(IF_STATUS_MGR_RUN_STOPWATCH_NAME,
                                     time_msec());
                     if_status_mgr_run(if_mgr, binding_data, chassis,
+                                      ovsrec_interface_table_get(
+                                                  ovs_idl_loop.idl),
                                       !ovnsb_idl_txn, !ovs_idl_txn);
                     stopwatch_stop(IF_STATUS_MGR_RUN_STOPWATCH_NAME,
                                    time_msec());
                 }
-
             }
 
             if (!engine_has_run()) {
@@ -5449,6 +5464,7 @@ loop_done:
     binding_destroy();
     patch_destroy();
     mirror_destroy();
+    encaps_destroy();
     if_status_mgr_destroy(if_mgr);
     shash_destroy(&vif_plug_deleted_iface_ids);
     shash_destroy(&vif_plug_changed_iface_ids);
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 795847729..761783562 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -2444,19 +2444,19 @@ compose_out_dhcpv6_opts(struct ofpbuf *userdata,
                         struct ofpbuf *out_dhcpv6_opts, ovs_be32 iaid)
 {
     while (userdata->size) {
-        struct dhcp_opt6_header *userdata_opt = ofpbuf_try_pull(
+        struct dhcpv6_opt_header *userdata_opt = ofpbuf_try_pull(
             userdata, sizeof *userdata_opt);
         if (!userdata_opt) {
             return false;
         }
 
-        size_t size = ntohs(userdata_opt->size);
+        size_t size = ntohs(userdata_opt->len);
         uint8_t *userdata_opt_data = ofpbuf_try_pull(userdata, size);
         if (!userdata_opt_data) {
             return false;
         }
 
-        switch (ntohs(userdata_opt->opt_code)) {
+        switch (ntohs(userdata_opt->code)) {
         case DHCPV6_OPT_SERVER_ID_CODE:
         {
             /* The Server Identifier option carries a DUID
@@ -7190,7 +7190,9 @@ bfd_monitor_send_msg(struct rconn *swconn, long long int *bfd_time)
         pinctrl_send_bfd_tx_msg(swconn, entry, false);
 
         tx_timeout = MAX(entry->local_min_tx, entry->remote_min_rx);
-        tx_timeout -= random_range((tx_timeout * 25) / 100);
+        if (tx_timeout >= 4) {
+            tx_timeout -= random_range(tx_timeout / 4);
+        }
         entry->next_tx = cur_time + tx_timeout;
 next:
         if (*bfd_time > entry->next_tx) {
diff --git a/debian/changelog b/debian/changelog
index 11a07dd38..02a9953ba 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+ovn (23.03.1-1) unstable; urgency=low
+   [ OVN team ]
+   * New upstream version
+
+ -- OVN team <dev@openvswitch.org>  Fri, 03 Mar 2023 10:40:37 -0500
+
 ovn (23.03.0-1) unstable; urgency=low
 
    * New upstream version
diff --git a/lib/actions.c b/lib/actions.c
index 781549d75..2b566c85e 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -2882,26 +2882,26 @@ static void
 encode_put_dhcpv6_option(const struct ovnact_gen_option *o,
                          struct ofpbuf *ofpacts)
 {
-    struct dhcp_opt6_header *opt = ofpbuf_put_uninit(ofpacts, sizeof *opt);
+    struct dhcpv6_opt_header *opt = ofpbuf_put_uninit(ofpacts, sizeof *opt);
     const union expr_constant *c = o->value.values;
     size_t n_values = o->value.n_values;
     size_t size;
 
-    opt->opt_code = htons(o->option->code);
+    opt->code = htons(o->option->code);
 
     if (!strcmp(o->option->type, "ipv6")) {
         size = n_values * sizeof(struct in6_addr);
-        opt->size = htons(size);
+        opt->len = htons(size);
         for (size_t i = 0; i < n_values; i++) {
             ofpbuf_put(ofpacts, &c[i].value.ipv6, sizeof(struct in6_addr));
         }
     } else if (!strcmp(o->option->type, "mac")) {
         size = sizeof(struct eth_addr);
-        opt->size = htons(size);
+        opt->len = htons(size);
         ofpbuf_put(ofpacts, &c->value.mac, size);
     } else if (!strcmp(o->option->type, "str")) {
         size = strlen(c->string);
-        opt->size = htons(size);
+        opt->len = htons(size);
         ofpbuf_put(ofpacts, c->string, size);
     }
 }
diff --git a/lib/lb.c b/lib/lb.c
index e941434c4..f88c1855b 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -38,6 +38,7 @@ static const char *lb_neighbor_responder_mode_names[] = {
 static struct nbrec_load_balancer_health_check *
 ovn_lb_get_health_check(const struct nbrec_load_balancer *nbrec_lb,
                         const char *vip_port_str, bool template);
+static void ovn_lb_backends_clear(struct ovn_lb_vip *vip);
 
 struct ovn_lb_ip_set *
 ovn_lb_ip_set_create(void)
@@ -238,6 +239,8 @@ ovn_lb_backends_init_template(struct ovn_lb_vip *lb_vip, const char *value_)
             ds_put_format(&errors, "%s: should be a template of the form: "
                           "'^backendip_variable1[:^port_variable1|:port]', ",
                           atom);
+            free(backend_port);
+            free(backend_ip);
         }
         free(atom);
     }
@@ -285,8 +288,27 @@ ovn_lb_vip_init_template(struct ovn_lb_vip *lb_vip, const char *lb_key_,
                          lb_key_);
     }
 
+    /* Backends can either be templates or explicit IPs and ports. */
     lb_vip->address_family = address_family;
-    return ovn_lb_backends_init_template(lb_vip, lb_value);
+    lb_vip->template_backends = true;
+    char *template_error = ovn_lb_backends_init_template(lb_vip, lb_value);
+
+    if (template_error) {
+        lb_vip->template_backends = false;
+        ovn_lb_backends_clear(lb_vip);
+
+        char *explicit_error = ovn_lb_backends_init_explicit(lb_vip, lb_value);
+        if (explicit_error) {
+            char *error =
+                xasprintf("invalid backend: template (%s) OR explicit (%s)",
+                          template_error, explicit_error);
+            free(explicit_error);
+            free(template_error);
+            return error;
+        }
+        free(template_error);
+    }
+    return NULL;
 }
 
 /* Returns NULL on success, an error string on failure.  The caller is
@@ -304,15 +326,29 @@ ovn_lb_vip_init(struct ovn_lb_vip *lb_vip, const char *lb_key,
                                        address_family);
 }
 
-void
-ovn_lb_vip_destroy(struct ovn_lb_vip *vip)
+static void
+ovn_lb_backends_destroy(struct ovn_lb_vip *vip)
 {
-    free(vip->vip_str);
-    free(vip->port_str);
     for (size_t i = 0; i < vip->n_backends; i++) {
         free(vip->backends[i].ip_str);
         free(vip->backends[i].port_str);
     }
+}
+
+static void
+ovn_lb_backends_clear(struct ovn_lb_vip *vip)
+{
+    ovn_lb_backends_destroy(vip);
+    vip->backends = NULL;
+    vip->n_backends = 0;
+}
+
+void
+ovn_lb_vip_destroy(struct ovn_lb_vip *vip)
+{
+    free(vip->vip_str);
+    free(vip->port_str);
+    ovn_lb_backends_destroy(vip);
     free(vip->backends);
 }
 
@@ -357,11 +393,10 @@ ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s, bool template)
 }
 
 void
-ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s,
-                           bool template)
+ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s)
 {
     bool needs_brackets = vip->address_family == AF_INET6 && vip->port_str
-                          && !template;
+                          && !vip->template_backends;
     for (size_t i = 0; i < vip->n_backends; i++) {
         struct ovn_lb_backend *backend = &vip->backends[i];
 
@@ -798,6 +833,7 @@ ovn_controller_lb_create(const struct sbrec_load_balancer *sbrec_lb,
     lb->hairpin_orig_tuple = smap_get_bool(&sbrec_lb->options,
                                            "hairpin_orig_tuple",
                                            false);
+    lb->ct_flush = smap_get_bool(&sbrec_lb->options, "ct_flush", false);
     ovn_lb_get_hairpin_snat_ip(&sbrec_lb->header_.uuid, &sbrec_lb->options,
                                &lb->hairpin_snat_ips);
     return lb;
diff --git a/lib/lb.h b/lib/lb.h
index 7a67b7426..e24f519db 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -96,6 +96,9 @@ struct ovn_lb_vip {
                           */
     struct ovn_lb_backend *backends;
     size_t n_backends;
+    bool template_backends; /* True if the backends are templates. False if
+                             * they're explicitly specified.
+                             */
     bool empty_backend_rej;
     int address_family;
 };
@@ -188,6 +191,7 @@ struct ovn_controller_lb {
     bool hairpin_orig_tuple; /* True if ovn-northd stores the original
                               * destination tuple in registers.
                               */
+    bool ct_flush; /* True if we should flush CT after backend removal. */
 
     struct lport_addresses hairpin_snat_ips; /* IP (v4 and/or v6) to be used
                                               * as source for hairpinned
@@ -210,8 +214,7 @@ char *ovn_lb_vip_init(struct ovn_lb_vip *lb_vip, const char *lb_key,
 void ovn_lb_vip_destroy(struct ovn_lb_vip *vip);
 void ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s,
                        bool template);
-void ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s,
-                                bool template);
+void ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s);
 
 struct ovn_lb_5tuple {
     struct hmap_node hmap_node;
diff --git a/lib/ovn-l7.h b/lib/ovn-l7.h
index 2b20bc380..d718ed39a 100644
--- a/lib/ovn-l7.h
+++ b/lib/ovn-l7.h
@@ -240,12 +240,6 @@ struct dhcp_opt_header {
 #define DHCP_OPT_PAYLOAD(hdr) \
     (void *)((char *)hdr + sizeof(struct dhcp_opt_header))
 
-/* Used in the OpenFlow PACKET_IN userdata */
-struct dhcp_opt6_header {
-    ovs_be16 opt_code;
-    ovs_be16 size;
-};
-
 /* These are not defined in ovs/lib/dhcp.h, hence defining here. */
 #define OVN_DHCP_MSG_DECLINE        4
 #define OVN_DHCP_MSG_RELEASE        7
diff --git a/northd/inc-proc-northd.c b/northd/inc-proc-northd.c
index d23993a55..fd025c92b 100644
--- a/northd/inc-proc-northd.c
+++ b/northd/inc-proc-northd.c
@@ -34,10 +34,13 @@
 #include "en-lflow.h"
 #include "en-northd-output.h"
 #include "en-sync-sb.h"
+#include "unixctl.h"
 #include "util.h"
 
 VLOG_DEFINE_THIS_MODULE(inc_proc_northd);
 
+static unixctl_cb_func chassis_features_list;
+
 #define NB_NODES \
     NB_NODE(nb_global, "nb_global") \
     NB_NODE(copp, "copp") \
@@ -306,6 +309,12 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
     engine_ovsdb_node_add_index(&en_sb_address_set,
                                 "sbrec_address_set_by_name",
                                 sbrec_address_set_by_name);
+
+    struct northd_data *northd_data =
+        engine_get_internal_data(&en_northd);
+    unixctl_command_register("debug/chassis-features-list", "", 0, 0,
+                             chassis_features_list,
+                             &northd_data->features);
 }
 
 /* Returns true if the incremental processing ended up updating nodes. */
@@ -356,3 +365,20 @@ void inc_proc_northd_cleanup(void)
     engine_cleanup();
     engine_set_context(NULL);
 }
+
+static void
+chassis_features_list(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                      const char *argv[] OVS_UNUSED, void *features_)
+{
+    struct chassis_features *features = features_;
+    struct ds ds = DS_EMPTY_INITIALIZER;
+
+    ds_put_format(&ds, "ct_no_masked_label:    %s\n",
+                  features->ct_no_masked_label ? "true" : "false");
+    ds_put_format(&ds, "ct_lb_related:         %s\n",
+                  features->ct_lb_related ? "true" : "false");
+    ds_put_format(&ds, "mac_binding_timestamp: %s\n",
+                  features->mac_binding_timestamp ? "true" : "false");
+    unixctl_command_reply(conn, ds_cstr(&ds));
+    ds_destroy(&ds);
+}
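The handler registered above can presumably be invoked at runtime through ovn-appctl; a usage sketch (the output format follows the ds_put_format() calls above, actual values depend on the chassis present in the local availability zone):

    ovn-appctl -t ovn-northd debug/chassis-features-list
    # ct_no_masked_label:    true
    # ct_lb_related:         true
    # mac_binding_timestamp: true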
diff --git a/northd/northd.c b/northd/northd.c
index 7ad4cdfad..045282fac 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -432,6 +432,13 @@ build_chassis_features(const struct northd_input *input_data,
     const struct sbrec_chassis *chassis;
 
     SBREC_CHASSIS_TABLE_FOR_EACH (chassis, input_data->sbrec_chassis) {
+        /* Only consider local AZ chassis.  Remote ones don't install
+         * flows generated by the local northd.
+         */
+        if (smap_get_bool(&chassis->other_config, "is-remote", false)) {
+            continue;
+        }
+
         bool ct_no_masked_label =
             smap_get_bool(&chassis->other_config,
                           OVN_FEATURE_CT_NO_MASKED_LABEL,
@@ -552,7 +559,7 @@ free_chassis_queueid(struct hmap *set, const struct uuid *uuid,
 static inline bool
 port_has_qos_params(const struct smap *opts)
 {
-    return (smap_get(opts, "qos_max_rate") ||
+    return (smap_get(opts, "qos_max_rate") || smap_get(opts, "qos_min_rate") ||
             smap_get(opts, "qos_burst"));
 }
 
@@ -1641,6 +1648,10 @@ ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
          * use it. */
         hmap_remove(ports, &port->key_node);
 
+        if (port->peer) {
+            port->peer->peer = NULL;
+        }
+
         for (int i = 0; i < port->n_lsp_addrs; i++) {
             destroy_lport_addresses(&port->lsp_addrs[i]);
         }
@@ -3881,7 +3892,7 @@ build_lb_vip_actions(struct ovn_lb_vip *lb_vip,
     const char *ct_lb_action =
         features->ct_no_masked_label ? "ct_lb_mark" : "ct_lb";
     bool reject = !lb_vip->n_backends && lb_vip->empty_backend_rej;
-    bool drop = false;
+    bool drop = !lb_vip->n_backends && !lb_vip->empty_backend_rej;
 
     if (lb_vip_nb->lb_health_check) {
         ds_put_format(action, "%s(backends=", ct_lb_action);
@@ -5779,20 +5790,24 @@ skip_port_from_conntrack(struct ovn_datapath *od, struct ovn_port *op,
      * know about the connection, as the icmp request went through the logical
      * router on hostA, not hostB. This would only work with distributed
      * conntrack state across all chassis. */
-    struct ds match_in = DS_EMPTY_INITIALIZER;
-    struct ds match_out = DS_EMPTY_INITIALIZER;
 
-    ds_put_format(&match_in, "ip && inport == %s", op->json_key);
-    ds_put_format(&match_out, "ip && outport == %s", op->json_key);
+    const char *ingress_action = "next;";
+    const char *egress_action = od->has_stateful_acl
+                                ? "next;"
+                                : "ct_clear; next;";
+
+    char *ingress_match = xasprintf("ip && inport == %s", op->json_key);
+    char *egress_match = xasprintf("ip && outport == %s", op->json_key);
+
     ovn_lflow_add_with_lport_and_hint(lflows, od, in_stage, priority,
-                                      ds_cstr(&match_in), "next;", op->key,
-                                      &op->nbsp->header_);
+                                      ingress_match, ingress_action,
+                                      op->key, &op->nbsp->header_);
     ovn_lflow_add_with_lport_and_hint(lflows, od, out_stage, priority,
-                                      ds_cstr(&match_out), "next;", op->key,
-                                      &op->nbsp->header_);
+                                      egress_match, egress_action,
+                                      op->key, &op->nbsp->header_);
 
-    ds_destroy(&match_in);
-    ds_destroy(&match_out);
+    free(ingress_match);
+    free(egress_match);
 }
 
 static void
@@ -5867,7 +5882,8 @@ build_pre_acls(struct ovn_datapath *od, const struct hmap *port_groups,
         }
         for (size_t i = 0; i < od->n_localnet_ports; i++) {
             skip_port_from_conntrack(od, od->localnet_ports[i],
-                                     S_SWITCH_IN_PRE_ACL, S_SWITCH_OUT_PRE_ACL,
+                                     S_SWITCH_IN_PRE_ACL,
+                                     S_SWITCH_OUT_PRE_ACL,
                                      110, lflows);
         }
 
@@ -6036,10 +6052,17 @@ build_pre_lb(struct ovn_datapath *od, const struct shash *meter_groups,
                                  S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB,
                                  110, lflows);
     }
-    for (size_t i = 0; i < od->n_localnet_ports; i++) {
-        skip_port_from_conntrack(od, od->localnet_ports[i],
-                                 S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB,
-                                 110, lflows);
+    /* Localnet ports do not need to go through conntrack, unless the
+     * logical switch has a load balancer.  In that case conntrack is
+     * necessary so that traffic arriving via the localnet port can be
+     * load balanced.
+     */
+    if (!od->has_lb_vip) {
+        for (size_t i = 0; i < od->n_localnet_ports; i++) {
+            skip_port_from_conntrack(od, od->localnet_ports[i],
+                                     S_SWITCH_IN_PRE_LB, S_SWITCH_OUT_PRE_LB,
+                                     110, lflows);
+        }
     }
 
     /* Do not send stateless flows via conntrack */
@@ -6700,6 +6723,8 @@ build_port_group_lswitches(struct northd_input *input_data,
     }
 }
 
+#define IPV6_CT_OMIT_MATCH "nd || nd_ra || nd_rs || mldv1 || mldv2"
+
 static void
 build_acls(struct ovn_datapath *od, const struct chassis_features *features,
            struct hmap *lflows, const struct hmap *port_groups,
@@ -6847,20 +6872,26 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features,
         ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3,
                       ds_cstr(&match), ct_out_acl_action);
 
-        /* Ingress and Egress ACL Table (Priority 65532).
-         *
-         * Not to do conntrack on ND packets. */
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3,
-                      "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;");
-        ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3,
-                      "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;");
-
         /* Reply and related traffic matched by an "allow-related" ACL
          * should be allowed in the ls_in_acl_after_lb stage too. */
         ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB, UINT16_MAX - 3,
                       REGBIT_ACL_HINT_ALLOW_REL" == 1", "next;");
     }
 
+    /* Ingress and Egress ACL Table (Priority 65532).
+     *
+     * Always allow service IPv6 protocols regardless of other ACLs defined.
+     *
+     * Also, don't send them to conntrack because session tracking
+     * for these protocols is not working properly:
+     * https://bugzilla.kernel.org/show_bug.cgi?id=11797. */
+    ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3,
+                  IPV6_CT_OMIT_MATCH, "next;");
+    ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3,
+                  IPV6_CT_OMIT_MATCH, "next;");
+    ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB, UINT16_MAX - 3,
+                  IPV6_CT_OMIT_MATCH, "next;");
+
     /* Ingress or Egress ACL Table (Various priorities). */
     for (size_t i = 0; i < od->nbs->n_acls; i++) {
         struct nbrec_acl *acl = od->nbs->acls[i];
@@ -7089,7 +7120,9 @@ build_lb_rules_pre_stateful(struct hmap *lflows, struct ovn_northd_lb *lb,
  * - load balancing affinity check:
  *   table=lr_in_lb_aff_check, priority=100
  *      match=(new_lb_match)
- *      action=(REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;)
+ *      action=(REG_NEXT_HOP_IPV4 = ip4.dst;
+ *              REG_ORIG_TP_DPORT_ROUTER = tcp.dst;
+ *              REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;)
  *
  * - load balancing:
  *   table=lr_in_dnat, priority=150
@@ -7130,16 +7163,11 @@ build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
         return;
     }
 
-    static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
-
-    ovn_lflow_add_with_dp_group(
-        lflows, dp_bitmap, S_ROUTER_IN_LB_AFF_CHECK, 100,
-        new_lb_match, aff_check, &lb->nlb->header_);
-
     struct ds aff_action = DS_EMPTY_INITIALIZER;
     struct ds aff_action_learn = DS_EMPTY_INITIALIZER;
     struct ds aff_match = DS_EMPTY_INITIALIZER;
     struct ds aff_match_learn = DS_EMPTY_INITIALIZER;
+    struct ds aff_check_action = DS_EMPTY_INITIALIZER;
 
     bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_vip->vip);
     const char *ip_match = ipv6 ? "ip6" : "ip4";
@@ -7155,6 +7183,20 @@ build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
         ct_flag = "; force_snat";
     }
 
+    /* Create affinity check flow. */
+    ds_put_format(&aff_check_action, "%s = %s.dst; ", reg_vip, ip_match);
+
+    if (lb_vip->port_str) {
+        ds_put_format(&aff_check_action, REG_ORIG_TP_DPORT_ROUTER" = %s.dst; ",
+                      lb->proto);
+    }
+    ds_put_cstr(&aff_check_action, REGBIT_KNOWN_LB_SESSION
+                " = chk_lb_aff(); next;");
+
+    ovn_lflow_add_with_dp_group(
+        lflows, dp_bitmap, S_ROUTER_IN_LB_AFF_CHECK, 100,
+        new_lb_match, ds_cstr(&aff_check_action), &lb->nlb->header_);
+
     /* Prepare common part of affinity LB and affinity learn action. */
     ds_put_format(&aff_action, "%s = %s; ", reg_vip, lb_vip->vip_str);
     ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
@@ -7252,6 +7294,7 @@ build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
     ds_destroy(&aff_action_learn);
     ds_destroy(&aff_match);
     ds_destroy(&aff_match_learn);
+    ds_destroy(&aff_check_action);
 }
 
 /* Builds the logical switch flows related to load balancer affinity.
@@ -10450,10 +10493,8 @@ enum lrouter_nat_lb_flow_type {
 
 struct lrouter_nat_lb_flows_ctx {
     const char *new_action[LROUTER_NAT_LB_FLOW_MAX];
-    const char *est_action[LROUTER_NAT_LB_FLOW_MAX];
 
     struct ds *new_match;
-    struct ds *est_match;
     struct ds *undnat_match;
 
     struct ovn_lb_vip *lb_vip;
@@ -10471,10 +10512,22 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
                                      enum lrouter_nat_lb_flow_type type,
                                      struct ovn_datapath *od)
 {
-    char *gw_action = od->is_gw_router ? "ct_dnat;" : "ct_dnat_in_czone;";
+    const char *undnat_action;
+
+    switch (type) {
+    case LROUTER_NAT_LB_FLOW_FORCE_SNAT:
+        undnat_action = "flags.force_snat_for_lb = 1; next;";
+        break;
+    case LROUTER_NAT_LB_FLOW_SKIP_SNAT:
+        undnat_action = "flags.skip_snat_for_lb = 1; next;";
+        break;
+    case LROUTER_NAT_LB_FLOW_NORMAL:
+    case LROUTER_NAT_LB_FLOW_MAX:
+        undnat_action = od->is_gw_router ? "ct_dnat;" : "ct_dnat_in_czone;";
+        break;
+    }
     /* Store the match lengths, so we can reuse the ds buffer. */
     size_t new_match_len = ctx->new_match->length;
-    size_t est_match_len = ctx->est_match->length;
     size_t undnat_match_len = ctx->undnat_match->length;
 
 
@@ -10487,33 +10540,24 @@ build_distr_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
     if (ctx->lb_vip->n_backends || !ctx->lb_vip->empty_backend_rej) {
         ds_put_format(ctx->new_match, " && is_chassis_resident(%s)",
                       od->l3dgw_ports[0]->cr_port->json_key);
-        ds_put_format(ctx->est_match, " && is_chassis_resident(%s)",
-                      od->l3dgw_ports[0]->cr_port->json_key);
     }
 
     ovn_lflow_add_with_hint__(ctx->lflows, od, S_ROUTER_IN_DNAT, ctx->prio,
                               ds_cstr(ctx->new_match), ctx->new_action[type],
                               NULL, meter, &ctx->lb->nlb->header_);
-    ovn_lflow_add_with_hint(ctx->lflows, od, S_ROUTER_IN_DNAT, ctx->prio,
-                            ds_cstr(ctx->est_match), ctx->est_action[type],
-                            &ctx->lb->nlb->header_);
 
     ds_truncate(ctx->new_match, new_match_len);
-    ds_truncate(ctx->est_match, est_match_len);
 
     if (!ctx->lb_vip->n_backends) {
         return;
     }
 
-    const char *action = (type == LROUTER_NAT_LB_FLOW_NORMAL)
-                         ? gw_action : ctx->est_action[type];
-
     ds_put_format(ctx->undnat_match,
                   ") && outport == %s && is_chassis_resident(%s)",
                   od->l3dgw_ports[0]->json_key,
                   od->l3dgw_ports[0]->cr_port->json_key);
     ovn_lflow_add_with_hint(ctx->lflows, od, S_ROUTER_OUT_UNDNAT, 120,
-                            ds_cstr(ctx->undnat_match), action,
+                            ds_cstr(ctx->undnat_match), undnat_action,
                             &ctx->lb->nlb->header_);
     ds_truncate(ctx->undnat_match, undnat_match_len);
 }
@@ -10556,11 +10600,6 @@ build_gw_lrouter_nat_flows_for_lb(struct lrouter_nat_lb_flows_ctx *ctx,
             ctx->new_action[type], &ctx->lb->nlb->header_);
     }
     bitmap_free(dp_non_meter);
-
-    ovn_lflow_add_with_dp_group(
-        ctx->lflows, dp_bitmap, S_ROUTER_IN_DNAT, ctx->prio,
-        ds_cstr(ctx->est_match), ctx->est_action[type],
-        &ctx->lb->nlb->header_);
 }
 
 static void
@@ -10572,19 +10611,13 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
                                const struct shash *meter_groups,
                                const struct chassis_features *features)
 {
-    const char *ct_natted = features->ct_no_masked_label
-                            ? "ct_mark.natted"
-                            : "ct_label.natted";
-
     bool ipv4 = lb_vip->address_family == AF_INET;
     const char *ip_match = ipv4 ? "ip4" : "ip6";
-    const char *ip_reg = ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6;
 
     int prio = 110;
 
     struct ds skip_snat_act = DS_EMPTY_INITIALIZER;
     struct ds force_snat_act = DS_EMPTY_INITIALIZER;
-    struct ds est_match = DS_EMPTY_INITIALIZER;
     struct ds undnat_match = DS_EMPTY_INITIALIZER;
     struct ds unsnat_match = DS_EMPTY_INITIALIZER;
 
@@ -10601,19 +10634,14 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
      * of "ct_lb_mark($targets);". The other flow is for ct.est with
      * an action of "next;".
      */
-    ds_put_format(match, "ct.new && !ct.rel && %s && %s == %s",
-                  ip_match, ip_reg, lb_vip->vip_str);
+    ds_put_format(match, "ct.new && !ct.rel && %s && %s.dst == %s",
+                  ip_match, ip_match, lb_vip->vip_str);
     if (lb_vip->port_str) {
         prio = 120;
-        ds_put_format(match, " && %s && "REG_ORIG_TP_DPORT_ROUTER" == %s",
-                      lb->proto, lb_vip->port_str);
+        ds_put_format(match, " && %s && %s.dst == %s",
+                      lb->proto, lb->proto, lb_vip->port_str);
     }
 
-    ds_put_cstr(&est_match, "ct.est");
-    /* Clone the match after initial "ct.new" (6 bytes). */
-    ds_put_cstr(&est_match, ds_cstr(match) + 6);
-    ds_put_format(&est_match, " && %s == 1", ct_natted);
-
     /* Add logical flows to UNDNAT the load balanced reverse traffic in
      * the router egress pipleine stage - S_ROUTER_OUT_UNDNAT if the logical
      * router has a gateway router port associated.
@@ -10650,20 +10678,12 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
         .lflows = lflows,
         .meter_groups = meter_groups,
         .new_match = match,
-        .est_match = &est_match,
         .undnat_match = &undnat_match
     };
 
     ctx.new_action[LROUTER_NAT_LB_FLOW_NORMAL] = ds_cstr(action);
-    ctx.est_action[LROUTER_NAT_LB_FLOW_NORMAL] = "next;";
-
     ctx.new_action[LROUTER_NAT_LB_FLOW_SKIP_SNAT] = ds_cstr(&skip_snat_act);
-    ctx.est_action[LROUTER_NAT_LB_FLOW_SKIP_SNAT] =
-                                        "flags.skip_snat_for_lb = 1; next;";
-
     ctx.new_action[LROUTER_NAT_LB_FLOW_FORCE_SNAT] = ds_cstr(&force_snat_act);
-    ctx.est_action[LROUTER_NAT_LB_FLOW_FORCE_SNAT] =
-                                        "flags.force_snat_for_lb = 1; next;";
 
     enum {
         LROUTER_NAT_LB_AFF            = LROUTER_NAT_LB_FLOW_MAX,
@@ -10746,7 +10766,6 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
 
     ds_destroy(&unsnat_match);
     ds_destroy(&undnat_match);
-    ds_destroy(&est_match);
     ds_destroy(&skip_snat_act);
     ds_destroy(&force_snat_act);
 
@@ -10820,39 +10839,19 @@ build_lrouter_defrag_flows_for_lb(struct ovn_northd_lb *lb,
         return;
     }
 
-    struct ds defrag_actions = DS_EMPTY_INITIALIZER;
     for (size_t i = 0; i < lb->n_vips; i++) {
         struct ovn_lb_vip *lb_vip = &lb->vips[i];
+        bool ipv6 = lb_vip->address_family == AF_INET6;
         int prio = 100;
 
-        ds_clear(&defrag_actions);
         ds_clear(match);
-
-        if (lb_vip->address_family == AF_INET) {
-            ds_put_format(match, "ip && ip4.dst == %s", lb_vip->vip_str);
-            ds_put_format(&defrag_actions, REG_NEXT_HOP_IPV4" = %s; ",
-                          lb_vip->vip_str);
-        } else {
-            ds_put_format(match, "ip && ip6.dst == %s", lb_vip->vip_str);
-            ds_put_format(&defrag_actions, REG_NEXT_HOP_IPV6" = %s; ",
-                          lb_vip->vip_str);
-        }
-
-        if (lb_vip->port_str) {
-            ds_put_format(match, " && %s", lb->proto);
-            prio = 110;
-
-            ds_put_format(&defrag_actions, REG_ORIG_TP_DPORT_ROUTER
-                          " = %s.dst; ", lb->proto);
-        }
-
-        ds_put_format(&defrag_actions, "ct_dnat;");
+        ds_put_format(match, "ip && ip%c.dst == %s", ipv6 ? '6' : '4',
+                      lb_vip->vip_str);
 
         ovn_lflow_add_with_dp_group(
             lflows, lb->nb_lr_map, S_ROUTER_IN_DEFRAG, prio,
-            ds_cstr(match), ds_cstr(&defrag_actions), &lb->nlb->header_);
+            ds_cstr(match), "ct_dnat;", &lb->nlb->header_);
     }
-    ds_destroy(&defrag_actions);
 }
 
 static void
@@ -10991,15 +10990,10 @@ copy_ra_to_sb(struct ovn_port *op, const char *address_mode)
 }
 
 static inline bool
-lrouter_nat_is_stateless(const struct nbrec_nat *nat)
+lrouter_dnat_and_snat_is_stateless(const struct nbrec_nat *nat)
 {
-    const char *stateless = smap_get(&nat->options, "stateless");
-
-    if (stateless && !strcmp(stateless, "true")) {
-        return true;
-    }
-
-    return false;
+    return smap_get_bool(&nat->options, "stateless", false) &&
+           !strcmp(nat->type, "dnat_and_snat");
 }
 
 /* Handles the match criteria and actions in logical flow
@@ -12814,8 +12808,7 @@ build_gateway_redirect_flows_for_lrouter(
         for (int j = 0; j < od->n_nat_entries; j++) {
             const struct ovn_nat *nat = &od->nat_entries[j];
 
-            if (!lrouter_nat_is_stateless(nat->nb) ||
-                strcmp(nat->nb->type, "dnat_and_snat") ||
+            if (!lrouter_dnat_and_snat_is_stateless(nat->nb) ||
                 (!nat->nb->allowed_ext_ips && !nat->nb->exempted_ext_ips)) {
                 continue;
             }
@@ -13038,9 +13031,27 @@ build_misc_local_traffic_drop_flows_for_lrouter(
         ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50,
                       "eth.bcast", debug_drop_action());
 
+        /* Avoid ICMP time exceeded for multicast, silent drop instead.
+         * See RFC1812 section 5.3.1:
+         *  If the TTL is reduced to zero (or less), the packet MUST be
+         *  discarded, and if the destination is NOT A MULTICAST address the
+         *  router MUST send an ICMP Time Exceeded message ...
+         *
+         * The reason is that TTL has special meanings for multicast.
+         * For example, TTL = 1 means restricted to the same subnet, not
+         * forwarded by the router. So it is very common to see multicast
+         * packets with ttl = 1, and generating ICMP for such packets is
+         * harmful from both a slowpath performance and a functionality
+         * point of view.
+         *
+         * (priority-31 flows will send ICMP time exceeded) */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 32,
+                      "ip.ttl == {0, 1} && !ip.later_frag && "
+                      "(ip4.mcast || ip6.mcast)", debug_drop_action());
+
         /* TTL discard */
         ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 30,
-                      "ip4 && ip.ttl == {0, 1}", debug_drop_action());
+                      "ip.ttl == {0, 1}", debug_drop_action());
 
         /* Pass other traffic not already handled to the next table for
          * routing. */
@@ -13224,7 +13235,7 @@ build_ipv6_input_flows_for_lrouter_port(
                           "outport = %s; flags.loopback = 1; output; };",
                           ds_cstr(&ip_ds), op->json_key);
             ovn_lflow_add_with_hint__(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                    100, ds_cstr(match), ds_cstr(actions), NULL,
+                    31, ds_cstr(match), ds_cstr(actions), NULL,
                     copp_meter_get(COPP_ICMP6_ERR, op->od->nbr->copp,
                                    meter_groups),
                     &op->nbrp->header_);
@@ -13352,7 +13363,7 @@ build_lrouter_ipv4_ip_input(struct ovn_port *op,
                           "outport = %s; flags.loopback = 1; output; };",
                           ds_cstr(&ip_ds), op->json_key);
             ovn_lflow_add_with_hint__(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                    100, ds_cstr(match), ds_cstr(actions), NULL,
+                    31, ds_cstr(match), ds_cstr(actions), NULL,
                     copp_meter_get(COPP_ICMP4_ERR, op->od->nbr->copp,
                                    meter_groups),
                     &op->nbrp->header_);
@@ -13597,13 +13608,13 @@ build_lrouter_in_unsnat_flow(struct hmap *lflows, struct ovn_datapath *od,
         return;
     }
 
-    bool stateless = lrouter_nat_is_stateless(nat);
+    bool stateless = lrouter_dnat_and_snat_is_stateless(nat);
     if (od->is_gw_router) {
         ds_clear(match);
         ds_clear(actions);
         ds_put_format(match, "ip && ip%s.dst == %s",
                       is_v6 ? "6" : "4", nat->external_ip);
-        if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+        if (stateless) {
             ds_put_format(actions, "next;");
         } else {
             ds_put_cstr(actions, "ct_snat;");
@@ -13628,7 +13639,7 @@ build_lrouter_in_unsnat_flow(struct hmap *lflows, struct ovn_datapath *od,
                           l3dgw_port->cr_port->json_key);
         }
 
-        if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+        if (stateless) {
             ds_put_format(actions, "next;");
         } else {
             ds_put_cstr(actions, "ct_snat_in_czone;");
@@ -13670,7 +13681,7 @@ build_lrouter_in_dnat_flow(struct hmap *lflows, struct ovn_datapath *od,
     * IP address that needs to be DNATted from a external IP address
     * to a logical IP address. */
     if (!strcmp(nat->type, "dnat") || !strcmp(nat->type, "dnat_and_snat")) {
-        bool stateless = lrouter_nat_is_stateless(nat);
+        bool stateless = lrouter_dnat_and_snat_is_stateless(nat);
 
         if (od->is_gw_router) {
             /* Packet when it goes from the initiator to destination.
@@ -13692,7 +13703,7 @@ build_lrouter_in_dnat_flow(struct hmap *lflows, struct ovn_datapath *od,
                 ds_put_format(actions, "flags.force_snat_for_dnat = 1; ");
             }
 
-            if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+            if (stateless) {
                 ds_put_format(actions, "flags.loopback = 1; "
                               "ip%s.dst=%s; next;",
                               is_v6 ? "6" : "4", nat->logical_ip);
@@ -13782,8 +13793,7 @@ build_lrouter_out_undnat_flow(struct hmap *lflows, struct ovn_datapath *od,
                       ETH_ADDR_ARGS(mac));
     }
 
-    if (!strcmp(nat->type, "dnat_and_snat") &&
-        lrouter_nat_is_stateless(nat)) {
+    if (lrouter_dnat_and_snat_is_stateless(nat)) {
         ds_put_format(actions, "next;");
     } else {
         ds_put_format(actions,
@@ -13839,7 +13849,7 @@ build_lrouter_out_snat_flow(struct hmap *lflows, struct ovn_datapath *od,
         return;
     }
 
-    bool stateless = lrouter_nat_is_stateless(nat);
+    bool stateless = lrouter_dnat_and_snat_is_stateless(nat);
     if (od->is_gw_router) {
         ds_clear(match);
         ds_put_format(match, "ip && ip%s.src == %s",
@@ -13905,7 +13915,7 @@ build_lrouter_out_snat_flow(struct hmap *lflows, struct ovn_datapath *od,
                           ETH_ADDR_ARGS(mac));
         }
 
-        if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+        if (stateless) {
             ds_put_format(actions, "ip%s.src=%s; next;",
                           is_v6 ? "6" : "4", nat->external_ip);
         } else {
@@ -14217,10 +14227,10 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
     ovn_lflow_add(lflows, od, S_ROUTER_OUT_EGR_LOOP, 0, "1", "next;");
     ovn_lflow_add(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 0, "1", "next;");
 
-    /* Ingress DNAT and DEFRAG Table (Priority 50/70).
-     *
-     * The defrag stage needs to have flows for ICMP in order to get
-     * the correct ct_state that can be used by DNAT stage.
+    const char *ct_flag_reg = features->ct_no_masked_label
+                              ? "ct_mark"
+                              : "ct_label";
+    /* Ingress DNAT (Priority 50/70).
      *
      * Allow traffic that is related to an existing conntrack entry.
      * At the same time apply NAT for this traffic.
@@ -14231,16 +14241,10 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
      * that's generated from a non-listening UDP port.  */
     if (od->has_lb_vip && features->ct_lb_related) {
         ds_clear(match);
-        const char *ct_flag_reg = features->ct_no_masked_label
-                                  ? "ct_mark"
-                                  : "ct_label";
 
         ds_put_cstr(match, "ct.rel && !ct.est && !ct.new");
         size_t match_len = match->length;
 
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_DEFRAG, 50, "icmp || icmp6",
-                      "ct_dnat;");
-
         ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg);
         ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match),
                       "flags.skip_snat_for_lb = 1; ct_commit_nat;");
@@ -14251,10 +14255,34 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
                       "flags.force_snat_for_lb = 1; ct_commit_nat;");
 
         ds_truncate(match, match_len);
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50,
-                      "ct.rel && !ct.est && !ct.new", "ct_commit_nat;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50, ds_cstr(match),
+                      "ct_commit_nat;");
+    }
 
+    /* Ingress DNAT (Priority 50/70).
+     *
+     * Pass the traffic that is already established to the next table with
+     * proper flags set.
+     */
+    if (od->has_lb_vip) {
         ds_clear(match);
+
+        ds_put_format(match, "ct.est && !ct.rel && !ct.new && %s.natted",
+                      ct_flag_reg);
+        size_t match_len = match->length;
+
+        ds_put_format(match, " && %s.skip_snat == 1", ct_flag_reg);
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match),
+                      "flags.skip_snat_for_lb = 1; next;");
+
+        ds_truncate(match, match_len);
+        ds_put_format(match, " && %s.force_snat == 1", ct_flag_reg);
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 70, ds_cstr(match),
+                      "flags.force_snat_for_lb = 1; next;");
+
+        ds_truncate(match, match_len);
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50, ds_cstr(match),
+                      "next;");
     }
 
     /* If the router has load balancer or DNAT rules, re-circulate every packet
@@ -14267,6 +14295,9 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
      * flag set. Some NICs are unable to offload these flows.
      */
     if (od->is_gw_router && (od->nbr->n_nat || od->has_lb_vip)) {
+        /* Do not send ND packets to connection tracking. */
+        ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 100,
+                      "nd || nd_rs || nd_ra", "next;");
         ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 50,
                       "ip", "flags.loopback = 1; ct_dnat;");
         ovn_lflow_add(lflows, od, S_ROUTER_OUT_POST_UNDNAT, 50,
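Several hunks above replace lrouter_nat_is_stateless() with lrouter_dnat_and_snat_is_stateless(), folding into the helper the rule that the "stateless" option only takes effect for dnat_and_snat entries. A configuration sketch for such an entry, assuming a hypothetical router lr0 and addresses (--stateless requests the stateless behavior checked by the helper):

    ovn-nbctl --stateless lr-nat-add lr0 dnat_and_snat 172.16.1.10 10.0.0.10

For plain "snat" or "dnat" entries the option is ignored, which the renamed helper now makes explicit at every call site.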
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index 2eab2c4ae..e16d7d080 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -748,6 +748,12 @@
       drop behavior.
     </p>
 
+    <p>
+      A priority-65532 flow is added to allow IPv6 Neighbor solicitation,
+      Neighbor advertisement, Router solicitation, Router advertisement and
+      MLD packets regardless of other ACLs defined.
+    </p>
+
     <p>
       If the logical datapath has a stateful ACL or a load balancer with VIP
       configured, the following flows will also be added:
@@ -824,12 +830,6 @@
         in the request direction are skipped here to let a newly created
         ACL re-allow this connection.
       </li>
-
-      <li>
-        A priority-65532 flow that allows IPv6 Neighbor solicitation,
-        Neighbor discover, Router solicitation, Router advertisement and MLD
-        packets.
-      </li>
     </ul>
 
     <p>
@@ -2056,6 +2056,16 @@ output;
       db="OVN_Northbound"/> table.
     </p>
 
+    <p>
+      This table also has a priority-110 flow with the match
+      <code>outport == <var>I</var></code> for all logical switch
+      datapaths, where <var>I</var> is the peer of a logical router port.
+      This flow moves traffic to the next table and, if the logical switch
+      has no stateful ACL, clears the connection tracking state.  It is
+      added to skip connection tracking of packets that will enter the
+      logical router datapath from the logical switch datapath for routing.
+    </p>
+
     <h3>Egress Table 2: Pre-stateful</h3>
 
     <p>
@@ -2098,6 +2108,12 @@ output;
       <code>to-lport</code> ACLs.
     </p>
 
+    <p>
+      Similar to the ingress table, a priority-65532 flow is added to allow
+      IPv6 Neighbor solicitation, Neighbor advertisement, Router solicitation,
+      Router advertisement and MLD packets regardless of other ACLs defined.
+    </p>
+
     <p>
       In addition, the following flows are added.
     </p>
@@ -3066,10 +3082,18 @@ nd.tll = <var>external_mac</var>;
         broadcast address.  By definition this traffic should not be forwarded.
       </li>
 
+      <li>
+        Avoid ICMP time exceeded for multicast.  A priority-32 flow with match
+        <code>ip.ttl == {0, 1} &amp;&amp; !ip.later_frag &amp;&amp;
+        (ip4.mcast || ip6.mcast)</code> and actions <code>drop;</code> drops
+        multicast packets whose TTL has expired without sending ICMP time
+        exceeded.
+      </li>
+
       <li>
         <p>
           ICMP time exceeded.  For each router port <var>P</var>, whose IP
-          address is <var>A</var>, a priority-100 flow with match <code>inport
+          address is <var>A</var>, a priority-31 flow with match <code>inport
           == <var>P</var> &amp;&amp; ip.ttl == {0, 1} &amp;&amp;
           !ip.later_frag</code> matches packets whose TTL has expired, with the
           following actions to send an ICMP time exceeded reply for IPv4 and
@@ -3282,35 +3306,16 @@ icmp6 {
     </p>
 
     <p>
-      If load balancing rules with only virtual IP addresses are configured in
+      For all load balancing rules that are configured in
       <code>OVN_Northbound</code> database for a Gateway router,
       a priority-100 flow is added for each configured virtual IP address
       <var>VIP</var>. For IPv4 <var>VIPs</var> the flow matches
       <code>ip &amp;&amp; ip4.dst == <var>VIP</var></code>.  For IPv6
       <var>VIPs</var>, the flow matches <code>ip &amp;&amp; ip6.dst ==
-      <var>VIP</var></code>. The flow applies the action <code>reg0 =
-      <var>VIP</var>; ct_dnat;</code>  (or <code>xxreg0</code> for IPv6) to
-      send IP packets to the connection tracker for packet de-fragmentation and
-      to dnat the destination IP for the committed connection before sending it
-      to the next table.
-    </p>
-
-    <p>
-      If load balancing rules with virtual IP addresses and ports are
-      configured in <code>OVN_Northbound</code> database for a Gateway router,
-      a priority-110 flow is added for each configured virtual IP address
-      <var>VIP</var>, protocol <var>PROTO</var> and port <var>PORT</var>.
-      For IPv4 <var>VIPs</var> the flow matches
-      <code>ip &amp;&amp; ip4.dst == <var>VIP</var> &amp;&amp;
-      <var>PROTO</var> &amp;&amp; <var>PROTO</var>.dst ==
-      <var>PORT</var></code>. For IPv6 <var>VIPs</var>, the flow matches
-      <code>ip &amp;&amp; ip6.dst == <var>VIP</var> &amp;&amp;
-      <var>PROTO</var> &amp;&amp; <var>PROTO</var>.dst ==
-      <var>PORT</var></code>. The flow applies the action <code>reg0 =
-      <var>VIP</var>; reg9[16..31] = <var>PROTO</var>.dst; ct_dnat;</code>
-      (or <code>xxreg0</code> for IPv6) to send IP packets to the connection
-      tracker for packet de-fragmentation and to dnat the destination IP for
-      the committed connection before sending it to the next table.
+      <var>VIP</var></code>. The flow applies the action <code>ct_dnat;</code>
+      to send IP packets to the connection tracker for packet de-fragmentation
+      and to dnat the destination IP for the committed connection before
+      sending it to the next table.
     </p>
 
     <p>
@@ -3349,10 +3354,11 @@ icmp6 {
         column, that includes a L4 port <var>PORT</var> of protocol
         <var>P</var> and IPv4 or IPv6 address <var>VIP</var>, a priority-100
         flow that matches on <code>ct.new &amp;&amp; ip &amp;&amp;
-        reg0 == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; reg9[16..31]
+        ip.dst == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; P.dst
         == </code> <code><var>PORT</var></code> (<code>xxreg0 == <var>VIP
-        </var></code> in the IPv6 case) with an action of <code>reg9[6] =
-        chk_lb_aff(); next;</code>
+        </var></code> in the IPv6 case) with an action of <code>reg0 = ip.dst;
+        reg9[16..31] = P.dst; reg9[6] = chk_lb_aff(); next;</code>
+        (<code>xxreg0 = ip6.dst;</code> in the IPv6 case)
       </li>
 
       <li>
@@ -3385,9 +3391,8 @@ icmp6 {
         column, that includes a L4 port <var>PORT</var> of protocol
         <var>P</var> and IPv4 or IPv6 address <var>VIP</var>, a priority-150
         flow that matches on <code>reg9[6] == 1 &amp;&amp; ct.new &amp;&amp;
-        ip &amp;&amp; reg0 == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp;
-        reg9[16..31] == </code> <code><var>PORT</var></code> (<code>xxreg0
-        == <var>VIP</var></code> in the IPv6 case) with an action of
+        ip &amp;&amp; ip.dst == <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp;
+        P.dst == </code> <code><var>PORT</var></code> with an action of
         <code>ct_lb_mark(<var>args</var>) </code>, where <var>args</var>
         contains comma separated IP addresses (and optional port numbers)
         to load balance to.  The address family of the IP addresses of
@@ -3410,56 +3415,25 @@ icmp6 {
           Router with gateway port in <code>OVN_Northbound</code> database that
           includes a L4 port <var>PORT</var> of protocol <var>P</var> and IPv4
           or IPv6 address <var>VIP</var>, a priority-120 flow that matches on
-          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip &amp;&amp; reg0 ==
-          <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; reg9[16..31] ==
-          </code> <code><var>PORT</var></code> (<code>xxreg0 == <var>VIP</var>
-          </code> in the IPv6 case) with an action of
+          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip &amp;&amp; ip.dst ==
+          <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; P.dst ==
+          </code> <code><var>PORT</var></code> with an action of
           <code>ct_lb_mark(<var>args</var>)</code>, where <var>args</var> contains
           comma separated IPv4 or IPv6 addresses (and optional port numbers) to
           load balance to.  If the router is configured to force SNAT any
           load-balanced packets, the above action will be replaced by
-          <code>flags.force_snat_for_lb = 1; ct_lb_mark(<var>args</var>);</code>.
+          <code>flags.force_snat_for_lb = 1; ct_lb_mark(<var>args</var>;
+          force_snat);</code>.
           If the load balancing rule is configured with <code>skip_snat</code>
           set to true, the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>);</code>.
+          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>;
+          skip_snat);</code>.
           If health check is enabled, then
           <var>args</var> will only contain those endpoints whose service
           monitor status entry in <code>OVN_Southbound</code> db is
           either <code>online</code> or empty.
         </p>
 
-        <p>
-          The previous table <code>lr_in_defrag</code> sets the register
-          <code>reg0</code> (or <code>xxreg0</code> for IPv6) and does
-          <code>ct_dnat</code>.  Hence for established traffic, this
-          table just advances the packet to the next stage.
-        </p>
-      </li>
-
-      <li>
-        <p>
-          For all the configured load balancing rules for a router in
-          <code>OVN_Northbound</code> database that includes a L4 port
-          <var>PORT</var> of protocol <var>P</var> and IPv4 or IPv6 address
-          <var>VIP</var>, a priority-120 flow that matches on
-          <code>ct.est &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; reg0 ==
-          <var>VIP</var> &amp;&amp; <var>P</var> &amp;&amp; reg9[16..31] ==
-          </code> <code><var>PORT</var></code> (<code>ip6</code> and
-          <code>xxreg0 == <var>VIP</var></code> in the IPv6 case) with an
-          action of <code>next;</code>. If the router is configured to force
-          SNAT any load-balanced packets, the above action will be replaced by
-          <code>flags.force_snat_for_lb = 1; next;</code>. If the load
-          balancing rule is configured with <code>skip_snat</code> set to true,
-          the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; next;</code>.
-        </p>
-
-        <p>
-          The previous table <code>lr_in_defrag</code> sets the register
-          <code>reg0</code> (or <code>xxreg0</code> for IPv6) and does
-          <code>ct_dnat</code>.  Hence for established traffic, this
-          table just advances the packet to the next stage.
-        </p>
       </li>
 
       <li>
@@ -3467,42 +3441,17 @@ icmp6 {
           For all the configured load balancing rules for a router in
           <code>OVN_Northbound</code> database that includes just an IP address
           <var>VIP</var> to match on, a priority-110 flow that matches on
-          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; reg0 ==
-          <var>VIP</var></code> (<code>ip6</code> and <code>xxreg0 ==
-          <var>VIP</var></code> in the IPv6 case) with an action of
+          <code>ct.new &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; ip.dst ==
+          <var>VIP</var></code> with an action of
           <code>ct_lb_mark(<var>args</var>)</code>, where <var>args</var> contains
           comma separated IPv4 or IPv6 addresses.  If the router is configured
           to force SNAT any load-balanced packets, the above action will be
           replaced by <code>flags.force_snat_for_lb = 1;
-          ct_lb_mark(<var>args</var>);</code>.
-          If the load balancing rule is configured with <code>skip_snat</code>
-          set to true, the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>);</code>.
-        </p>
-
-        <p>
-          The previous table <code>lr_in_defrag</code> sets the register
-          <code>reg0</code> (or <code>xxreg0</code> for IPv6) and does
-          <code>ct_dnat</code>.  Hence for established traffic, this
-          table just advances the packet to the next stage.
-        </p>
-      </li>
-
-
-      <li>
-        <p>
-          For all the configured load balancing rules for a router in
-          <code>OVN_Northbound</code> database that includes just an IP address
-          <var>VIP</var> to match on, a priority-110 flow that matches on
-          <code>ct.est &amp;&amp; !ct.rel &amp;&amp; ip4 &amp;&amp; reg0 ==
-          <var>VIP</var></code> (or <code>ip6</code> and
-          <code>xxreg0 == <var>VIP</var></code>) with an action of
-          <code>next;</code>. If the router is configured to force SNAT any
-          load-balanced packets, the above action will be replaced by
-          <code>flags.force_snat_for_lb = 1; next;</code>.
+          ct_lb_mark(<var>args</var>; force_snat);</code>.
           If the load balancing rule is configured with <code>skip_snat</code>
           set to true, the above action will be replaced by
-          <code>flags.skip_snat_for_lb = 1; next;</code>.
+          <code>flags.skip_snat_for_lb = 1; ct_lb_mark(<var>args</var>;
+          skip_snat);</code>.
         </p>
 
         <p>
@@ -3529,7 +3478,20 @@ icmp6 {
             with an action of <code>ct_commit_nat;</code>, if the router
             has load balancer assigned to it. Along with two priority 70 flows
             that match <code>skip_snat</code> and <code>force_snat</code>
-            flags.
+            flags, setting the <code>flags.force_snat_for_lb = 1</code> or
+            <code>flags.skip_snat_for_lb = 1</code> accordingly.
+        </p>
+      </li>
+      <li>
+        <p>
+          For established traffic, a priority 50 flow that matches
+          <code>ct.est &amp;&amp; !ct.rel &amp;&amp; !ct.new &amp;&amp;
+          ct_mark.natted</code> with an action of <code>next;</code>,
+          if the router has a load balancer assigned to it. Along with two
+          priority 70 flows that match <code>skip_snat</code> and
+          <code>force_snat</code> flags, setting the
+          <code>flags.force_snat_for_lb = 1</code> or
+          <code>flags.skip_snat_for_lb = 1</code> accordingly.
         </p>
       </li>
     </ul>
@@ -4721,6 +4683,11 @@ nd_ns {
     <h3>Egress Table 1: UNDNAT on Gateway Routers</h3>
 
     <ul>
+      <li>
+        For IPv6 Neighbor Discovery or Router Solicitation/Advertisement
+        traffic, a priority-100 flow with action <code>next;</code>.
+      </li>
+
       <li>
         For all IP packets, a priority-50 flow with an action
         <code>flags.loopback = 1; ct_dnat;</code>.
@@ -4998,7 +4965,19 @@ nd_ns {
       </li>
     </ul>
 
-    <h3>Egress Table 4: Egress Loopback</h3>
+    <h3>Egress Table 4: Post SNAT</h3>
+
+    <p>
+      Packets reaching this table are processed according to the flows below:
+    </p>
+    <ul>
+      <li>
+        A priority-0 logical flow that matches all packets not already
+        handled (match <code>1</code>) and action <code>next;</code>.
+      </li>
+    </ul>
+
+    <h3>Egress Table 5: Egress Loopback</h3>
 
     <p>
       For distributed logical routers where one of the logical router
@@ -5070,7 +5049,7 @@ clone {
       </li>
     </ul>
 
-    <h3>Egress Table 5: Delivery</h3>
+    <h3>Egress Table 6: Delivery</h3>
 
     <p>
       Packets that reach this table are ready for delivery.  It contains:
diff --git a/ovn-architecture.7.xml b/ovn-architecture.7.xml
index cb1064f71..86c6258e0 100644
--- a/ovn-architecture.7.xml
+++ b/ovn-architecture.7.xml
@@ -1318,7 +1318,7 @@
         output port is known. These pieces of information are obtained
         from the tunnel encapsulation metadata (see <code>Tunnel
         Encapsulations</code> for encoding details). Then the actions resubmit
-        to table 33 to enter the logical egress pipeline.
+        to table 38 to enter the logical egress pipeline.
       </p>
     </li>
 
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 8d56d0c6e..35acda107 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -2036,6 +2036,14 @@ or
         the affinity timeslot. Max supported affinity_timeout is 65535
         seconds.
       </column>
+
+      <column name="options" key="ct_flush" type='{"type": "boolean"}'>
+        The value indicates whether ovn-controller should flush CT entries
+        that are related to this LB. The flush happens if the LB is removed,
+        any of the backends is updated/removed or the LB is not considered
+        local anymore by the ovn-controller. This option is set to
+        <code>false</code> by default.
+      </column>
     </group>
   </table>
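The new per-load-balancer option documented above can be toggled with the generic ovn-nbctl database commands; a usage sketch, assuming a hypothetical load balancer named lb0 (flushing is disabled by default):

    ovn-nbctl set load_balancer lb0 options:ct_flush=true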
 
diff --git a/ovn-sb.xml b/ovn-sb.xml
index a77f8f4ef..8ca206109 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -472,9 +472,8 @@
 
     <column name="type">
       The encapsulation to use to transmit packets to this chassis.
-      Hypervisors must use either <code>geneve</code> or
-      <code>stt</code>.  Gateways may use <code>vxlan</code>,
-      <code>geneve</code>, or <code>stt</code>.
+      Hypervisors and gateways must use one of: <code>geneve</code>,
+      <code>vxlan</code>, or <code>stt</code>.
     </column>
 
     <column name="options">
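With the relaxed requirement above, a hypervisor chassis may now use VXLAN as its tunnel encapsulation. A chassis-side configuration sketch, assuming a hypothetical encapsulation IP (ovn-controller reads these keys from the local Open_vSwitch table):

    ovs-vsctl set open . external-ids:ovn-encap-type=vxlan \
                         external-ids:ovn-encap-ip=192.168.0.10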
diff --git a/rhel/usr_lib_systemd_system_ovn-db@.service b/rhel/usr_lib_systemd_system_ovn-db@.service
index 98556a673..c835e4967 100644
--- a/rhel/usr_lib_systemd_system_ovn-db@.service
+++ b/rhel/usr_lib_systemd_system_ovn-db@.service
@@ -33,7 +33,7 @@ EnvironmentFile=-/etc/sysconfig/ovn-%i
 ExecStartPre=-/usr/bin/chown -R ${OVN_USER_ID} ${OVN_DBDIR}
 ExecStart=/usr/share/ovn/scripts/ovn-ctl \
           --ovn-user=${OVN_USER_ID} start_%i_ovsdb $OPTIONS $ovn_%i_opts
-ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_%i_ovsdb
+ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_%i_ovsdb $OPTIONS $ovn_%i_opts
 
 [Install]
 WantedBy=multi-user.target
diff --git a/rhel/usr_lib_systemd_system_ovn-northd.service b/rhel/usr_lib_systemd_system_ovn-northd.service
index d281f861c..6c4c6621c 100644
--- a/rhel/usr_lib_systemd_system_ovn-northd.service
+++ b/rhel/usr_lib_systemd_system_ovn-northd.service
@@ -26,7 +26,7 @@ EnvironmentFile=-/etc/sysconfig/ovn-northd
 ExecStartPre=-/usr/bin/chown -R ${OVN_USER_ID} ${OVN_DBDIR}
 ExecStart=/usr/share/ovn/scripts/ovn-ctl \
           --ovn-user=${OVN_USER_ID} start_northd $OVN_NORTHD_OPTS
-ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_northd
+ExecStop=/usr/share/ovn/scripts/ovn-ctl stop_northd $OVN_NORTHD_OPTS
 
 [Install]
 WantedBy=multi-user.target
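Passing $OVN_NORTHD_OPTS (and $OPTIONS for the database units above) to ExecStop matters whenever the start command was given non-default ovn-ctl options that the stop path also needs to honor. A sketch, assuming the --ovn-manage-ovsdb ovn-ctl option is set via the unit's EnvironmentFile:

    # /etc/sysconfig/ovn-northd
    OVN_NORTHD_OPTS="--ovn-manage-ovsdb=no"

With this in place both start_northd and stop_northd should leave the NB/SB ovsdb-server processes alone, instead of only the start path knowing about the option.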
diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
index bbe142ae3..27fc44232 100644
--- a/tests/ovn-controller.at
+++ b/tests/ovn-controller.at
@@ -493,7 +493,8 @@ check ovn-nbctl --wait=hv sync
 
 # And check that it gets propagated to br-int external_ids.
 as hv1
-OVS_WAIT_UNTIL([ovs-vsctl get Bridge br-int external_ids:ovn-nb-cfg], [0], [1])
+OVS_WAIT_FOR_OUTPUT([ovs-vsctl get Bridge br-int external_ids:ovn-nb-cfg], [0], ["1"
+])
 
 nb_cfg_ts=$(fetch_column Chassis_Private nb_cfg_timestamp name=hv1)
 as hv1
@@ -672,22 +673,26 @@ check ovs-vsctl del-ssl
 start_daemon ovn-controller -p $key -c $cert -C $cacert
 
 # SSL should not connect because of key and cert mismatch
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [not connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [not connected
+])
 
 # Modify the files with the correct key and cert, and reconnect should succeed
 cp $PKIDIR/$key $key
 cp $PKIDIR/$cert $cert
 
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [connected
+])
 
 # Remove the files and expect the connection to drop
 rm $key $cert
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [not connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [not connected
+])
 
 # Restore the files again and expect the connection to recover
 cp $PKIDIR/$key $key
 cp $PKIDIR/$cert $cert
-OVS_WAIT_UNTIL([ovn-appctl -t ovn-controller connection-status], [0], [connected])
+OVS_WAIT_FOR_OUTPUT([ovn-appctl -t ovn-controller connection-status], [0], [connected
+])
 
 cat hv1/ovn-controller.log
 
@@ -2060,6 +2065,57 @@ AT_CHECK([echo $(($reprocess_count_new - $reprocess_count_old))], [0], [2
 OVN_CLEANUP([hv1])
 AT_CLEANUP
 
+AT_SETUP([ovn-controller - address set del-and-add])
+
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+check ovs-vsctl -- add-port br-int hv1-vif1 -- \
+    set interface hv1-vif1 external-ids:iface-id=ls1-lp1
+
+check ovn-nbctl ls-add ls1
+
+check ovn-nbctl lsp-add ls1 ls1-lp1 \
+-- lsp-set-addresses ls1-lp1 "f0:00:00:00:00:01"
+
+wait_for_ports_up
+ovn-appctl -t ovn-controller vlog/set file:dbg
+
+ovn-nbctl create address_set name=as1 addresses=8.8.8.8
+check ovn-nbctl acl-add ls1 to-lport 100 'outport == "ls1-lp1" && ip4.src == $as1' drop
+check ovn-nbctl --wait=hv sync
+AT_CHECK([ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100"], [0], [1
+])
+
+# pause ovn-northd
+check as northd ovn-appctl -t ovn-northd pause
+check as northd-backup ovn-appctl -t ovn-northd pause
+
+# Simulate an SB address set "del and add" notification reaching ovn-controller
+# in the same IDL iteration. The flows programmed by ovn-controller should
+# reflect the newly added address set. In reality this can happen when the CMS
+# deletes an address set and immediately creates a new one with the same name
+# (with the same or different content). The notification of both changes can
+# reach ovn-controller in one shot, and the order of the "del" and the "add"
+# within the IDL update is undefined. This test runs the scenario ten times to
+# make sure the different orderings are covered and handled properly. (A
+# CMS-side sketch of this del-and-add pattern follows the test.)
+
+flow_count=$(ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100")
+for i in $(seq 10); do
+    # Delete and recreate the SB address set with the same name and an extra IP.
+    addrs_=$(fetch_column address_set addresses name=as1)
+    addrs=${addrs_// /,}
+    AT_CHECK([ovn-sbctl destroy address_set as1 -- create address_set name=as1 addresses=$addrs,1.1.1.$i], [0], [ignore])
+    OVS_WAIT_UNTIL([test $(as hv1 ovs-ofctl dump-flows br-int table=44 | grep -c "priority=1100") = "$(($i + 1))"])
+done
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
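For context, a minimal CMS-side sketch of the del-and-add pattern the comment in the test above describes, assuming an NB address set named as1 already exists (the name and addresses here are illustrative, not part of the test):

    # Drop the address set and re-create it under the same name in a single
    # ovn-nbctl transaction; the resulting SB "del" and "add" may then reach
    # ovn-controller within one IDL run, which is the ordering the test covers.
    ovn-nbctl destroy address_set as1 -- \
        create address_set name=as1 addresses=8.8.8.8,1.1.1.1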
 AT_SETUP([ovn-controller - I-P handle lb_hairpin_use_ct_mark change])
 
 ovn_start --backup-northd=none
@@ -2161,7 +2217,7 @@ AT_CHECK([ovs-ofctl dump-flows br-int | grep 10.1.2.3], [0], [ignore])
 sleep 5
 
 # Check after the wait
-OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4], [0], [ignore])
+OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4])
 lflow_run_2=$(ovn-appctl -t ovn-controller coverage/read-counter lflow_run)
 
 # Verify that the flow compute completed during the wait (after the wait it
@@ -2172,7 +2228,7 @@ AT_CHECK_UNQUOTED([echo $lflow_run_1], [0], [$lflow_run_2
 # Restart OVS this time, and wait until flows are reinstalled
 OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
 start_daemon ovs-vswitchd --enable-dummy=system -vvconn -vofproto_dpif -vunixctl
-OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4], [0], [ignore])
+OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int | grep 10.1.2.4])
 
 check ovn-nbctl --wait=hv lb-add lb3 2.2.2.2 10.1.2.5 \
 -- ls-lb-add ls1 lb3
diff --git a/tests/ovn-macros.at b/tests/ovn-macros.at
index ee942e8a6..6f2d085ae 100644
--- a/tests/ovn-macros.at
+++ b/tests/ovn-macros.at
@@ -817,6 +817,29 @@ ovn_trace_client() {
     ovs-appctl -t $target trace "$@" | tee trace | sed '/^# /d'
 }
 
+# Takes a string of scapy Python code that describes a packet and prints a
+# hex string containing the raw bytes of that packet.
+#
+# Scapy docs: https://scapy.readthedocs.io/en/latest/usage.html
+#
+# Example usage:
+#
+# packet=$(fmt_pkt "
+#     Ether(dst='ff:ff:ff:ff:ff:ff', src='50:64:00:00:00:01') /
+#     IPv6(src='abed::1', dst='ff02::1:ff00:2') /
+#     ICMPv6ND_NS(tgt='abed::2')
+# ")
+#
+# ovs-appctl netdev-dummy/receive $vif $packet
+#
+fmt_pkt() {
+    echo "from scapy.all import *; \
+          import binascii; \
+          out = binascii.hexlify(raw($1)); \
+          print(out.decode())" | $PYTHON3
+}
+
 OVS_END_SHELL_HELPERS
 
 m4_define([OVN_POPULATE_ARP], [AT_CHECK(ovn_populate_arp__, [0], [ignore])])
diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at
index 2fffe1850..478a32f5a 100644
--- a/tests/ovn-nbctl.at
+++ b/tests/ovn-nbctl.at
@@ -1482,6 +1482,32 @@ UUID                                    LB                  PROTO      VIP
 
 dnl ---------------------------------------------------------------------
 
+OVN_NBCTL_TEST([ovn_nbctl_template_lbs], [Template LBs], [
+check ovn-nbctl --template lb-add lb0 ^vip ^backend
+check ovn-nbctl --template lb-add lb1 ^vip:^vport ^backend udp
+check ovn-nbctl --template lb-add lb2 ^vip:^vport ^backend udp ipv4
+check ovn-nbctl --template lb-add lb3 ^vip:^vport ^backend udp ipv6
+check ovn-nbctl --template lb-add lb4 ^vip:^vport ^backend:^bport udp ipv4
+check ovn-nbctl --template lb-add lb5 ^vip:^vport ^backend:^bport udp ipv6
+check ovn-nbctl --template lb-add lb6 ^vip:^vport 1.1.1.1:111 udp ipv4
+check ovn-nbctl --template lb-add lb7 ^vip:^vport [[1::1]]:111 udp ipv6
+
+AT_CHECK([ovn-nbctl lb-list | uuidfilt], [0], [dnl
+UUID                                    LB                  PROTO      VIP            IPs
+<0>    lb0                 tcp        ^vip           ^backend
+<1>    lb1                 udp        ^vip:^vport    ^backend
+<2>    lb2                 udp        ^vip:^vport    ^backend
+<3>    lb3                 udp        ^vip:^vport    ^backend
+<4>    lb4                 udp        ^vip:^vport    ^backend:^bport
+<5>    lb5                 udp        ^vip:^vport    ^backend:^bport
+<6>    lb6                 udp        ^vip:^vport    1.1.1.1:111
+<7>    lb7                 udp        ^vip:^vport    [[1::1]]:111
+])
+
+])
+
+dnl ---------------------------------------------------------------------
+
 OVN_NBCTL_TEST([ovn_nbctl_basic_lr], [basic logical router commands], [
 AT_CHECK([ovn-nbctl lr-add lr0])
 AT_CHECK([ovn-nbctl lr-list | uuidfilt], [0], [dnl
@@ -2599,6 +2625,7 @@ OVN_NBCTL_TEST_STOP "/terminating with signal 15/d"
 AT_CLEANUP
 
 AT_SETUP([ovn-nbctl - daemon ssl files change])
+AT_SKIP_IF([test "$HAVE_OPENSSL" = no])
 dnl Create ovn-nb database.
 AT_CHECK([ovsdb-tool create ovn-nb.db $abs_top_srcdir/ovn-nb.ovsschema])
 
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 3fa02d2b3..846f10e88 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -2486,6 +2486,7 @@ check ovn-nbctl --wait=sb \
 
 AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl
   table=17(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=17(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=17(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=3 (ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=3 (ls_out_acl_hint    ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -2530,9 +2531,12 @@ ovn-nbctl --wait=sb clear logical_switch ls load_balancer
 
 AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl
   table=17(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=17(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=3 (ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
+  table=4 (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=4 (ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=7 (ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
+  table=8 (ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=8 (ls_in_acl          ), priority=65535, match=(1), action=(next;)
 ])
 
@@ -3757,18 +3761,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3788,18 +3792,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3813,6 +3817,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -3838,18 +3843,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3864,6 +3869,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -3902,18 +3908,18 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.100 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -3929,6 +3935,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -3953,14 +3960,13 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.20 && tcp), action=(reg0 = 10.0.0.20; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.20), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | grep skip_snat_for_lb | sort], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.20 && tcp && tcp.dst == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
 
@@ -3970,6 +3976,7 @@ AT_CHECK([grep "lr_out_snat" lr0flows | grep skip_snat_for_lb | sed 's/table=./t
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -4111,6 +4118,7 @@ check ovn-nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0
 check ovn-nbctl --wait=sb sync
 
 check_stateful_flows() {
+    action=$1
     ovn-sbctl dump-flows sw0 > sw0flows
     AT_CAPTURE_FILE([sw0flows])
 
@@ -4144,12 +4152,12 @@ check_stateful_flows() {
   table=??(ls_in_stateful     ), priority=100  , match=(reg0[[1]] == 1 && reg0[[13]] == 1), action=(ct_commit { ct_mark.blocked = 0; ct_label.label = reg3; }; next;)
 ])
 
-    AT_CHECK([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl
+    AT_CHECK_UNQUOTED([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl
   table=1 (ls_out_pre_lb      ), priority=0    , match=(1), action=(next;)
   table=1 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
   table=1 (ls_out_pre_lb      ), priority=110  , match=(eth.mcast), action=(next;)
-  table=1 (ls_out_pre_lb      ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
-  table=1 (ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw0-lr0"), action=(next;)
+  table=1 (ls_out_pre_lb      ), priority=110  , match=(eth.src == \$svc_monitor_mac), action=(next;)
+  table=1 (ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw0-lr0"), action=($action)
   table=1 (ls_out_pre_lb      ), priority=110  , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;)
   table=1 (ls_out_pre_lb      ), priority=110  , match=(reg0[[16]] == 1), action=(next;)
 ])
@@ -4169,13 +4177,13 @@ check_stateful_flows() {
 ])
 }
 
-check_stateful_flows
+check_stateful_flows "ct_clear; next;"
 
 # Add few ACLs
 check ovn-nbctl --wait=sb acl-add sw0 from-lport 1002 "ip4 && tcp && tcp.dst == 80" allow-related
 check ovn-nbctl --wait=sb acl-add sw0 to-lport 1002 "ip4 && tcp && tcp.src == 80" drop
 
-check_stateful_flows
+check_stateful_flows "next;"
 
 # Remove load balancers from sw0
 check ovn-nbctl ls-lb-del sw0 lb0
@@ -4231,6 +4239,15 @@ AT_CHECK([grep "ls_out_stateful" sw0flows | sort], [0], [dnl
   table=7 (ls_out_stateful    ), priority=100  , match=(reg0[[1]] == 1 && reg0[[13]] == 1), action=(ct_commit { ct_mark.blocked = 0; ct_label.label = reg3; }; next;)
 ])
 
+# LB with event=false and reject=false
+AT_CHECK([ovn-nbctl create load_balancer name=lb1 options:reject=false options:event=false vips:\"10.0.0.20\"=\"\" protocol=tcp], [0], [ignore])
+check ovn-nbctl --wait=sb ls-lb-add sw0 lb1
+
+AT_CHECK([ovn-sbctl dump-flows sw0 | grep "ls_in_lb " | sort ], [0], [dnl
+  table=12(ls_in_lb           ), priority=0    , match=(1), action=(next;)
+  table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 10.0.0.20), action=(drop;)
+])
+
 AT_CLEANUP
 ])
 
@@ -5211,25 +5228,23 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5284,25 +5299,23 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5314,6 +5327,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5349,25 +5363,23 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5379,6 +5391,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5416,28 +5429,25 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.10 && tcp), action=(reg0 = 172.168.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.10 && tcp && tcp.dst == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5449,6 +5459,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5496,31 +5507,27 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(reg0 = 172.168.0.200; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.10 && tcp), action=(reg0 = 172.168.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.100 && tcp), action=(reg0 = 172.168.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip6.dst == def0::2 && tcp), action=(xxreg0 = def0::2; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 10.0.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.10), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.100), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.200), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip6.dst == def0::2), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=7 (lr_in_dnat         ), priority=100  , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.10 && tcp && tcp.dst == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.100 && tcp && tcp.dst == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && ip6.dst == def0::2 && tcp && tcp.dst == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5532,6 +5539,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5572,18 +5580,17 @@ AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && tcp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = tcp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 172.168.0.210 && udp), action=(reg0 = 172.168.0.210; reg9[[16..31]] = udp.dst; ct_dnat;)
-  table=5 (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 172.168.0.210), action=(ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && tcp && tcp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.0.210 && udp && udp.dst == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5594,6 +5601,7 @@ AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sor
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
   table=? (lr_out_undnat      ), priority=0    , match=(1), action=(next;)
+  table=? (lr_out_undnat      ), priority=100  , match=(nd || nd_rs || nd_ra), action=(next;)
   table=? (lr_out_undnat      ), priority=50   , match=(ip), action=(flags.loopback = 1; ct_dnat;)
 ])
 
@@ -5634,9 +5642,11 @@ ovn-sbctl set service_monitor $sm_vip2 status=offline
 
 AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.10.10 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.10.10), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.10), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5646,9 +5656,11 @@ check ovn-nbctl --wait=sb set load_balancer lb5 options:skip_snat=true
 
 AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.10.10 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.10.10), action=(flags.skip_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.10), action=(flags.skip_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -5660,9 +5672,58 @@ check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="route
 
 AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.168.10.10 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.168.10.10), action=(flags.force_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.10), action=(flags.force_snat_for_lb = 1; reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=3);};)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
+])
+
+# LB with event=false and reject=false
+check ovn-nbctl lr-lb-del lr0
+check ovn-nbctl remove logical_router lr0 options lb_force_snat_ip
+AT_CHECK([ovn-nbctl create load_balancer name=lb6 options:reject=false options:event=false vips:\"172.168.10.30\"=\"\" protocol=tcp], [0], [ignore])
+check ovn-nbctl --wait=sb lr-lb-add lr0 lb6
+
+AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
+  table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.30), action=(drop;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
+])
+
+# LB with event=false, reject=false and skip_snat
+check ovn-nbctl --wait=sb set load_balancer lb6 options:skip_snat=true
+
+AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
+  table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.30), action=(flags.skip_snat_for_lb = 1; drop;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
+])
+
+check ovn-nbctl remove load_balancer lb6 options skip_snat
+
+# LB with event=false, reject=false and force_snat
+check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="router_ip"
+
+AT_CHECK([ovn-sbctl dump-flows lr0 | grep "lr_in_dnat" | sort], [0], [dnl
+  table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.168.10.30), action=(flags.force_snat_for_lb = 1; drop;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -6692,11 +6753,12 @@ dnl Flows to skip TTL == {0, 1} check for IGMP and MLD packets.
 AT_CHECK([grep -e 'lr_in_ip_input    ' lrflows | grep -e 'igmp' -e 'mld' -e 'ip.ttl == {0, 1}' | sed 's/table=../table=??/'], [0], [dnl
   table=??(lr_in_ip_input     ), priority=120  , match=((mldv1 || mldv2) && ip.ttl == 1), action=(next;)
   table=??(lr_in_ip_input     ), priority=120  , match=(igmp && ip.ttl == 1), action=(next;)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp1" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 10.10.10.1 ; ip.ttl = 254; outport = "lrp1"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp1" && ip6 && ip6.src == 1010::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 1010::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp1"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp2" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 20.20.20.1 ; ip.ttl = 254; outport = "lrp2"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=100  , match=(inport == "lrp2" && ip6 && ip6.src == 2020::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 2020::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp2"; flags.loopback = 1; output; };)
-  table=??(lr_in_ip_input     ), priority=30   , match=(ip4 && ip.ttl == {0, 1}), action=(drop;)
+  table=??(lr_in_ip_input     ), priority=32   , match=(ip.ttl == {0, 1} && !ip.later_frag && (ip4.mcast || ip6.mcast)), action=(drop;)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp1" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 10.10.10.1 ; ip.ttl = 254; outport = "lrp1"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp1" && ip6 && ip6.src == 1010::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 1010::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp1"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp2" && ip4 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp4 {eth.dst <-> eth.src; icmp4.type = 11; /* Time exceeded */ icmp4.code = 0; /* TTL exceeded in transit */ ip4.dst = ip4.src; ip4.src = 20.20.20.1 ; ip.ttl = 254; outport = "lrp2"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=31   , match=(inport == "lrp2" && ip6 && ip6.src == 2020::/64 && ip.ttl == {0, 1} && !ip.later_frag), action=(icmp6 {eth.dst <-> eth.src; ip6.dst = ip6.src; ip6.src = 2020::1 ; ip.ttl = 254; icmp6.type = 3; /* Time exceeded */ icmp6.code = 0; /* TTL exceeded in transit */ outport = "lrp2"; flags.loopback = 1; output; };)
+  table=??(lr_in_ip_input     ), priority=30   , match=(ip.ttl == {0, 1}), action=(drop;)
 ])
 
 dnl Flows to "route" (statically forward) without decrementing TTL for
@@ -6755,6 +6817,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
   table=??(ls_in_acl          ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -6809,6 +6872,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
   table=??(ls_in_acl_after_lb ), priority=2003 , match=(reg0[[8]] == 1 && (ip4 && icmp)), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(/* drop */)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -6863,6 +6927,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
   table=??(ls_in_acl_after_lb ), priority=2001 , match=(reg0[[9]] == 1 && (ip4)), action=(/* drop */)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */)
   table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(/* drop */)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7154,11 +7219,14 @@ flow="inport == \"lsp1\" && eth.src == 00:00:00:00:00:01 && eth.dst == 00:00:00:
 AS_BOX([No ACL, default_acl_drop not set])
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl          ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
@@ -7173,11 +7241,14 @@ output("lsp2");
 AS_BOX([No ACL, default_acl_drop false])
 check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=false
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl          ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
@@ -7192,11 +7263,14 @@ output("lsp2");
 AS_BOX([No ACL, default_acl_drop true])
 check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=true
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl          ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=65535, match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl         ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_acl_hint    ), priority=65535, match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
@@ -7218,12 +7292,15 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7240,12 +7317,15 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7262,12 +7342,15 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl          ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(drop;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7292,6 +7375,7 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7343,13 +7427,16 @@ check ovn-nbctl --wait=sb remove NB_Global . options default_acl_drop
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(ip4 && tcp), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7365,13 +7452,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=false
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(ip4 && tcp), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7387,13 +7477,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=true
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(ip4 && tcp), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(drop;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7418,6 +7511,7 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(reg0[[7]] == 1 && (ip4 && tcp)), action=(reg0[[1]] = 1; next;)
   table=??(ls_in_acl_after_lb ), priority=1001 , match=(reg0[[8]] == 1 && (ip4 && tcp)), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7469,13 +7563,16 @@ check ovn-nbctl --wait=sb remove NB_Global . options default_acl_drop
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7491,13 +7588,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=false
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(next;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_acl         ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7513,13 +7613,16 @@ check ovn-nbctl --wait=sb set NB_Global . options:default_acl_drop=true
 AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/' | sort], [0], [dnl
   table=??(ls_in_acl          ), priority=0    , match=(1), action=(drop;)
   table=??(ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
+  table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
   table=??(ls_out_acl         ), priority=0    , match=(1), action=(drop;)
   table=??(ls_out_acl         ), priority=1001 , match=(ip4 && tcp), action=(next;)
   table=??(ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
+  table=??(ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
   table=??(ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
@@ -7542,6 +7645,7 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/
   table=??(ls_in_acl          ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=??(ls_in_acl          ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=0    , match=(1), action=(drop;)
+  table=??(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
   table=??(ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
@@ -7886,8 +7990,10 @@ check ovn-nbctl                                               \
 AS_BOX([No chassis registered - use ct_lb_mark and ct_mark.natted])
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=6 (ls_in_pre_stateful ), priority=120  , match=(reg0[[2]] == 1 && ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;)
   table=6 (ls_in_pre_stateful ), priority=110  , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
   table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
@@ -7898,8 +8004,10 @@ AS_BOX([Chassis registered that doesn't support ct_lb_mark - use ct_lb and ct_la
 check ovn-sbctl chassis-add hv geneve 127.0.0.1
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted), action=(next;)
   table=6 (ls_in_pre_stateful ), priority=120  , match=(reg0[[2]] == 1 && ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb;)
   table=6 (ls_in_pre_stateful ), priority=110  , match=(reg0[[2]] == 1), action=(ct_lb;)
   table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb(backends=42.42.42.2);)
@@ -7910,8 +8018,10 @@ AS_BOX([Chassis upgrades and supports ct_lb_mark - use ct_lb_mark and ct_mark.na
 check ovn-sbctl set chassis hv other_config:ct-no-masked-label=true
 check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=6 (ls_in_pre_stateful ), priority=120  , match=(reg0[[2]] == 1 && ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;)
   table=6 (ls_in_pre_stateful ), priority=110  , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
   table=12(ls_in_lb           ), priority=110  , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
@@ -8244,15 +8354,17 @@ AT_CAPTURE_FILE([R1flows])
 
 AT_CHECK([grep "lr_in_lb_aff_check" R1flows | sort], [0], [dnl
   table=6 (lr_in_lb_aff_check ), priority=0    , match=(1), action=(next;)
-  table=6 (lr_in_lb_aff_check ), priority=100  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(reg9[[6]] = chk_lb_aff(); next;)
+  table=6 (lr_in_lb_aff_check ), priority=100  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(reg0 = ip4.dst; reg9[[16..31]] = tcp.dst; reg9[[6]] = chk_lb_aff(); next;)
 ])
 AT_CHECK([grep "lr_in_dnat " R1flows | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=10.0.0.2:80);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=20.0.0.2:80);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8270,11 +8382,13 @@ AT_CAPTURE_FILE([R1flows_skip_snat])
 
 AT_CHECK([grep "lr_in_dnat " R1flows_skip_snat | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; skip_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80; skip_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80; skip_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8289,11 +8403,13 @@ AT_CAPTURE_FILE([R1flows_force_snat])
 
 AT_CHECK([grep "lr_in_dnat " R1flows_force_snat | sort], [0], [dnl
   table=7 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
-  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 172.16.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80; force_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80; force_snat);)
   table=7 (lr_in_dnat         ), priority=150  , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80; force_snat);)
+  table=7 (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=7 (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=7 (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=7 (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8569,12 +8685,13 @@ ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows0
 
 AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows0], [0], [dnl
   table=? (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;)
-  table=? (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(ct_dnat;)
   table=? (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_mark.natted == 1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=? (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
@@ -8588,6 +8705,7 @@ AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows0 | grep "priority=65532"],
   table=? (ls_out_acl         ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;)
   table=? (ls_out_acl         ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=? (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
+  table=?(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
 ])
 
@@ -8599,10 +8717,12 @@ ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows1
 
 AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows1], [0], [dnl
   table=? (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;)
+  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(ct_dnat;)
   table=? (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_label.natted == 1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 192.168.0.1), action=(ct_lb(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted), action=(next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_label.natted && ct_label.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
 ])
 
 AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows1 | grep "priority=65532"], [0], [dnl
@@ -8614,6 +8734,7 @@ AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows1 | grep "priority=65532"],
   table=? (ls_out_acl         ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;)
   table=? (ls_out_acl         ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;)
   table=? (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
+  table=?(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
 ])
 
@@ -8625,12 +8746,13 @@ ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows2
 
 AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows2], [0], [dnl
   table=? (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;)
-  table=? (lr_in_defrag       ), priority=50   , match=(icmp || icmp6), action=(ct_dnat;)
+  table=? (lr_in_defrag       ), priority=100  , match=(ip && ip4.dst == 192.168.0.1), action=(ct_dnat;)
   table=? (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_mark.natted == 1), action=(next;)
-  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=110  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);)
+  table=? (lr_in_dnat         ), priority=50   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted), action=(next;)
   table=? (lr_in_dnat         ), priority=50   , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=? (lr_in_dnat         ), priority=70   , match=(ct.est && !ct.rel && !ct.new && ct_mark.natted && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; next;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;)
   table=? (lr_in_dnat         ), priority=70   , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;)
 ])
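[Editorial note, not part of the patch] The hunk above updates the expected router flows: lr_in_defrag no longer stores the VIP in reg0, lr_in_dnat matches new connections on ip4.dst directly, and separate priority-50/70 flows handle established (ct_mark.natted) traffic and its force-/skip-SNAT variants. A minimal way to inspect these stages on a live deployment, mirroring the grep used in the test:

    ovn-sbctl dump-flows | grep -e lr_in_defrag -e lr_in_dnat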
@@ -8644,8 +8766,104 @@ AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows2 | grep "priority=65532"],
   table=? (ls_out_acl         ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;)
   table=? (ls_out_acl         ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;)
   table=? (ls_out_acl         ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
+  table=?(ls_in_acl_after_lb ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;)
   table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;)
 ])
 
 AT_CLEANUP
 ])
+
+OVN_FOR_EACH_NORTHD_NO_HV([
+AT_SETUP([Chassis-feature compatibility - remote chassis])
+ovn_start
+
+AS_BOX([Local chassis])
+check ovn-sbctl chassis-add hv1 geneve 127.0.0.1 \
+  -- set chassis hv1 other_config:ct-no-masked-label=true \
+  -- set chassis hv1 other_config:ovn-ct-lb-related=true \
+  -- set chassis hv1 other_config:mac-binding-timestamp=true
+
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE debug/chassis-features-list], [0], [dnl
+ct_no_masked_label:    true
+ct_lb_related:         true
+mac_binding_timestamp: true
+])
+
+AS_BOX([Remote chassis])
+check ovn-sbctl chassis-add hv2 geneve 127.0.0.2 \
+  -- set chassis hv2 other_config:is-remote=true \
+  -- set chassis hv2 other_config:ct-no-masked-label=false \
+  -- set chassis hv2 other_config:ovn-ct-lb-related=false \
+  -- set chassis hv2 other_config:mac-binding-timestamp=false
+
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE debug/chassis-features-list], [0], [dnl
+ct_no_masked_label:    true
+ct_lb_related:         true
+mac_binding_timestamp: true
+])
+
+AT_CLEANUP
+])
+
+AT_SETUP([Localnet ports on LS with LB])
+ovn_start
+# In the past, traffic arriving on localnet ports has skipped conntrack.
+# This test ensures that we still skip conntrack for localnet ports,
+# *except* for the case where the logical switch has a load balancer
+# configured. In this case, the localnet port will not skip conntrack,
+# allowing for traffic to be load balanced on the localnet port.
+
+check ovn-nbctl ls-add sw
+check ovn-nbctl lsp-add sw sw-ln
+check ovn-nbctl lsp-set-type sw-ln localnet
+check ovn-nbctl lsp-set-addresses sw-ln unknown
+check ovn-nbctl --wait=sb sync
+
+# Since this test is only concerned with logical flows, we don't need to
+# configure anything else that we normally would with regard to localnet
+# ports.
+
+
+# First, ensure that conntrack is skipped for the localnet port since there
+# isn't a load balancer configured.
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_in_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_in_pre_lb       ), priority=110  , match=(ip && inport == "sw-ln"), action=(next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_out_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw-ln"), action=(ct_clear; next;)
+])
+
+# Now add a load balancer and ensure that we no longer are skipping conntrack
+# for the localnet port
+
+check ovn-nbctl lb-add lb 10.0.0.1:80 10.0.0.100:8080 tcp
+check ovn-nbctl ls-lb-add sw lb
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_in_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_out_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+])
+
+# And ensure that removing the load balancer from the switch results in skipping
+# conntrack again
+check ovn-nbctl ls-lb-del sw lb
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_in_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_in_pre_lb       ), priority=110  , match=(ip && inport == "sw-ln"), action=(next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw | grep ls_out_pre_lb | grep priority=110 | grep sw-ln | sed 's/table=../table=??/'], [0], [dnl
+  table=??(ls_out_pre_lb      ), priority=110  , match=(ip && outport == "sw-ln"), action=(ct_clear; next;)
+])
+
+AT_CLEANUP
+])
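[Editorial note, not part of the patch] The test above exercises the localnet/conntrack interaction described in its opening comment. A minimal sketch, assuming a running OVN sandbox and reusing the switch, port, and load-balancer names from the test, of how the same check could be reproduced by hand:

    ovn-nbctl ls-add sw
    ovn-nbctl lsp-add sw sw-ln
    ovn-nbctl lsp-set-type sw-ln localnet
    # With no LB, priority-110 "skip conntrack" flows exist for sw-ln:
    ovn-sbctl lflow-list sw | grep -E 'ls_(in|out)_pre_lb' | grep priority=110
    # After attaching an LB, the skip flows disappear:
    ovn-nbctl lb-add lb 10.0.0.1:80 10.0.0.100:8080 tcp
    ovn-nbctl ls-lb-add sw lb
    ovn-sbctl lflow-list sw | grep -E 'ls_(in|out)_pre_lb' | grep priority=110

All commands mirror those already used in the test itself.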
diff --git a/tests/ovn.at b/tests/ovn.at
index 55de7c85b..3515a1e3c 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -5753,7 +5753,7 @@ check ovn-nbctl --wait=hv sync
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_lp1_mac && eth.dst==$rp_ls1_mac &&
         ip4 && ip.ttl==64 && ip4.src==$ls1_lp1_ip && ip4.dst==$ls2_lp1_ip &&
         udp && udp.src==53 && udp.dst==4369"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 
 echo "---------NB dump-----"
@@ -5803,7 +5803,7 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_lp1_mac && eth.dst==$rp_ls1_mac &&
         ip4 && ip.ttl==64 && ip4.src==$ls1_lp1_ip && ip4.dst==$ls2_lp1_ip &&
         udp && udp.src==53 && udp.dst==4369"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 # The 2nd packet sent should not be received.
 OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 
@@ -7741,7 +7741,6 @@ ls3_p1_mac=00:00:00:01:02:05
 check ovn-nbctl --wait=hv lr-policy-add R1 10 "ip4.src==192.168.1.0/24 && ip4.dst==172.16.1.0/24" drop
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows
 AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" | wc -l], [0], [dnl
 1
 ])
@@ -7751,15 +7750,12 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$ls1_p1_ip && ip4.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
 
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop policy
-AT_CHECK([ovs-ofctl dump-flows br-int | \
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
     grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24 actions=drop" | \
-    grep "priority=10" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+    grep "priority=10" | grep "n_packets=1" -c)"])
 
 # Expected to drop the packet.
 $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" pbr-hv/vif2-tx.pcap > vif2.packets
@@ -7770,7 +7766,7 @@ AT_FAIL_IF([test "$rcvd_packet" != ""])
 check ovn-nbctl --wait=hv lr-policy-add R1 20 "ip4.src==192.168.1.0/24 && ip4.dst==172.16.1.0/24" allow
 
 # Check logical flow
-AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" | wc -l], [0], [dnl
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" -c], [0], [dnl
 2
 ])
 
@@ -7778,15 +7774,12 @@ AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "192.168.1.0" | wc -l]
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$ls1_p1_ip && ip4.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the allow policy
-sleep 1
-AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
-    grep "192.168.1.0" | \
-    grep "priority=20" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24" | \
+    grep "priority=20" | grep "n_packets=1" -c)"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$ls2_ro_mac && eth.dst==$ls2_p1_mac &&
@@ -7802,7 +7795,7 @@ check ovn-nbctl --wait=hv lr-policy-add R1 30 "ip4.src==192.168.1.0/24 && ip4.ds
 # Check logical flow
 AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
     grep "192.168.1.0" | \
-    grep "priority=30" | wc -l], [0], [dnl
+    grep "priority=30" -c], [0], [dnl
 1
 ])
 
@@ -7810,21 +7803,12 @@ AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$ls1_p1_ip && ip4.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
-sleep 1
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
-echo "southbound flows"
-ovn-sbctl --ovs dump-flows > sbflows
-AT_CAPTURE_FILE([sbflows])
-echo "ovs flows"
-ovs-ofctl dump-flows br-int > brflows
-AT_CAPTURE_FILE([brflows])
 # Check if packet hit the allow policy
-AT_CHECK([grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24" brflows | \
-    grep "priority=30" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "nw_src=192.168.1.0/24,nw_dst=172.16.1.0/24" | \
+    grep "priority=30" | grep "n_packets=1" -c)"])
 echo "packet hit reroute policy"
 
 # Expected packet has TTL decreased by 1
@@ -7927,9 +7911,7 @@ ls3_p1_mac=00:00:00:01:02:05
 check ovn-nbctl --wait=sb lr-policy-add R1 10 "ip6.src==2001::/64 && ip6.dst==2002::/64" drop
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows
-AT_CAPTURE_FILE([sbflows])
-AT_CHECK([grep lr_in_policy sbflows | grep "2001" | wc -l], [0], [dnl
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "2001" -c], [0], [dnl
 1
 ])
 
@@ -7938,15 +7920,12 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip6 && ip.ttl==64 && ip6.src==$ls1_p1_ip && ip6.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
 
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop policy
-AT_CHECK([ovs-ofctl dump-flows br-int | \
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
     grep "ipv6_src=2001::/64,ipv6_dst=2002::/64 actions=drop" | \
-    grep "priority=10" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+    grep "priority=10" | grep "n_packets=1" -c)"])
 
 # Expected to drop the packet.
 $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" pbr-hv/vif2-tx.pcap > vif2.packets
@@ -7956,9 +7935,7 @@ AT_FAIL_IF([test -s vif2.packets])
 check ovn-nbctl --wait=sb lr-policy-add R1 20 "ip6.src==2001::/64 && ip6.dst==2002::/64" allow
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows2
-AT_CAPTURE_FILE([sbflows2])
-AT_CHECK([grep lr_in_policy sbflows2 | grep "2001" | wc -l], [0], [dnl
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | grep "2001" -c], [0], [dnl
 2
 ])
 
@@ -7966,16 +7943,12 @@ AT_CHECK([grep lr_in_policy sbflows2 | grep "2001" | wc -l], [0], [dnl
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip6 && ip.ttl==64 && ip6.src==$ls1_p1_ip && ip6.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the allow policy
-ovn-sbctl dump-flows > sbflows3
-AT_CAPTURE_FILE([sbflows3])
-AT_CHECK([grep lr_in_policy sbflows3 | \
-    grep "2001" | \
-    grep "priority=20" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "ipv6_src=2001::/64,ipv6_dst=2002::/64"  | \
+    grep "priority=20" | grep "n_packets=1" -c)"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$ls2_ro_mac && eth.dst==$ls2_p1_mac &&
@@ -7989,11 +7962,9 @@ OVN_CHECK_PACKETS([pbr-hv/vif2-tx.pcap], [expected])
 check ovn-nbctl --wait=sb lr-policy-add R1 30 "ip6.src==2001::/64 && ip6.dst==2002::/64" reroute 2003::2
 
 # Check logical flow
-ovn-sbctl dump-flows > sbflows4
-AT_CAPTURE_FILE([sbflows4])
-AT_CHECK([grep lr_in_policy sbflows4 | \
+AT_CHECK([ovn-sbctl dump-flows | grep lr_in_policy | \
     grep "2001" | \
-    grep "priority=30" | wc -l], [0], [dnl
+    grep "priority=30" -c], [0], [dnl
 1
 ])
 
@@ -8001,19 +7972,12 @@ AT_CHECK([grep lr_in_policy sbflows4 | \
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$ls1_ro_mac &&
        ip6 && ip.ttl==64 && ip6.src==$ls1_p1_ip && ip6.dst==$ls2_p1_ip &&
        udp && udp.src==53 && udp.dst==4369"
-as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"
-sleep 1
+OVS_WAIT_UNTIL([as pbr-hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
-ovn-sbctl dump-flows > sbflows5
-ovs-ofctl dump-flows br-int > offlows5
-AT_CAPTURE_FILE([sbflows5])
-AT_CAPTURE_FILE([offlows5])
 # Check if packet hit the allow policy
-AT_CHECK([grep "ipv6_src=2001::/64,ipv6_dst=2002::/64" offlows5 | \
-    grep "priority=30" | \
-    grep "n_packets=1" | wc -l], [0], [dnl
-1
-])
+OVS_WAIT_UNTIL([test "1" = "$(ovs-ofctl dump-flows br-int | \
+    grep "ipv6_src=2001::/64,ipv6_dst=2002::/64"  | \
+    grep "priority=30" | grep "n_packets=1" -c)"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$ls3_ro_mac && eth.dst==$ls3_p1_mac &&
@@ -9531,73 +9495,73 @@ AT_CAPTURE_FILE([sbflows])
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4360 && tcp.dst==80"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be dropped with logging in the ingress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4361 && tcp.dst==81"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be dropped without logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4360 && tcp.dst==180"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be dropped with logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4361 && tcp.dst==181"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be allowed without logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4362 && tcp.dst==82"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be allowed with logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4363 && tcp.dst==83"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should allow related flows without logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4364 && tcp.dst==84"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should allow related flows with logging.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4365 && tcp.dst==85"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected without logging in the ingress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4366 && tcp.dst==86"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected with logging in the ingress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4367 && tcp.dst==87"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected without logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4366 && tcp.dst==186"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Send packet that should be rejected with logging in the egress pipeline.
 packet="inport==\"lp1\" && eth.src==$lp1_mac && eth.dst==$lp2_mac &&
         ip4 && ip.ttl==64 && ip4.src==$lp1_ip && ip4.dst==$lp2_ip &&
         tcp && tcp.flags==2 && tcp.src==4367 && tcp.dst==187"
-as hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 OVS_WAIT_UNTIL([ test 8 = $(grep -c 'acl_log' hv/ovn-controller.log) ])
 
@@ -12254,7 +12218,7 @@ nexthop_mac="f00000010204"
 AS_BOX([Send ip packet from foo1 to 8.8.8.8])
 src_mac="f00000010203"
 dst_mac="000001010203"
-packet=${foo_mac}${foo1_mac}08004500001c0000000040110000${foo1_ip}${dst_ip}0035111100080000
+packet=${foo_mac}${foo1_mac}080045000028000000004006a916${foo1_ip}${dst_ip}0035111112345678000000005002faf069450000
 
 AS_BOX([Wait for GARPs announcing gw IP to arrive])
 OVS_WAIT_UNTIL([
@@ -12265,15 +12229,12 @@ grep actions=mod_dl_dst:f0:00:00:01:02:04 | wc -l` -eq 1
 AS_BOX([Verify VLAN tagged packet on bridge connecting hv1 and hv2])
 # VLAN tagged packet with router port(192.168.1.1) MAC as destination MAC
 # is expected on bridge connecting hv1 and hv2
-expected=${foo_mac}${foo1_mac}8100000208004500001c0000000040110000${foo1_ip}${dst_ip}0035111100080000
+expected=${foo_mac}${foo1_mac}81000002080045000028000000004006a916${foo1_ip}${dst_ip}0035111112345678000000005002faf069450000
 echo $expected > hv1-br-ex_n2.expected
 
 AS_BOX([Verify packet at outside1 i.e nexthop(172.16.1.1) port])
 # Packet to Expect at outside1 i.e nexthop(172.16.1.1) port.
-# As connection tracking not enabled for this test, snat can't be done on the packet.
-# We still see foo1 as the source ip address. But source mac(gateway MAC) and
-# dest mac(nexthop mac) are properly configured.
-expected=${nexthop_mac}${gw_mac}08004500001c000000003f110100${foo1_ip}${dst_ip}0035111100080000
+expected=${nexthop_mac}${gw_mac}080045000028000000003f06beaa${gw_ip}${dst_ip}0035111112345678000000005002faf07dd90000
 echo $expected > hv3-vif1.expected
 
 check as hv1 ovs-appctl dpctl/del-flows
@@ -12304,7 +12265,7 @@ cat hv1-br-ex_n2.expected > expout
 AT_CHECK([sort hv1-br-ex_n2], [0], [expout])
 
 AS_BOX([Check expected packet on nexthop interface])
-$PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv3/vif1-tx.pcap | grep ${foo1_ip}${dst_ip} | uniq > hv3-vif1
+$PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv3/vif1-tx.pcap | grep ${gw_ip}${dst_ip} | uniq > hv3-vif1
 cat hv3-vif1.expected > expout
 AT_CHECK([sort hv3-vif1], [0], [expout])
 
@@ -13268,30 +13229,27 @@ as hv2 ovs-ofctl dump-flows br-int table=37
 gw1_chassis=$(fetch_column Chassis _uuid name=gw1)
 gw2_chassis=$(fetch_column Chassis _uuid name=gw2)
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw1_ofport,$hv1_gw2_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv1_gw1_ofport,$hv1_gw2_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw1_ofport,$hv2_gw2_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv2_gw1_ofport,$hv2_gw2_ofport \
 | wc -l], [0], [1
 ])
 
-# make sure that flows for handling the outside router port reside on gw1
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+# make sure that flows for handling the outside router port reside on gw1 (ls_in_l2_lkup table)
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
-# make sure ARP responder flows for outside router port reside on gw1 too
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=9 | \
-grep arp_tpa=192.168.0.101 | wc -l], [0], [[1
-]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=9 | grep arp_tpa=192.168.0.101 | wc -l], [0], [[0
-]])
+# make sure ARP responder flows for the outside router port reside on gw1 too (ls_in_arp_rsp table)
+OVS_WAIT_UNTIL([test `as gw1 ovs-ofctl dump-flows br-int table=27 | \
+grep arp_tpa=192.168.0.101 | wc -l` -ge 1])
 
 # check that the chassis redirect port has been claimed by the gw1 chassis
 wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw1_chassis
@@ -13314,13 +13272,13 @@ wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # we make sure that the hypervisors noticed, and inverted the slave ports
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw2_ofport,$hv1_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv1_gw2_ofport,$hv1_gw1_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw2_ofport,$hv2_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv2_gw2_ofport,$hv2_gw1_ofport \
 | wc -l], [0], [1
 ])
 
@@ -13372,11 +13330,11 @@ AT_CHECK([ovs-vsctl --bare --columns bfd find Interface name=ovn-hv1-0],[0],
 ]])
 
 # make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # disconnect GW2 from the network, GW1 should take over
@@ -13386,12 +13344,12 @@ as main ovs-vsctl del-port n1 $port
 
 bfd_dump
 
-# make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+# make sure that flows for handling the outside router port reside on gw1 now
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=25 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # check that the chassis redirect port has been reclaimed by the gw1 chassis
@@ -13470,45 +13428,16 @@ ovn-nbctl set Logical_Router_Port outside ha_chassis_group=$hagrp1_uuid
 wait_row_count HA_Chassis_Group 1
 wait_row_count HA_Chassis 2
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw1_ofport,$hv1_gw2_ofport \
-| wc -l], [0], [1
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv1_gw1_ofport,$hv1_gw2_ofport \
+| wc -l], [0], [0
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw1_ofport,$hv2_gw2_ofport \
-| wc -l], [0], [1
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv2_gw1_ofport,$hv2_gw2_ofport \
+| wc -l], [0], [0
 ])
 
-# make sure that flows for handling the outside router port reside on gw1
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
-]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
-]])
-
-# make sure ARP responder flows for outside router port reside on gw1 too
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=9 | \
-grep arp_tpa=192.168.0.101 | wc -l], [0], [[1
-]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=9 | grep arp_tpa=192.168.0.101 | wc -l], [0], [[0
-]])
-
-# check that the chassis redirect port has been claimed by the gw1 chassis
-#
-# XXX actually it doesn't happen, the test has always been wrong here
-# because the following just checks that "wc -l" succeeds (and it always
-# does):
-#
-#   OVS_WAIT_UNTIL([ovn-sbctl --columns chassis --bare find Port_Binding \
-#   logical_port=cr-outside | grep $gw1_chassis | wc -l], [0],[[1
-#   ]])
-#
-# If it were correct, then the following would be a good substitute:
-#
-#   wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw1_chassis
-
 # Re add the ovs ports.
 for i in 1 2; do
     as hv$i
@@ -13519,6 +13448,34 @@ for i in 1 2; do
         ofport-request=1
 done
 
+# Re-add gw2
+as gw2 ovn_attach n1 br-phys 192.168.0.1
+
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv1_gw1_ofport,$hv1_gw2_ofport \
+| wc -l], [0], [1
+])
+
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv2_gw1_ofport,$hv2_gw2_ofport \
+| wc -l], [0], [1
+])
+
+# make sure that flows for handling the outside router port reside on gw1
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
+]])
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst:00:00:02:01:02:04" | wc -l], [0], [[0
+]])
+
+# make sure ARP responder flows for outside router port reside on gw1 too
+OVS_WAIT_UNTIL([test `as gw1 ovs-ofctl dump-flows br-int table=27 | \
+grep arp_tpa=192.168.0.101 | wc -l` -ge 1 ])
+
+# check that the chassis redirect port has been claimed by the gw1 chassis
+wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw1_chassis
+
 hv1_ch_uuid=$(fetch_column Chassis _uuid name=hv1)
 hv2_ch_uuid=$(fetch_column Chassis _uuid name=hv2)
 exp_ref_ch_list="$hv1_ch_uuid $hv2_ch_uuid"
@@ -13527,29 +13484,18 @@ wait_column "$exp_ref_ch_list" HA_Chassis_Group ref_chassis
 # Increase the priority of gw2
 ovn-nbctl --wait=sb ha-chassis-group-add-chassis hagrp1 gw2 40
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv1_gw2_ofport,$hv1_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv1_gw2_ofport,$hv1_gw1_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
-grep active_backup | grep slaves:$hv2_gw2_ofport,$hv2_gw1_ofport \
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=37 | \
+grep active_backup | grep members:$hv2_gw2_ofport,$hv2_gw1_ofport \
 | wc -l], [0], [1
 ])
 
 # check that the chassis redirect port has been reclaimed by the gw2 chassis
-#
-# XXX actually it doesn't happen, the test has always been wrong here
-# because the following just checks that "wc -l" succeeds (and it always
-# does):
-#
-#   OVS_WAIT_UNTIL([ovn-sbctl --columns chassis --bare find Port_Binding \
-#   logical_port=cr-outside | grep $gw2_chassis | wc -l], [0],[[1
-#   ]])
-#
-# If it were correct, then the following would be a good substitute:
-#
-#   wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw2_chassis
+wait_row_count Port_Binding 1 logical_port=cr-outside chassis=$gw2_chassis
 
 # check BFD enablement on tunnel ports from gw1 #########
 as gw1
@@ -13588,11 +13534,11 @@ AT_CHECK([ovs-vsctl --bare --columns bfd find Interface name=ovn-hv1-0],[0],
 ]])
 
 # make sure that flows for handling the outside router port reside on gw2 now
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # disconnect GW2 from the network, GW1 should take over
@@ -13603,11 +13549,11 @@ as main ovs-vsctl del-port n1 $port
 bfd_dump
 
 # make sure that flows for handling the outside router port reside on gw1 now
-OVS_WAIT_UNTIL([as gw1 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[1
+OVS_WAIT_FOR_OUTPUT([as gw1 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[1
 ]])
-OVS_WAIT_UNTIL([as gw2 ovs-ofctl dump-flows br-int table=24 | \
-grep 00:00:02:01:02:04 | wc -l], [0], [[0
+OVS_WAIT_FOR_OUTPUT([as gw2 ovs-ofctl dump-flows br-int table=33 | \
+grep "dl_dst=00:00:02:01:02:04" | wc -l], [0], [[0
 ]])
 
 # check that the chassis redirect port has been reclaimed by the gw1 chassis
@@ -13889,6 +13835,133 @@ OVN_CLEANUP([gw1],[gw2],[hv1])
 AT_CLEANUP
 ])
 
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([allow IPv6 RA / NA / MLD by default])
+AT_SKIP_IF([test $HAVE_SCAPY = no])
+ovn_start
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+lsp_mac_prefix=50:64:00:00:00:0
+lsp_ip_prefix=10.0.0.
+lsp_ip6_prefix=aef0::5264:00ff:fe00:000
+
+check ovn-nbctl ls-add ls0
+for i in 1 2; do
+  check ovn-nbctl lsp-add ls0 lsp$i
+  check ovn-nbctl lsp-set-addresses lsp$i \
+    "${lsp_mac_prefix}$i ${lsp_ip_prefix}$i ${lsp_ip6_prefix}$i"
+
+  # forbid all traffic for the ports
+  check ovn-nbctl acl-add ls0 \
+      from-lport 1000 "inport == \"lsp$i\"" drop
+  check ovn-nbctl --apply-after-lb acl-add ls0\
+      from-lport 1000 "inport == \"lsp$i\"" drop
+  check ovn-nbctl acl-add ls0 \
+      to-lport 1000 "outport == \"lsp$i\"" drop
+
+  check ovs-vsctl -- add-port br-int vif$i -- \
+      set interface vif$i external-ids:iface-id=lsp$i \
+      options:tx_pcap=hv1/vif$i-tx.pcap \
+      options:rxq_pcap=hv1/vif$i-rx.pcap
+  : > $i.expected
+done
+
+router_mac=fa:16:3e:00:00:01
+router_prefix=fdad:1234:5678::
+router_ip=${router_prefix}1
+check ovn-nbctl lr-add lr0
+check ovn-nbctl lrp-add lr0 lrp0 ${router_mac} ${router_ip}/64
+check ovn-nbctl set Logical_Router_Port lrp0 ipv6_ra_configs:address_mode="slaac"
+check ovn-nbctl \
+    -- lsp-add ls0 rp0 \
+    -- set Logical_Switch_Port rp0 type=router \
+                     options:router-port=lrp0 \
+                     addresses='"${router_mac} ${router_ip}"'
+
+wait_for_ports_up
+
+test_ns_na() {
+    local inport=$1 src_mac=$2 dst_mac=$3 src_ip=$4 dst_ip=$5
+
+    packet=$(fmt_pkt "
+        Ether(dst='ff:ff:ff:ff:ff:ff', src='${src_mac}') /
+        IPv6(src='${src_ip}', dst='ff02::1:ff00:2') /
+        ICMPv6ND_NS(tgt='${dst_ip}')
+    ")
+    as hv1 ovs-appctl netdev-dummy/receive vif${inport} $packet
+
+    expected_packet=$(fmt_pkt "
+        Ether(dst='${src_mac}', src='${dst_mac}') /
+        IPv6(src='${dst_ip}', dst='${src_ip}') /
+        ICMPv6ND_NA(tgt='${dst_ip}', R=0, S=1) /
+        ICMPv6NDOptDstLLAddr(lladdr='${dst_mac}')
+    ")
+    echo $expected_packet >> $inport.expected
+}
+
+test_rs_ra() {
+    local inport=$1 src_mac=$2 src_ip=$3
+    local router_mac=$4 router_prefix=$5 router_ip=$6
+
+    packet=$(fmt_pkt "
+        Ether(dst='ff:ff:ff:ff:ff:ff', src='${src_mac}') /
+        IPv6(src='${src_ip}', dst='ff02::2') /
+        ICMPv6ND_RS()
+    ")
+    as hv1 ovs-appctl netdev-dummy/receive vif${inport} $packet
+
+    expected_packet=$(fmt_pkt "
+        Ether(dst='${src_mac}', src='${router_mac}') /
+        IPv6(src='${router_ip}', dst='${src_ip}') /
+        ICMPv6ND_RA(chlim=255, prf=0, routerlifetime=65535) /
+        ICMPv6NDOptSrcLLAddr(lladdr='${router_mac}') /
+        ICMPv6NDOptPrefixInfo(prefix='${router_prefix}')
+    ")
+    echo $expected_packet >> $inport.expected
+}
+
+test_mldv2() {
+    local inport=$1 outport=$2 src_mac=$3 src_ip=$4
+
+    packet=$(fmt_pkt "
+        Ether(dst='ff:ff:ff:ff:ff:ff', src='${src_mac}') /
+        IPv6(src='${src_ip}', dst='ff02::2') /
+        ICMPv6MLQuery2()
+    ")
+    as hv1 ovs-appctl netdev-dummy/receive vif${inport} $packet
+
+    expected_packet=$packet
+    echo $expected_packet >> $outport.expected
+}
+
+src_mac=${lsp_mac_prefix}1
+dst_mac=${lsp_mac_prefix}2
+src_ip=${lsp_ip6_prefix}1
+dst_ip=${lsp_ip6_prefix}2
+
+as hv1
+test_ns_na 1 $src_mac $dst_mac $src_ip $dst_ip
+
+as hv1
+router_local_ip=fe80::f816:3eff:fe00:1
+test_rs_ra 1 $src_mac $src_ip $router_mac $router_prefix $router_local_ip
+
+as hv1
+src_ip=fe80::1
+test_mldv2 1 2 $src_mac $src_ip
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [1.expected])
+OVN_CHECK_PACKETS([hv1/vif2-tx.pcap], [2.expected])
+
+OVN_CLEANUP([hv1])
+
+AT_CLEANUP
+])
+
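[Editorial note, not part of the patch] The test above installs blanket drop ACLs in all three stages and still expects NS/NA, RS/RA, and MLD traffic to pass, relying on the priority-65532 ND/RA/MLD "allow" flows visible in the northd flow dumps earlier in this patch. A small sketch, assuming the switch name "ls0" from the test, of how to confirm those flows are present:

    ovn-sbctl dump-flows ls0 | grep priority=65532 | \
        grep 'nd || nd_ra || nd_rs || mldv1 || mldv2'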
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([IPv6 Neighbor Solicitation for unknown MAC])
 AT_KEYWORDS([ovn-nd_ns for unknown mac])
@@ -17210,7 +17283,7 @@ test_icmp() {
                   icmp4.code==0"
     shift; shift; shift; shift; shift; shift
     hv=hv`vif_to_hv $inport`
-    as $hv ovs-appctl -t ovn-controller inject-pkt "$packet"
+    OVS_WAIT_UNTIL([as $hv ovs-appctl -t ovn-controller inject-pkt "$packet"])
     in_ls=`vif_to_ls $inport`
     in_lrp=`vif_to_lrp $inport`
     for outport; do
@@ -18276,7 +18349,7 @@ AT_SETUP([TTL exceeded])
 AT_KEYWORDS([ttl-exceeded])
 ovn_start
 
-# test_ip_packet INPORT HV ETH_SRC ETH_DST IPV4_SRC IPV4_DST IPV4_ROUTER IP_CHKSUM EXP_IP_CHKSUM EXP_ICMP_CHKSUM
+# test_ip_packet INPORT HV ETH_SRC ETH_DST IPV4_SRC IPV4_DST IPV4_ROUTER IP_CHKSUM EXP_IP_CHKSUM EXP_ICMP_CHKSUM SHOULD_REPLY
 #
 # Causes a packet to be received on INPORT of the hypervisor HV. The packet is an IPv4 packet with
 # ETH_SRC, ETH_DST, IPV4_SRC, IPV4_DST, IP_CHKSUM as specified and TTL set to 1.
@@ -18292,6 +18365,7 @@ test_ip_packet() {
     local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_router=$7 ip_chksum=$8
     local exp_ip_chksum=$9 exp_icmp_chksum=${10}
     shift 10
+    local should_reply=$1
 
     local ip_ttl=01
     local packet=${eth_dst}${eth_src}08004500001400004000${ip_ttl}01${ip_chksum}${ipv4_src}${ipv4_dst}
@@ -18300,27 +18374,31 @@ test_ip_packet() {
     local icmp_type_code_response=0b00
     local icmp_data=00000000
     local reply_icmp_payload=${icmp_type_code_response}${exp_icmp_chksum}${icmp_data}
-    local reply=${eth_src}${eth_dst}08004500003000004000${reply_icmp_ttl}01${exp_ip_chksum}${ip_router}${ipv4_src}${reply_icmp_payload}
-    echo $reply$orig_pkt_in_reply >> vif$inport.expected
+    if test $should_reply == yes; then
+        local reply=${eth_src}${eth_dst}08004500003000004000${reply_icmp_ttl}01${exp_ip_chksum}${ip_router}${ipv4_src}${reply_icmp_payload}
+        echo $reply$orig_pkt_in_reply >> vif$inport.expected
+    fi
 
     as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
 
-# test_ip6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_DST IPV6_ROUTER EXP_ICMP_CHKSUM
+# test_ip6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_DST IPV6_ROUTER EXP_ICMP_CHKSUM SHOULD_REPLY
 #
 # Causes a packet to be received on INPORT of the hypervisor HV. The packet is an IPv6
 # packet with ETH_SRC, ETH_DST, IPV6_SRC and IPV6_DST as specified.
 # IPV6_ROUTER and EXP_ICMP_CHKSUM are the source IP and checksum of the icmpv6 ttl exceeded
 # packet sent by OVN logical router
 test_ip6_packet() {
-    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6 ipv6_router=$7 exp_icmp_chksum=$8
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6 ipv6_router=$7 exp_icmp_chksum=$8 should_reply=$9
     shift 8
 
     local ip6_hdr=6000000000151101${ipv6_src}${ipv6_dst}
     local packet=${eth_dst}${eth_src}86dd${ip6_hdr}dbb8303900155bac6b646f65206676676e6d66720a
 
-    local reply=${eth_src}${eth_dst}86dd6000000000453afe${ipv6_router}${ipv6_src}0300${exp_icmp_chksum}00000000${ip6_hdr}dbb8303900155bac6b646f65206676676e6d66720a
-    echo $reply >> vif$inport.expected
+    if test $should_reply == yes; then
+        local reply=${eth_src}${eth_dst}86dd6000000000453afe${ipv6_router}${ipv6_src}0300${exp_icmp_chksum}00000000${ip6_hdr}dbb8303900155bac6b646f65206676676e6d66720a
+        echo $reply >> vif$inport.expected
+    fi
 
     as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
@@ -18343,6 +18421,8 @@ for i in 1 2; do
             options:tx_pcap=hv$i/vif$i-tx.pcap \
             options:rxq_pcap=hv$i/vif$i-rx.pcap \
             ofport-request=$i
+
+    ovs-appctl -t ovn-controller vlog/set file:dbg:pinctrl
 done
 
 ovn-nbctl lr-add lr0
@@ -18358,10 +18438,22 @@ OVN_POPULATE_ARP
 wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
-test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 1 254) 0000 f87c ea96
-test_ip6_packet 1 1 000000000001 00000000ff01 20010db8000100000000000000000011 20010db8000200000000000000000011 20010db8000100000000000000000001 1c22
+test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 1 254) 0000 f87c ea96 yes
+test_ip6_packet 1 1 000000000001 00000000ff01 20010db8000100000000000000000011 20010db8000200000000000000000011 20010db8000100000000000000000001 1c22 yes
+
+# Should not send ICMP for multicast
+test_ip_packet 1 1 000000000001 01005e7f0001 $(ip_to_hex 192 168 1 1) $(ip_to_hex 239 255 0 1) $(ip_to_hex 192 168 1 254) 0000 000000000 no
+test_ip6_packet 1 1 000000000001 333300000001 20010db8000100000000000000000011 ff020000000000000000000000000001 20010db8000100000000000000000001 0000 no
+
 OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [vif1.expected])
 
+# Confirm from debug log that we only see 2 packet-ins (no packet-ins for
+# multicasts). This is necessary because not seeing ICMP messages doesn't
+# necessarily mean the packet-in didn't happen. It is possible that packet-in
+# is processed but the ICMP message got dropped.
+AT_CHECK([grep -c packet-in hv1/ovn-controller.log], [0], [2
+])
+
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
 ])
@@ -18656,7 +18748,7 @@ packet="inport==\"sw1-p1\" && eth.src==$sw1_p1_mac && eth.dst==$sw1_ro_mac &&
        udp && udp.src==53 && udp.dst==4369"
 
 # Start by Sending the packet and make sure it makes it there as expected
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$sw2_ro_mac && eth.dst==$sw2_p1_mac &&
@@ -18670,7 +18762,7 @@ OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 as hv2 ovs-appctl -t ovn-controller exit
 
 # Now send the packet again. This time, it should not arrive.
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 
@@ -19552,7 +19644,7 @@ packet="inport==\"sw1-p1\" && eth.src==$sw1_p1_mac && eth.dst==$sw1_ro_mac &&
        udp && udp.src==53 && udp.dst==4369"
 
 # Start by Sending the packet and make sure it makes it there as expected
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Expected packet has TTL decreased by 1
 expected="eth.src==$sw2_ro_mac && eth.dst==$sw2_p1_mac &&
@@ -19566,7 +19658,7 @@ OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 as hv2 ovs-appctl -t ovn-controller exit --restart
 
 # Now send the packet again. This time, it should still arrive
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 cat expected expected > expected2
 
@@ -19705,7 +19797,7 @@ test_ip_packet_larger() {
     # Set the packet length to 114.
     pkt_len=0072
     packet=${dst_mac}${src_mac}08004500${pkt_len}000000004001c3dd
-    orig_packet_l3=${src_ip}${dst_ip}0304fcfb00000000
+    orig_packet_l3=${src_ip}${dst_ip}0800f7ff00000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
@@ -19729,10 +19821,10 @@ test_ip_packet_larger() {
         # Packet to expect at br-phys.
         src_mac="000020201213"
         dst_mac="00000012af11"
-        src_ip=`ip_to_hex 10 0 0 3`
+        src_ip=`ip_to_hex 172 168 0 100`
         dst_ip=`ip_to_hex 172 168 0 3`
-        expected=${dst_mac}${src_mac}08004500${pkt_len}000000003f01c4dd
-        expected=${expected}${src_ip}${dst_ip}0304fcfb00000000
+        expected=${dst_mac}${src_mac}08004500${pkt_len}000000003f0121d4
+        expected=${expected}${src_ip}${dst_ip}0800f7ff00000000
         expected=${expected}000000000000000000000000000000000000
         expected=${expected}000000000000000000000000000000000000
         expected=${expected}000000000000000000000000000000000000
@@ -19793,7 +19885,7 @@ test_ip_packet_larger_ext() {
     # Set the packet length to 114.
     pkt_len=0072
     packet=${dst_mac}${src_mac}08004500${pkt_len}000000004001${checksum}
-    orig_packet_l3=${src_ip}${dst_ip}0900000000000000
+    orig_packet_l3=${src_ip}${dst_ip}0800f7ff00000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
     orig_packet_l3=${orig_packet_l3}000000000000000000000000000000000000
@@ -19810,7 +19902,7 @@ test_ip_packet_larger_ext() {
     dst_ip=`ip_to_hex 172 168 0 4`
     # pkt len should be 146 (28 (icmp packet) + 118 (orig ip + payload))
     reply_pkt_len=008e
-    ip_csum=f39b
+    ip_csum=$7
     icmp_reply=${src_mac}${dst_mac}08004500${reply_pkt_len}00004000fe01${reply_checksum}
     icmp_reply=${icmp_reply}${src_ip}${dst_ip}0304${ip_csum}0000$(printf "%04x" $mtu)
     icmp_reply=${icmp_reply}4500${pkt_len}000000004001${checksum}
@@ -19985,10 +20077,10 @@ OVS_WAIT_FOR_OUTPUT([
 ])
 
 AS_BOX([testing ingress traffic mtu 100 - IPv4])
-test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20cf 100 22b6
+test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20d3 100 22b6 fc97
 
 AS_BOX([testing ingress traffic mtu 100 - IPv4 FIP])
-test_ip_packet_larger_ext 2 f00000010204 $(ip_to_hex 172 168 0 110) 20c5 100 22ac
+test_ip_packet_larger_ext 2 f00000010204 $(ip_to_hex 172 168 0 110) 20c5 100 22ac fc9b
 
 AS_BOX([testing ingress traffic mtu 100 - IPv6])
 test_ip6_packet_larger_ext 1 000020201213 20000000000000000000000000000001 100 cc7a
@@ -20055,10 +20147,10 @@ OVS_WAIT_FOR_OUTPUT([
 ])
 
 AS_BOX([testing ingress traffic mtu 100 for gw router - IPv4])
-test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20cf 100 22b6
+test_ip_packet_larger_ext 1 000020201213 $(ip_to_hex 172 168 0 100) 20d3 100 22b6 fc97
 
 AS_BOX([testing ingress traffic mtu 100 for gw router - IPv6])
-test_ip6_packet_larger_ext 1 000020201213 20000000000000000000000000000001 100 cc7a
+test_ip6_packet_larger_ext 1 000020201213 20000000000000000000000000000001 100 cc7a fc9b
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -21116,7 +21208,7 @@ check_virtual_offlows_not_present hv2
 send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv1_ch_uuid])
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p1])
@@ -21184,7 +21276,7 @@ tpa=$(ip_to_hex 10 0 0 10)
 send_garp 1 2 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv1_ch_uuid])
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p3])
@@ -21217,7 +21309,7 @@ tpa=$(ip_to_hex 10 0 0 10)
 send_garp 2 1 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv2_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv2_ch_uuid])
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p2])
@@ -21249,7 +21341,7 @@ tpa=$(ip_to_hex 10 0 0 4)
 send_arp_reply 1 1 $eth_src $eth_dst $spa $tpa
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv1_ch_uuid])
 sleep 1
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
@@ -21275,7 +21367,7 @@ check_virtual_offlows_not_present hv2
 as hv1 ovs-vsctl del-port hv1-vif1
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x], [0], [])
+logical_port=sw0-vir) = x])
 sleep 1
 
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
@@ -21310,7 +21402,7 @@ send_arp_reply 2 1 $eth_src $eth_dst $spa $tpa
 sleep 1
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv2_ch_uuid], [0], [])
+logical_port=sw0-vir) = x$hv2_ch_uuid])
 sleep 1
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p2])
@@ -21335,7 +21427,7 @@ check_virtual_offlows_not_present hv1
 ovn-nbctl lsp-del sw0-p2
 
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x], [0], [])
+logical_port=sw0-vir) = x])
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = x])
 
@@ -21516,7 +21608,7 @@ AT_CAPTURE_FILE([offlows])
 packet0="inport==\"sw0-p11\" && eth.src==00:00:00:00:00:11 && eth.dst==00:00:00:00:00:21 &&
          ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==192.168.1.100 &&
          tcp && tcp.src==10000 && tcp.dst==80"
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet0"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet0"])
 ovn-nbctl --wait=hv
 
 ovn-sbctl list controller_event > events
@@ -21545,7 +21637,7 @@ packet1="inport==\"sw1-p0\" && eth.src==00:00:00:00:00:33 && eth.dst==00:00:00:0
          ip4 && ip.ttl==64 && ip4.src==192.168.2.11 && ip4.dst==192.168.2.100 &&
          tcp && tcp.src==10000 && tcp.dst==80"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet1"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet1"])
 ovn-nbctl --wait=hv
 ovn-sbctl list controller_event
 uuid=$(ovn-sbctl list controller_event | awk '/_uuid/{print $3}')
@@ -21561,7 +21653,7 @@ packet2="inport==\"sw0-p11\" && eth.src==00:00:00:00:00:11 && eth.dst==00:00:00:
          ip6 && ip.ttl==64 && ip6.src==2001::11 && ip6.dst==2001::10 &&
          tcp && tcp.src==10000 && tcp.dst==50051"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet2"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet2"])
 ovn-nbctl --wait=hv
 ovn-sbctl list controller_event
 uuid=$(ovn-sbctl list controller_event | awk '/_uuid/{print $3}')
@@ -23744,7 +23836,7 @@ send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
 wait_row_count MAC_Binding 1
 
-OVS_WAIT_UNTIL([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
+OVS_WAIT_FOR_OUTPUT([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
 list mac_binding], [0], [lr0-sw0
 10.0.0.30
 50:54:00:00:00:03
@@ -23791,7 +23883,7 @@ grep table_id=10 | wc -l`])
 
 check_row_count MAC_Binding 1
 
-OVS_WAIT_UNTIL([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
+OVS_WAIT_FOR_OUTPUT([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
 list mac_binding], [0], [lr0-sw0
 10.0.0.30
 50:54:00:00:00:13
@@ -23820,7 +23912,7 @@ OVS_WAIT_UNTIL(
 | wc -l`]
 )
 
-OVS_WAIT_UNTIL([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
+OVS_WAIT_FOR_OUTPUT([ovn-sbctl --format=csv --bare --columns logical_port,ip,mac \
 find mac_binding ip=10.0.0.50], [0], [lr0-sw0
 10.0.0.50
 50:54:00:00:00:33
@@ -24377,7 +24469,7 @@ AT_CAPTURE_FILE([sbflows2])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows > sbflows2
    ovn-sbctl dump-flows lr0 | grep ct_lb_mark | grep priority=120 | sed 's/table=..//'], 0,
-  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
+  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
 ])
 
 # get the svc monitor mac.
@@ -24419,8 +24511,7 @@ AT_CHECK(
 AT_CAPTURE_FILE([sbflows4])
 ovn-sbctl dump-flows lr0 > sbflows4
 AT_CHECK([grep lr_in_dnat sbflows4 | grep priority=120 | sed 's/table=..//' | sort], [0], [dnl
-  (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
+  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip4 && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
 ])
 
 # Delete sw0-p1
@@ -24576,7 +24667,7 @@ AT_CAPTURE_FILE([sbflows2])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows > sbflows2
    ovn-sbctl dump-flows lr0 | grep ct_lb_mark | grep priority=120 | sed 's/table=..//'], 0,
-  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && xxreg0 == 2001::a && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=[[2001::3]]:80,[[2002::3]]:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
+  [  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && ip6.dst == 2001::a && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=[[2001::3]]:80,[[2002::3]]:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
 ])
 
 # get the svc monitor mac.
@@ -24618,8 +24709,7 @@ AT_CHECK(
 AT_CAPTURE_FILE([sbflows4])
 ovn-sbctl dump-flows lr0 > sbflows4
 AT_CHECK([grep lr_in_dnat sbflows4 | grep priority=120 | sed 's/table=..//' | sort], [0], [dnl
-  (lr_in_dnat         ), priority=120  , match=(ct.est && !ct.rel && ip6 && xxreg0 == 2001::a && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
-  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && xxreg0 == 2001::a && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
+  (lr_in_dnat         ), priority=120  , match=(ct.new && !ct.rel && ip6 && ip6.dst == 2001::a && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
 ])
 
 # Delete sw0-p1
@@ -25447,7 +25537,7 @@ for s_az in $(seq 1 $n_az); do
                     udp && udp.src==53 && udp.dst==4369"
             echo "sending: $packet"
             AT_CHECK([ovn_trace --ovs "$packet" > ${s_az}-${d_az}-$i.ovn-trace])
-            AT_CHECK([ovs-appctl -t ovn-controller inject-pkt "$packet"])
+            OVS_WAIT_UNTIL([ovs-appctl -t ovn-controller inject-pkt "$packet"])
             ovs_inport=$(ovs-vsctl --bare --columns=ofport find Interface external-ids:iface-id="$ovn_inport")
 
             ovs_packet=$(echo $packet | ovstest test-ovn expr-to-packets)
@@ -26002,7 +26092,7 @@ for i in $(seq 5001 5010); do
     packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
             ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==10.0.0.123 &&
             tcp && tcp.src==$i && tcp.dst==80"
-    AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+    OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
     for j in 1 2; do
         # Assume all packets go to lsp2${j}.
@@ -26121,7 +26211,7 @@ wait_for_ports_up
 # Test 1
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==2.2.2.2 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume no packets go neither to lsp21 nor to lsp22.
 > expected_lsp21
@@ -26151,7 +26241,7 @@ done
 # Test 2
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==1.1.1.1 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp22.
 exp_packet="eth.src==00:00:00:01:02:01 && eth.dst==f0:00:00:00:02:22 &&
@@ -26181,7 +26271,7 @@ done
 # Test 3
 packet="inport==\"lsp21\" && eth.src==f0:00:00:00:02:21 && eth.dst==00:00:00:01:02:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.2.21 && ip4.dst==2.2.2.2 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp21.
 exp_packet="eth.src==00:00:00:01:02:01 && eth.dst==f0:00:00:00:02:21 &&
@@ -26278,7 +26368,7 @@ wait_for_ports_up
 # test 1
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==192.168.2.21 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp21.
 exp_packet="eth.src==00:00:00:01:02:01 && eth.dst==f0:00:00:00:02:21 && ip4 &&
@@ -26312,7 +26402,7 @@ ovs-vsctl set interface hv1-vif2 options:tx_pcap=hv1/vif2-tx.pcap
 # test 2
 packet="inport==\"lsp11\" && eth.src==f0:00:00:00:01:11 && eth.dst==00:00:00:01:01:01 &&
         ip4 && ip.ttl==64 && ip4.src==192.168.1.11 && ip4.dst==192.168.2.200 && icmp"
-AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Assume all packets go to lsp11.
 exp_packet="eth.src==00:00:00:01:01:01 && eth.dst==f0:00:00:00:01:11 && ip4 &&
@@ -26417,7 +26507,7 @@ for i in $(seq 1 2); do
     packet="inport==\"lsp${i}1\" && eth.src==f0:00:00:00:0${i}:1${i} &&
             eth.dst==00:00:00:01:0${i}:01 && ip4 && ip.ttl==64 &&
             ip4.src==192.168.${i}.${i}1 && ip4.dst==10.0.0.1 && icmp"
-    AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+    OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
     # Assume all packets go to lsp${di}1.
     exp_packet="eth.src==00:00:00:01:0${di}:01 && eth.dst==f0:00:00:00:0${di}:1${di} &&
@@ -26530,7 +26620,7 @@ for i in $(seq 1 2); do
     packet="inport==\"lsp${i}1\" && eth.src==f0:00:00:00:0${i}:1${i} &&
             eth.dst==00:00:00:01:0${i}:01 && ip6 && ip.ttl==64 &&
             ip6.src==2001:db8:${i}::${i}1 && ip6.dst==2001:db8:2000::1 && icmp6"
-    AT_CHECK([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
+    OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
     # Assume all packets go to lsp${di}1.
     exp_packet="eth.src==00:00:00:01:0${di}:01 && eth.dst==f0:00:00:00:0${di}:1${di} && ip6 &&
@@ -26650,7 +26740,7 @@ dst_ip=172.16.1.11
 packet="inport==\"lsp11\" && eth.src==$src_mac && eth.dst==$dst_mac &&
         ip4 && ip.ttl==64 && ip4.src==$src_ip && ip4.dst==$dst_ip &&
         udp && udp.src==53 && udp.dst==4369"
-check as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if the packet hit the forwarding group policy
 AT_CAPTURE_FILE([offlows2])
@@ -27173,7 +27263,7 @@ ovn_attach n1 br-phys 192.168.0.1
 
 # Chassis hv1 should add flows for the ls1 datapath in table 8 (ls_in_port_sec_l2).
 dp_key=$(ovn-sbctl --bare --columns tunnel_key list Datapath_Binding ls1)
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=8.*metadata=0x${dp_key}"], [0], [ignore])
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=8.*metadata=0x${dp_key}"])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -27199,7 +27289,7 @@ ovs-vsctl add-br br-phys
 ovn_attach n1 br-phys 192.168.0.1
 
 # Port_Binding should be released.
-OVS_WAIT_UNTIL([test 0 = $(ovn-sbctl show | grep Port_Binding -c)], [0])
+OVS_WAIT_UNTIL([test 0 = $(ovn-sbctl show | grep Port_Binding -c)])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
@@ -27332,22 +27422,24 @@ sleep 5
 send_ipv4_pkt() {
     local hv=$1 inport=$2 eth_src=$3 eth_dst=$4
     local ip_src=$5 ip_dst=$6
-    packet=${eth_dst}${eth_src}08004500001c0000000040110000${ip_src}${ip_dst}0035111100080000
+    local ip_cksum=$7 tcp_cksum=$8
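+    # 40-byte IPv4 TCP SYN: sport 53 (0x0035), dport 4369 (0x1111); the
+    # caller passes in precomputed IP and TCP checksums so the packet is valid.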
+    packet=${eth_dst}${eth_src}080045000028000000004006${ip_cksum}${ip_src}${ip_dst}0035111112345678000000005002faf0${tcp_cksum}0000
     tcpdump_hex $packet
     as $hv ovs-appctl netdev-dummy/receive ${inport} ${packet}
 }
 
 send_icmp6_packet() {
-    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6
+    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_dst=$6 tcp_cksum=$7
 
-    local ip6_hdr=6000000000083aff${ipv6_src}${ipv6_dst}
-    local packet=${eth_dst}${eth_src}86dd${ip6_hdr}8000dcb662f00001
+    local ip6_hdr=60000000001406ff${ipv6_src}${ipv6_dst}
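+    # Despite the function name, this now sends a TCP SYN over IPv6
+    # (next header 6, 20-byte payload); the TCP checksum is supplied by the caller.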
+    local packet=${eth_dst}${eth_src}86dd${ip6_hdr}0035111112345678000000005002faf0${tcp_cksum}0000
 
     as $hv ovs-appctl netdev-dummy/receive ${inport} ${packet}
 }
 
 send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
-    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120) \
+    c3ad 83dc
 
 AT_CAPTURE_FILE([offlows2])
 OVS_WAIT_UNTIL([
@@ -27364,7 +27456,8 @@ AT_CHECK([
 
 # Send the pkt from sw0-port2. Packet should not be marked.
 send_ipv4_pkt hv1 hv1-vif2 505400000004 00000000ff01 \
-    $(ip_to_hex 10 0 0 4) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 4) $(ip_to_hex 172 168 0 120) \
+    c3ac 83db
 
 AT_CHECK([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27398,7 +27491,8 @@ AT_CHECK([
 
 ovn-nbctl set logical_router_policy $pol1 options:pkt_mark=2
 send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
-    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120) \
+    c3ad 83dc
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=23 | \
@@ -27431,7 +27525,8 @@ AT_CHECK([
 # Send with src ip 10.0.0.5. The reroute policy should be hit
 # and the packet should be marked with 5.
 send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
-    $(ip_to_hex 10 0 0 5) $(ip_to_hex 172 168 0 120)
+    $(ip_to_hex 10 0 0 5) $(ip_to_hex 172 168 0 120) \
+    c3ab 83da
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27443,7 +27538,7 @@ OVS_WAIT_UNTIL([
 src_ip6=aef00000000000000000000000000004
 dst_ip6=bef00000000000000000000000000004
 
-send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6}
+send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6} cd16
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -27463,7 +27558,7 @@ AT_CHECK([
 src_ip6=aef00000000000000000000000000004
 dst_ip6=bef00000000000000000000000000005
 
-send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6}
+send_icmp6_packet hv1 hv1-vif2 505400000004 00000000ff01 ${src_ip6} ${dst_ip6} cd15
 
 OVS_WAIT_UNTIL([
     test 1 -eq $(as hv1 ovs-ofctl dump-flows br-phys table=0 | \
@@ -28737,7 +28832,7 @@ src_mac="f00000000102"
 dst_mac="000000000101"
 src_ip=`ip_to_hex 10 0 1 2`
 dst_ip=`ip_to_hex 10 0 1 1`
-packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
+packet=${dst_mac}${src_mac}08004500001c00000000401164cf${src_ip}${dst_ip}0035111100080000
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
 
 # Even after configuring a router owned IP for SNAT, no packet-ins should
@@ -28763,7 +28858,7 @@ src_mac="f00000000202"
 dst_mac="000000000201"
 src_ip=`ip_to_hex 10 0 2 2`
 dst_ip=`ip_to_hex 10 0 1 1`
-packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
+packet=${dst_mac}${src_mac}08004500001c00000000401163cf${src_ip}${dst_ip}0035111100080000
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif2 $packet
 
 # Still no packet-ins should reach ovn-controller.
@@ -29548,7 +29643,9 @@ OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw1-p1) = xup])
 
 check ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp
 check ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
+check ovn-nbctl lb-add lb-ipv4 88.88.88.89 42.42.42.2
 check ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
+check ovn-nbctl lb-add lb-ipv6 8800::0089 4200::2
 check ovn-nbctl --wait=hv lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
 
 AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST], [1], [dnl
@@ -29839,6 +29936,119 @@ AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -
  table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
 ])
 
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4
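+# lb-ipv4 has no L4 port or protocol, so the new hairpin flows below are the
+# protocol-agnostic ones: a priority-100 learn flow in table 68 and a
+# priority-90 SNAT flow in table 70.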
+OVS_WAIT_UNTIL(
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 7]
+)
+
+OVS_WAIT_UNTIL(
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 7]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
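+# Table 69 is only populated by the learn() actions in table 68, so it must
+# still be empty before any hairpin traffic is sent.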
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+])
+
+check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6
+OVS_WAIT_UNTIL(
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 8]
+)
+
+OVS_WAIT_UNTIL(
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 8]
+)
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89,ipv6_src=4200::2,ipv6_dst=4200::2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+ table=70, priority=90,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::89))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_mark=0x2/0x2,ip,reg1=0x58585859,nw_src=42.42.42.2,nw_dst=42.42.42.2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89,ipv6_src=4200::2,ipv6_dst=4200::2 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::89,NXM_OF_IP_PROTO[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_mark=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=90,ip,reg1=0x58585859 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.89))
+ table=70, priority=90,ipv6,reg4=0x88000000,reg5=0,reg6=0,reg7=0x89 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::89))
+])
+
+check ovn-nbctl --wait=hv ls-lb-del sw0 lb-ipv4
+check ovn-nbctl --wait=hv ls-lb-del sw0 lb-ipv6
+
 # Check backwards compatibility with ovn-northd versions that don't store the
 # original destination tuple.
 #
@@ -31743,7 +31953,7 @@ packet="inport==\"sw1-lp1\" && eth.src==00:00:04:01:02:03 &&
        ip4.src==10.0.0.100 && ip4.dst==20.0.0.200 &&
        udp && udp.src==53 && udp.dst==4369"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop rule
 AT_CHECK([ovs-ofctl dump-flows br-int | grep "nw_dst=20.0.0.0/24" | \
@@ -31770,7 +31980,7 @@ packet="inport==\"sw1-lp1\" && eth.src==00:00:04:01:02:03 &&
        ip4.src==10.0.0.100 && ip4.dst==20.0.0.200 &&
        udp && udp.src==53 && udp.dst==4369"
 
-as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovs-appctl -t ovn-controller inject-pkt "$packet"])
 
 # Check if packet hit the drop rule
 AT_CHECK([ovs-ofctl dump-flows br-int "nw_src=10.0.0.0/24" | \
@@ -31857,7 +32067,7 @@ packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && eth.dst==$bcast_mac &&
        arp.op==1 && arp.sha==$ls1_p1_mac && arp.spa==$ls1_p1_ip &&
        arp.tha==$bcast_mac && arp.tpa==$proxy_ip1"
 
-as hv1 ovn-appctl -t ovn-controller inject-pkt "$packet"
+OVS_WAIT_UNTIL([as hv1 ovn-appctl -t ovn-controller inject-pkt "$packet"])
 
 as hv1 ovs-ofctl dump-flows br-int| grep 169.254.239.254 | grep priority=50 > debug1
 AT_CAPTURE_FILE([debug1])
@@ -32108,7 +32318,6 @@ ovn-nbctl lrp-set-gateway-chassis DR-S3 hv4
 
 ovn-nbctl --wait=sb sync
 OVN_POPULATE_ARP
-
 vif_to_ls () {
     case ${1} in dnl (
         vif?[[11]]) echo ls ;; dnl (
@@ -32222,6 +32431,9 @@ echo "Send Dummy ARP"
 sip=`ip_to_hex 172 16 1 10`
 tip=`ip_to_hex 172 16 1 50`
 test_arp vif-north1 f0f000000011 $sip $tip
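+# Wait until the MAC learned from the dummy ARP shows up as an OpenFlow flow
+# before sending the real traffic.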
+OVS_WAIT_UNTIL(
+    [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=f0:f0:00:00:00:11 | wc -l`]
+)
 
 echo "Send traffic North to South"
 sip=`ip_to_hex 172 16 1 10`
@@ -32242,6 +32454,9 @@ echo "Send Dummy ARP"
 sip=`ip_to_hex 10 0 0 10`
 tip=`ip_to_hex 10 0 0 50`
 test_arp vif-north2 f0f000000022 $sip $tip
+OVS_WAIT_UNTIL(
+    [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=f0:f0:00:00:00:22 | wc -l`]
+)
 
 echo "Send traffic South to North2"
 sip=`ip_to_hex 20 0 0 10`
@@ -32255,6 +32470,9 @@ echo "Send Dummy ARP"
 sip=`ip_to_hex 192 168 0 10`
 tip=`ip_to_hex 192 168 0 50`
 test_arp vif-north3 f0f000000033 $sip $tip
+OVS_WAIT_UNTIL(
+    [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=f0:f0:00:00:00:33 | wc -l`]
+)
 
 echo "Send traffic South to North3"
 sip=`ip_to_hex 20 0 0 10`
@@ -34926,7 +35144,8 @@ check ovs-vsctl add-port br-int p1 -- set interface p1 external_ids:iface-id=lsp
 wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
-check ovn-nbctl lb-add lb1 "192.168.0.10" "192.168.10.10,192.168.10.20"
+check ovn-nbctl lb-add lb1 "192.168.0.10" "192.168.10.10,192.168.10.20" \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add sw lb1
 
 # Remove a single backend
@@ -34949,7 +35168,8 @@ AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.0.10:0, backend=192.168.
 AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.0.10:0, backend=192.168.10.30:0, protocol=0" hv1/ovn-controller.log], [0])
 
 # Check flush for LB with port and protocol
-check ovn-nbctl lb-add lb1 "192.168.30.10:80" "192.168.40.10:8080,192.168.40.20:8090" udp
+check ovn-nbctl lb-add lb1 "192.168.30.10:80" "192.168.40.10:8080,192.168.40.20:8090" udp \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add sw lb1
 check ovn-nbctl lb-del lb1
 check ovn-nbctl --wait=hv sync
@@ -34958,7 +35178,8 @@ AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.30.10:80, backend=192.16
 AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.30.10:80, backend=192.168.40.20:8090, protocol=17" hv1/ovn-controller.log], [0])
 
 # Check recompute when LB is no longer local
-check ovn-nbctl lb-add lb1 "192.168.50.10:80" "192.168.60.10:8080"
+check ovn-nbctl lb-add lb1 "192.168.50.10:80" "192.168.60.10:8080" \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add sw lb1
 check ovs-vsctl remove interface p1 external_ids iface-id
 check ovn-appctl inc-engine/recompute
@@ -34968,6 +35189,193 @@ AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.50.10:80, backend=192.16
 
 AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
 
+# Check if CT flush is disabled by default
+check ovn-nbctl lb-del lb1
+check ovn-nbctl lb-add lb1 "192.168.70.10:80" "192.168.80.10:8080,192.168.90.10:8080"
+check ovn-nbctl ls-lb-add sw lb1
+check ovs-vsctl set interface p1 external_ids:iface-id=lsp1
+check ovn-nbctl --wait=hv sync
+
+AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
+
+# Remove one backend
+check ovn-nbctl --wait=hv set load_balancer lb1 vips='"192.168.70.10:80"="192.168.80.10:8080"'
+
+AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.70.10:80, backend=192.168.90.10:8080, protocol=6" hv1/ovn-controller.log], [1])
+AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
+
+check ovn-nbctl --wait=hv lb-del lb1
+AT_CHECK([grep -q "Flushing CT for 5-tuple: vip=192.168.70.10:80, backend=192.168.80.10:8080, protocol=6" hv1/ovn-controller.log], [1])
+AT_CHECK([test "$(grep -c "Flushing CT for 5-tuple" hv1/ovn-controller.log)" = "6"], [0])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([Re-create encap tunnels during integration bridge migration])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.2
+
+check ovn-nbctl --wait=hv sync
+
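+# check_tunnel_port HV BRIDGE CHASSIS-ID: wait until exactly one tunnel port
+# exists for CHASSIS-ID and check that it is attached to BRIDGE.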
+check_tunnel_port() {
+    local hv=$1
+    local br=$2
+    local id=$3
+
+    as $hv
+    OVS_WAIT_UNTIL([
+        test "$(ovs-vsctl --format=table --no-headings find port external_ids:ovn-chassis-id="$id" | wc -l)" = "1"
+    ])
+    local tunnel_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="$id")
+    AT_CHECK([ovs-vsctl --bare --columns ports find bridge name="$br" | grep -q "$tunnel_id"])
+}
+
+# Check that both chassis have a tunnel to each other
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+check_tunnel_port hv2 br-int hv1@192.168.0.1
+
+# Stop ovn-controller on hv1
+check as hv1 ovn-appctl -t ovn-controller exit --restart
+
+# The tunnel should remain intact
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+
+# Change the bridge to br-int1 on hv1
+as hv1
+check ovs-vsctl add-br br-int1
+check ovs-vsctl set open . external_ids:ovn-bridge="br-int1"
+start_daemon ovn-controller --verbose="encaps:dbg"
+check ovn-nbctl --wait=hv sync
+
+# Check that the tunnel was created on br-int1 instead
+check_tunnel_port hv1 br-int1 hv2@192.168.0.2
+check grep -q "Clearing old tunnel port \"ovn-hv2-0\" (hv2@192.168.0.2) from bridge \"br-int\"" hv1/ovn-controller.log
+
+# Change the bridge to br-int1 on hv2
+as hv2
+check ovn-appctl vlog/set encaps:dbg
+check ovs-vsctl add-br br-int1
+check ovs-vsctl set open . external_ids:ovn-bridge="br-int1"
+check ovn-nbctl --wait=hv sync
+
+
+# Check that the tunnel was created on br-int1 instead
+check_tunnel_port hv2 br-int1 hv1@192.168.0.1
+check grep -q "Clearing old tunnel port \"ovn-hv1-0\" (hv1@192.168.0.1) from bridge \"br-int\"" hv2/ovn-controller.log
+
+# Stop ovn-controller on hv1
+check as hv1 ovn-appctl -t ovn-controller exit --restart
+
+# The tunnel should remain intact
+check_tunnel_port hv1 br-int1 hv2@192.168.0.2
+prev_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# Start the controller again
+start_daemon ovn-controller --verbose="encaps:dbg"
+check ovn-nbctl --wait=hv sync
+check_tunnel_port hv1 br-int1 hv2@192.168.0.2
+current_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# The tunnel should be the same after restart
+check test "$current_id" = "$prev_id"
+
+OVN_CLEANUP([hv1],[hv2])
+AT_CLEANUP
+])
+
+# NOTE: This test case runs two ovn-controllers inside the same sandbox (hv1).
+# Each controller uses a unique chassis name - hv1 and hv2 - and manages
+# different bridges with different ports.
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([Encaps tunnel cleanup does not interfere with multiple controllers on the same host])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys-1
+ovn_attach n1 br-phys-1 192.168.0.1 24
+
+
+# now start the second virtual controller
+ovs-vsctl add-br br-phys-2
+
+
+# The file is read only once at startup, so it's safe to write it
+# here after the first ovn-controller has started.
+echo hv2 > ${OVN_SYSCONFDIR}/system-id-override
+
+# The SSL configuration in ovsdb overrides the CLI options, so delete
+# the ssl config from ovsdb to give the CLI arguments priority.
+ovs-vsctl del-ssl
+
+start_virtual_controller n1 br-phys-2 br-int-2 192.168.0.2 24 geneve,vxlan hv2 \
+    --pidfile=${OVS_RUNDIR}/ovn-controller-2.pid \
+    --log-file=${OVS_RUNDIR}/ovn-controller-2.log \
+    -p $PKIDIR/testpki-hv2-privkey.pem \
+    -c $PKIDIR/testpki-hv2-cert.pem \
+    -C $PKIDIR/testpki-cacert.pem
+pidfile="$OVS_RUNDIR"/ovn-controller-2.pid
+on_exit "test -e \"$pidfile\" && kill \`cat \"$pidfile\"\`"
+
+ovn-nbctl --wait=hv sync
+
+check_tunnel_port() {
+    local hv=$1
+    local br=$2
+    local id=$3
+
+    as $hv
+    OVS_WAIT_UNTIL([
+        test "$(ovs-vsctl --format=table --no-headings find port external_ids:ovn-chassis-id="$id" | wc -l)" = "1"
+    ])
+    local tunnel_id=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="$id")
+    AT_CHECK([ovs-vsctl --bare --columns ports find bridge name="$br" | grep -q "$tunnel_id"])
+}
+
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+check_tunnel_port hv1 br-int-2 hv1@192.168.0.1
+prev_id1=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv1@192.168.0.1")
+prev_id2=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# hv2 is running now, so we can remove the override file.
+rm -f ${OVN_SYSCONFDIR}/system-id-override
+
+check ovn-appctl -t ovn-controller exit --restart
+
+# The SSL configuration in ovsdb overrides the CLI options, so delete
+# the ssl config from ovsdb to give the CLI arguments priority.
+ovs-vsctl del-ssl
+
+start_daemon ovn-controller --verbose="encaps:dbg" \
+    -p $PKIDIR/testpki-hv1-privkey.pem \
+    -c $PKIDIR/testpki-hv1-cert.pem \
+    -C $PKIDIR/testpki-cacert.pem
+
+check ovn-nbctl --wait=hv sync
+
+check_tunnel_port hv1 br-int hv2@192.168.0.2
+check_tunnel_port hv1 br-int-2 hv1@192.168.0.1
+current_id1=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv1@192.168.0.1")
+current_id2=$(ovs-vsctl --bare --columns _uuid find port external_ids:ovn-chassis-id="hv2@192.168.0.2")
+
+# Check that restart of hv1 ovn-controller did not interfere with hv2
+AT_CHECK([grep -q "Clearing old tunnel port \"ovn0-hv1-0\" (hv1@192.168.0.1) from bridge \"br-int-2\"" hv1/ovn-controller.log], [1])
+check test "$current_id1" = "$prev_id1"
+check test "$current_id2" = "$prev_id2"
+
 OVN_CLEANUP([hv1])
 AT_CLEANUP
 ])
diff --git a/tests/ovs-macros.at b/tests/ovs-macros.at
index 36b58b5ae..cc5f6e3b1 100644
--- a/tests/ovs-macros.at
+++ b/tests/ovs-macros.at
@@ -256,6 +256,13 @@ ovs_wait () {
     ovs_wait_failed
     AT_FAIL_IF([:])
 }
+
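+# OVS_WAIT_UNTIL takes a condition and an optional IF-FAILED command; fail the
+# test if a caller passes extra arguments or an AT_CHECK-style exit status.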
+check_ovs_wait_until_args() {
+   AT_FAIL_IF([test $1 -ge 3])
+   dnl The second argument should not be a number (confused with AT_CHECK?).
+   AT_FAIL_IF([test $1 -eq 2 && test "$2" -eq "$2" 2>/dev/null])
+}
+
 OVS_END_SHELL_HELPERS
 m4_define([OVS_WAIT], [dnl
 ovs_wait_cond () {
@@ -276,7 +283,8 @@ dnl zero code within reasonable time limit, then
 dnl the test fails.  In that case, runs IF-FAILED
 dnl before aborting.
 m4_define([OVS_WAIT_UNTIL],
-  [OVS_WAIT([$1], [$2], [AT_LINE], [until $1])])
+  [check_ovs_wait_until_args "$#" "$2"
+   OVS_WAIT([$1], [$2], [AT_LINE], [until $1])])
 
 dnl OVS_WAIT_FOR_OUTPUT(COMMAND, EXIT-STATUS, STDOUT, STDERR)
 dnl OVS_WAIT_FOR_OUTPUT_UNQUOTED(COMMAND, EXIT-STATUS, STDOUT, STDERR)
diff --git a/tests/system-common-macros.at b/tests/system-common-macros.at
index d65f359a6..b8c5ae9ad 100644
--- a/tests/system-common-macros.at
+++ b/tests/system-common-macros.at
@@ -44,43 +44,8 @@ m4_define([NS_CHECK_EXEC],
 # appropriate type, and allows additional arguments to be passed.
 m4_define([ADD_BR], [ovs-vsctl _ADD_BR([$1]) -- $2])
 
-# ADD_INT([port], [namespace], [ovs-br], [ip_addr] [ip6_addr])
-#
-# Add an internal port to 'ovs-br', then shift it into 'namespace' and
-# configure it with 'ip_addr' (specified in CIDR notation).
-# Optionally add an ipv6 address
-m4_define([ADD_INT],
-    [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal])
-      AT_CHECK([ip link set $1 netns $2])
-      NS_CHECK_EXEC([$2], [ip addr add $4 dev $1])
-      NS_CHECK_EXEC([$2], [ip link set dev $1 up])
-      if test -n "$5"; then
-        NS_CHECK_EXEC([$2], [ip -6 addr add $5 dev $1])
-      fi
-    ]
-)
-
-# NS_ADD_INT([port], [namespace], [ovs-br], [ip_addr] [mac_addr] [ip6_addr] [default_gw] [default_ipv6_gw])
-# Create a namespace
-# Add an internal port to 'ovs-br', then shift it into 'namespace'.
-# Configure it with 'ip_addr' (specified in CIDR notation) and ip6_addr.
-# Set mac_addr
-# Add default gw for ipv4 and ipv6
-m4_define([NS_ADD_INT],
-    [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal  external_ids:iface-id=$1])
-      ADD_NAMESPACES($2)
-      AT_CHECK([ip link set $1 netns $2])
-      NS_CHECK_EXEC([$2], [ip link set $1 address $5])
-      NS_CHECK_EXEC([$2], [ip link set dev $1 up])
-      NS_CHECK_EXEC([$2], [ip addr add $4 dev $1])
-      NS_CHECK_EXEC([$2], [ip addr add $6 dev $1])
-      NS_CHECK_EXEC([$2], [ip route add default via $7 dev $1])
-      NS_CHECK_EXEC([$2], [ip -6 route add default via $8 dev $1])
-    ]
-)
-
 # ADD_VETH([port], [namespace], [ovs-br], [ip_addr] [mac_addr], [gateway],
-#          [ip_addr_flags])
+#          [ip_addr_flags] [ip6_addr] [gateway6])
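+# e.g. ADD_VETH(p0, ns0, br-int, "192.168.1.1/24", "00:00:00:01:01:01",
+#               "192.168.1.254", , "2001::1/64", "2001::a")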
 #
 # Add a pair of veth ports. 'port' will be added to name space 'namespace',
 # and "ovs-'port'" will be added to ovs bridge 'ovs-br'.
@@ -108,6 +73,12 @@ m4_define([ADD_VETH],
       if test -n "$6"; then
         NS_CHECK_EXEC([$2], [ip route add default via $6])
       fi
+      if test -n "$8"; then
+        NS_CHECK_EXEC([$2], [ip addr add $8 dev $1])
+      fi
+      if test -n "$9"; then
+        NS_CHECK_EXEC([$2], [ip route add default via $9])
+      fi
       on_exit "ip link del ovs-$1"
     ]
 )
@@ -263,7 +234,7 @@ m4_define([STRIP_MONITOR_CSUM], [grep "csum:" | sed 's/csum:.*/csum: <skip>/'])
 # and limit the output to the rows containing 'ip-addr'.
 #
 m4_define([FORMAT_CT],
-    [[grep -F "dst=$1" | sed -e 's/port=[0-9]*/port=<cleared>/g' -e 's/id=[0-9]*/id=<cleared>/g' -e 's/state=[0-9_A-Z]*/state=<cleared>/g' | sort | uniq]])
+    [[grep -F "dst=$1," | sed -e 's/port=[0-9]*/port=<cleared>/g' -e 's/id=[0-9]*/id=<cleared>/g' -e 's/state=[0-9_A-Z]*/state=<cleared>/g' | sort | uniq]])
 
 # NETNS_DAEMONIZE([namespace], [command], [pidfile])
 #
diff --git a/tests/system-ovn-kmod.at b/tests/system-ovn-kmod.at
index dd4996041..3c3e5bc61 100644
--- a/tests/system-ovn-kmod.at
+++ b/tests/system-ovn-kmod.at
@@ -215,3 +215,139 @@ as
 OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([LB correctly de-fragments traffic])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+AT_SKIP_IF([test $HAVE_SCAPY = no])
+
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-ext])
+
+# Logical network:
+# 2 logical switches "public" (192.168.1.0/24) and "internal" (172.16.1.0/24)
+# connected to a router lr.
+# internal has a server.
+# client is connected through localnet.
+#
+# A UDP load balancer maps VIP 192.168.1.20:4242 to backend 172.16.1.2:4242.
+
+check ovs-ofctl add-flow br-ext action=normal
+# Set external-ids in br-int needed for ovn-controller
+check ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true \
+        -- set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext
+
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add lr
+check ovn-nbctl ls-add internal
+check ovn-nbctl ls-add public
+
+check ovn-nbctl lrp-add lr lr-pub 00:00:01:01:02:03 192.168.1.1/24
+check ovn-nbctl lsp-add  public pub-lr -- set Logical_Switch_Port pub-lr \
+    type=router options:router-port=lr-pub addresses=\"00:00:01:01:02:03\"
+
+check ovn-nbctl lrp-add lr lr-internal 00:00:01:01:02:04 172.16.1.1/24
+check ovn-nbctl lsp-add internal internal-lr -- set Logical_Switch_Port internal-lr \
+    type=router options:router-port=lr-internal addresses=\"00:00:01:01:02:04\"
+
+ovn-nbctl lsp-add public ln_port \
+                -- lsp-set-addresses ln_port unknown \
+                -- lsp-set-type ln_port localnet \
+                -- lsp-set-options ln_port network_name=phynet
+
+ADD_NAMESPACES(client)
+ADD_VETH(client, client, br-ext, "192.168.1.2/24", "f0:00:00:01:02:03", \
+         "192.168.1.1")
+
+ADD_NAMESPACES(server)
+ADD_VETH(server, server, br-int, "172.16.1.2/24", "f0:00:0f:01:02:03", \
+         "172.16.1.1")
+check ovn-nbctl lsp-add internal server \
+-- lsp-set-addresses server "f0:00:0f:01:02:03 172.16.1.2"
+
+# Config OVN load-balancer with a VIP.
+check ovn-nbctl lb-add lb1 192.168.1.20:4242 172.16.1.2:4242 udp
+check ovn-nbctl lr-lb-add lr lb1
+check ovn-nbctl set logical_router lr options:chassis=hv1
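+# Use a small gateway_mtu so the first oversized UDP request triggers an
+# ICMP "need to frag" back to the client.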
+check ovn-nbctl set logical_router_port lr-internal options:gateway_mtu=800
+
+ovn-nbctl --wait=hv sync
+
+NETNS_DAEMONIZE([server], [nc -l -u 172.16.1.2 4242 > /dev/null], [server.pid])
+
+# Collect ICMP packets on client side
+NETNS_DAEMONIZE([client], [tcpdump -l -U -i client -vnne \
+icmp > client.pcap 2>client_err], [tcpdump0.pid])
+OVS_WAIT_UNTIL([grep "listening" client_err])
+
+# Collect UDP packets on server side
+NETNS_DAEMONIZE([server], [tcpdump -l -U -i server -vnne \
+'udp and ip[[6:2]] > 0 and not ip[[6]] = 64' > server.pcap 2>server_err], [tcpdump1.pid])
+OVS_WAIT_UNTIL([grep "listening" server_err])
+
+check ip netns exec client python3 << EOF
+import os
+import socket
+import sys
+import time
+
+FILE="client.pcap"
+
+
+def contains_string(file, str):
+    file = open(file, "r")
+    for line in file.readlines():
+        if str in line:
+            return True
+    return False
+
+
+def need_frag_received():
+    for _ in range(20):
+        if os.path.getsize(FILE) and contains_string(FILE, "need to frag"):
+            return True
+        time.sleep(0.5)
+    return False
+
+
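+# Send one oversized datagram to the VIP; once the ICMP "need to frag" shows
+# up in client.pcap, send again so the fragmented datagram exercises the LB
+# de-fragmentation path.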
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+sock.sendto(b"x" * 1000, ("192.168.1.20", 4242))
+if need_frag_received():
+    sock.sendto(b"x" * 1000, ("192.168.1.20", 4242))
+else:
+    print("Missing need frag")
+    sys.exit(1)
+EOF
+
+OVS_WAIT_UNTIL([test "$(cat server.pcap | wc -l)" = "4"])
+
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index 84a459d6a..40f808515 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -1569,7 +1569,6 @@ bar3_ct=$(ovs-appctl dpctl/dump-conntrack | grep 30.0.0.2 | grep 172.16.1.4 -c)
 AT_CHECK([test $(ovs-appctl dpctl/dump-conntrack | grep 30.0.0.2 | grep 172.16.1 -c) -ne 0])
 
 if [[ "$bar1_ct" == "20" ]]; then
-    AT_CHECK([test $bar1_ct -eq 20])
     AT_CHECK([test $bar2_ct -eq 0])
     AT_CHECK([test $bar3_ct -eq 0])
 else
@@ -1577,17 +1576,15 @@ else
 fi
 
 if [[ "$bar2_ct" == "20" ]]; then
-    AT_CHECK([test $bar1_ct -eq 20])
-    AT_CHECK([test $bar2_ct -eq 0])
+    AT_CHECK([test $bar1_ct -eq 0])
     AT_CHECK([test $bar3_ct -eq 0])
 else
     AT_CHECK([test $bar2_ct -eq 0])
 fi
 
 if [[ "$bar3_ct" == "20" ]]; then
-    AT_CHECK([test $bar1_ct -eq 20])
+    AT_CHECK([test $bar1_ct -eq 0])
     AT_CHECK([test $bar2_ct -eq 0])
-    AT_CHECK([test $bar3_ct -eq 0])
 else
     AT_CHECK([test $bar3_ct -eq 0])
 fi
@@ -4850,9 +4847,9 @@ NS_CHECK_EXEC([lsp], [tcpdump -l -nn -c 3 -i lsp ${filter} > lsp.pcap 2>tcpdump_
 OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
 
 # Generate IPv4 UDP hairpin traffic.
-NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.88 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.89 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.90 2021 &], [0])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.88 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.89 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.90 2021], [ignore], [ignore], [ignore])
 
 # Check hairpin traffic.
 OVS_WAIT_UNTIL([
@@ -4949,9 +4946,9 @@ NS_CHECK_EXEC([lsp], [tcpdump -l -nn -c 3 -i lsp $filter > lsp.pcap 2>tcpdump_er
 OVS_WAIT_UNTIL([grep "listening" tcpdump_err])
 
 # Generate IPv6 UDP hairpin traffic.
-NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0088 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0089 4040 &], [0])
-NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0090 2021 &], [0])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0088 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0089 4040], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0090 2021], [ignore], [ignore], [ignore])
 
 # Check hairpin traffic.
 OVS_WAIT_UNTIL([
@@ -7190,7 +7187,7 @@ NS_EXEC([sw01], [tcpdump -l -n -i sw01 icmp -Q in > reject.pcap &])
 check ovn-nbctl --may-exist meter-add acl-meter drop 10 pktps 0
 ip netns exec sw01 scapy -H <<-EOF
 p = IP(src="192.168.1.2", dst="192.168.1.1") / UDP(dport = 12345) / Raw(b"X"*64)
-send (p, iface='sw01', loop = 0, verbose = 0, count = 100)
+send (p, iface='sw01', loop = 0, verbose = 0, count = 40)
 EOF
 
 # 10pps
@@ -8482,11 +8479,18 @@ check ovn-nbctl lsp-set-addresses ln unknown
 check ovn-nbctl lr-nat-add lr1 snat 172.16.1.10 192.168.1.0/24
 check ovn-nbctl lr-nat-add lr1 snat 1711::10 2001::/64
 
-NS_ADD_INT(ls1p1, ls1p1, br-int, "192.168.1.1/24", "00:00:00:01:01:01", "2001::1/64", "192.168.1.254", "2001::a" )
-NS_ADD_INT(ls1p2, ls1p2, br-int, "192.168.1.2/24", "00:00:00:01:01:02", "2001::2/64", "192.168.1.254", "2001::a" )
+ADD_NAMESPACES(ls1p1)
+ADD_VETH(ls1p1, ls1p1, br-int, "192.168.1.1/24", "00:00:00:01:01:01", \
+         "192.168.1.254", , "2001::1/64", "2001::a")
+
+ADD_NAMESPACES(ls1p2)
+ADD_VETH(ls1p2, ls1p2, br-int, "192.168.1.2/24", "00:00:00:01:01:02", \
+         "192.168.1.254", , "2001::2/64", "2001::a")
 
 ADD_NAMESPACES(ext1)
-ADD_INT(ext1, ext1, br0, 172.16.1.1/24, 1711::1/64)
+ADD_VETH(ext1, ext1, br0, "172.16.1.1/24", "00:ee:00:01:01:01", \
+         "172.16.1.254", , "1711::1/64", "1711::a")
+
 check ovn-nbctl --wait=hv sync
 wait_for_ports_up
 OVS_WAIT_UNTIL([test "$(ip netns exec ls1p1 ip a | grep 2001::1 | grep tentative)" = ""])
@@ -8548,25 +8552,17 @@ wait_igmp_flows_installed()
 }
 
 ADD_NAMESPACES(vm1)
-ADD_INT([vm1], [vm1], [br-int], [42.42.42.1/24])
-NS_CHECK_EXEC([vm1], [ip link set vm1 address 00:00:00:00:00:01], [0])
-NS_CHECK_EXEC([vm1], [ip route add default via 42.42.42.5], [0])
-check ovs-vsctl set Interface vm1 external_ids:iface-id=vm1
+ADD_VETH(vm1, vm1, br-int, "42.42.42.1/24", "00:00:00:00:00:01", \
+         "42.42.42.5")
 
 ADD_NAMESPACES(vm2)
-ADD_INT([vm2], [vm2], [br-int], [42.42.42.2/24])
-NS_CHECK_EXEC([vm2], [ip link set vm2 address 00:00:00:00:00:02], [0])
-NS_CHECK_EXEC([vm2], [ip link set lo up], [0])
-check ovs-vsctl set Interface vm2 external_ids:iface-id=vm2
+ADD_VETH(vm2, vm2, br-int, "42.42.42.2/24", "00:00:00:00:00:02")
 
 ADD_NAMESPACES(vm3)
 NETNS_DAEMONIZE([vm3], [tcpdump -n -i any -nnleX > vm3.pcap 2>/dev/null], [tcpdump3.pid])
 
-ADD_INT([vm3], [vm3], [br-int], [42.42.42.3/24])
-NS_CHECK_EXEC([vm3], [ip link set vm3 address 00:00:00:00:00:03], [0])
-NS_CHECK_EXEC([vm3], [ip link set lo up], [0])
-NS_CHECK_EXEC([vm3], [ip route add default via 42.42.42.5], [0])
-check ovs-vsctl set Interface vm3 external_ids:iface-id=vm3
+ADD_VETH(vm3, vm3, br-int, "42.42.42.3/24", "00:00:00:00:00:03", \
+         "42.42.42.5")
 
 NS_CHECK_EXEC([vm2], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore])
@@ -9639,7 +9635,7 @@ start_daemon ovn-controller
 #         |
 # VM2 ----+
 #
-# Two templated load balancer applied on LS1 and GW-Router with
+# Four templated load balancers applied on LS1 and GW-Router with
 # VM1 as backend.  The VIPs should be accessible from both VM2 and VM3.
 
 check ovn-nbctl                                                   \
@@ -9667,7 +9663,7 @@ check ovn-nbctl                                                   \
 # VIP=66.66.66.66:777 backends=42.42.42.2:4343 proto=udp
 
 AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" \
-    variables="{vip=66.66.66.66,vport1=666,backends1=\"42.42.42.2:4242\",vport2=777,backends2=\"42.42.42.2:4343\"}"],
+    variables="{vip=66.66.66.66,vport1=666,backends1=\"42.42.42.2:4242\",vport2=777,backends2=\"42.42.42.2:4343\",vport3=888,vport4=999}"],
          [0], [ignore])
 
 check ovn-nbctl --template lb-add lb-test-tcp "^vip:^vport1" "^backends1" tcp \
@@ -9678,6 +9674,18 @@ check ovn-nbctl --template lb-add lb-test-udp "^vip:^vport2" "^backends2" udp \
     -- ls-lb-add ls1 lb-test-udp                                              \
     -- lr-lb-add rtr lb-test-udp
 
+# Add a TCP template LB with explicit backends that eventually expands to:
+# VIP=66.66.66.66:888 backends=42.42.42.2:4242 proto=tcp
+# And a UDP template LB that eventually expands to:
+# VIP=66.66.66.66:999 backends=42.42.42.2:4343 proto=udp
+check ovn-nbctl --template lb-add lb-test-tcp2 "^vip:^vport3" "42.42.42.2:4242" tcp ipv4 \
+    -- ls-lb-add ls1 lb-test-tcp2                                                        \
+    -- lr-lb-add rtr lb-test-tcp2
+
+check ovn-nbctl --template lb-add lb-test-udp2 "^vip:^vport4" "42.42.42.2:4343" udp ipv4 \
+    -- ls-lb-add ls1 lb-test-udp2                                                        \
+    -- lr-lb-add rtr lb-test-udp2
+
 ADD_NAMESPACES(vm1)
 ADD_VETH(vm1, vm1, br-int, "42.42.42.2/24", "00:00:00:00:00:01", "42.42.42.1")
 
@@ -9698,13 +9706,15 @@ name: 'backends2' value: '42.42.42.2:4343'
 name: 'vip' value: '66.66.66.66'
 name: 'vport1' value: '666'
 name: 'vport2' value: '777'
+name: 'vport3' value: '888'
+name: 'vport4' value: '999'
 ])
 
 # Start IPv4 TCP server on vm1.
 NETNS_DAEMONIZE([vm1], [nc -k -l 42.42.42.2 4242], [nc-vm1.pid])
 
 NETNS_DAEMONIZE([vm1],
-    [tcpdump -n -i vm1 -nnleX -c3 udp and dst 42.42.42.2 and dst port 4343 > vm1.pcap 2>/dev/null],
+    [tcpdump -n -i vm1 -nnleX -c6 udp and dst 42.42.42.2 and dst port 4343 > vm1.pcap 2>/dev/null],
     [tcpdump1.pid])
 
 # Make sure connecting to the VIP works (hairpin, via ls and via lr).
@@ -9712,13 +9722,21 @@ NS_CHECK_EXEC([vm1], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore])
 
-NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 777 &], [0])
-NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 777 &], [0])
-NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 777 &], [0])
+NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 777], [ignore], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [nc 66.66.66.66 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [nc 66.66.66.66 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [nc 66.66.66.66 888 -z], [0], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [echo a | nc -u 66.66.66.66 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 66.66.66.66 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 66.66.66.66 999], [ignore], [ignore], [ignore])
 
 OVS_WAIT_UNTIL([
     requests=`grep "UDP" -c vm1.pcap`
-    test "${requests}" -ge "3"
+    test "${requests}" -ge "6"
 ])
 
 AT_CLEANUP
@@ -9753,7 +9771,7 @@ start_daemon ovn-controller
 #         |
 # VM2 ----+
 #
-# Two templated load balancer applied on LS1 and GW-Router with
+# Four templated load balancers applied on LS1 and GW-Router with
 # VM1 as backend.  The VIPs should be accessible from both VM2 and VM3.
 
 check ovn-nbctl                                                   \
@@ -9781,7 +9799,7 @@ check ovn-nbctl                                                   \
 # VIP=[6666::1]:777 backends=[4242::2]:4343 proto=udp
 
 AT_CHECK([ovn-nbctl -- create chassis_template_var chassis="hv1" \
-    variables="{vip=\"6666::1\",vport1=666,backends1=\"[[4242::2]]:4242\",vport2=777,backends2=\"[[4242::2]]:4343\"}"],
+    variables="{vip=\"6666::1\",vport1=666,backends1=\"[[4242::2]]:4242\",vport2=777,backends2=\"[[4242::2]]:4343\",vport3=888,vport4=999}"],
          [0], [ignore])
 
 check ovn-nbctl --template lb-add lb-test-tcp "^vip:^vport1" "^backends1" tcp ipv6 \
@@ -9792,6 +9810,18 @@ check ovn-nbctl --template lb-add lb-test-udp "^vip:^vport2" "^backends2" udp ip
     -- ls-lb-add ls1 lb-test-udp                                                   \
     -- lr-lb-add rtr lb-test-udp
 
+# Add a TCP template LB with explicit backends that eventually expands to:
+# VIP=[6666::1]:888 backends=[4242::2]:4242 proto=tcp
+# And a UDP template LB that eventually expands to:
+# VIP=[6666::1]:999 backends=[4242::2]:4343 proto=udp
+check ovn-nbctl --template lb-add lb-test-tcp2 "^vip:^vport3" "[[4242::2]]:4242" tcp ipv6 \
+    -- ls-lb-add ls1 lb-test-tcp2                                                         \
+    -- lr-lb-add rtr lb-test-tcp2
+
+check ovn-nbctl --template lb-add lb-test-udp2 "^vip:^vport4" "[[4242::2]]:4343" udp ipv6 \
+    -- ls-lb-add ls1 lb-test-udp2                                                         \
+    -- lr-lb-add rtr lb-test-udp2
+
 ADD_NAMESPACES(vm1)
 ADD_VETH(vm1, vm1, br-int, "4242::2/64", "00:00:00:00:00:01", "4242::1")
 OVS_WAIT_UNTIL([test "$(ip netns exec vm1 ip a | grep 4242::2 | grep tentative)" = ""])
@@ -9815,13 +9845,15 @@ name: 'backends2' value: '[[4242::2]]:4343'
 name: 'vip' value: '6666::1'
 name: 'vport1' value: '666'
 name: 'vport2' value: '777'
+name: 'vport3' value: '888'
+name: 'vport4' value: '999'
 ])
 
 # Start IPv6 TCP server on vm1.
 NETNS_DAEMONIZE([vm1], [nc -k -l 4242::2 4242], [nc-vm1.pid])
 
 NETNS_DAEMONIZE([vm1],
-    [tcpdump -n -i vm1 -nnleX -c3 udp and dst 4242::2 and dst port 4343 > vm1.pcap 2>/dev/null],
+    [tcpdump -n -i vm1 -nnleX -c6 udp and dst 4242::2 and dst port 4343 > vm1.pcap 2>/dev/null],
     [tcpdump1.pid])
 
 # Make sure connecting to the VIP works (hairpin, via ls and via lr).
@@ -9829,13 +9861,21 @@ NS_CHECK_EXEC([vm1], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm2], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 NS_CHECK_EXEC([vm3], [nc 6666::1 666 -z], [0], [ignore], [ignore])
 
-NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 777 &], [0])
-NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 777 &], [0])
-NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 777 &], [0])
+NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 777], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 777], [ignore], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [nc 6666::1 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [nc 6666::1 888 -z], [0], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [nc 6666::1 888 -z], [0], [ignore], [ignore])
+
+NS_CHECK_EXEC([vm1], [echo a | nc -u 6666::1 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm2], [echo a | nc -u 6666::1 999], [ignore], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [echo a | nc -u 6666::1 999], [ignore], [ignore], [ignore])
 
 OVS_WAIT_UNTIL([
     requests=`grep "UDP" -c vm1.pcap`
-    test "${requests}" -ge "3"
+    test "${requests}" -ge "6"
 ])
 
 AT_CLEANUP
@@ -10587,11 +10627,13 @@ check ovn-nbctl lsp-add bar bar3 \
 -- lsp-set-addresses bar3 "f0:00:0f:01:02:05 172.16.1.4"
 
 # Config OVN load-balancer with a VIP.
-check ovn-nbctl lb-add lb1 30.0.0.1 "172.16.1.2,172.16.1.3,172.16.1.4"
+check ovn-nbctl lb-add lb1 30.0.0.1 "172.16.1.2,172.16.1.3,172.16.1.4" \
+    -- set load_balancer lb1 options:ct_flush="true"
 check ovn-nbctl ls-lb-add foo lb1
 
 # Create another load-balancer with another VIP.
 lb2_uuid=`ovn-nbctl create load_balancer name=lb2 vips:30.0.0.3="172.16.1.2,172.16.1.3,172.16.1.4"`
+check ovn-nbctl set load_balancer lb2 options:ct_flush="true"
 check ovn-nbctl ls-lb-add foo lb2
 
 # Config OVN load-balancer with another VIP (this time with ports).
@@ -10607,16 +10649,18 @@ OVS_START_L7([bar1], [http])
 OVS_START_L7([bar2], [http])
 OVS_START_L7([bar3], [http])
 
-OVS_WAIT_FOR_OUTPUT([
-    for i in `seq 1 20`; do
-        ip netns exec foo1 wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
-    done
-    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+m4_define([LB1_CT_ENTRIES], [dnl
 tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
 tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.1.3,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
 tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.1.4,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
 ])
 
+OVS_WAIT_FOR_OUTPUT([
+    for i in `seq 1 20`; do
+        ip netns exec foo1 wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
+    done
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
 OVS_WAIT_FOR_OUTPUT([
     for i in `seq 1 20`; do
         ip netns exec foo1 wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
@@ -10690,6 +10734,191 @@ check ovn-nbctl lb-del lb2
 
 OVS_WAIT_UNTIL([test "$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.3) | wc -l)" = "0"])
 
+# Check that LB has CT flush disabled by default
+check ovn-nbctl lb-add lb1 30.0.0.1 "172.16.1.2,172.16.1.3,172.16.1.4"
+check ovn-nbctl ls-lb-add foo lb1
+
+OVS_WAIT_FOR_OUTPUT([
+    for i in `seq 1 20`; do
+        ip netns exec foo1 wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log;
+    done
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
+# Remove one backend
+check ovn-nbctl --wait=hv set load_balancer lb1 vips='"30.0.0.1"="172.16.1.2,172.16.1.3"'
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
+# Remove whole LB
+check ovn-nbctl --wait=hv lb-del lb1
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [LB1_CT_ENTRIES])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ACL and committing to conntrack])
+AT_KEYWORDS([acl])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add r1
+check ovn-nbctl lrp-add r1 r1_s1 00:de:ad:fe:00:01 173.0.1.1/24
+check ovn-nbctl lrp-add r1 r1_s2 00:de:ad:fe:00:02 173.0.2.1/24
+
+check ovn-nbctl ls-add s1
+check ovn-nbctl lsp-add s1 s1_r1
+check ovn-nbctl lsp-set-type s1_r1 router
+check ovn-nbctl lsp-set-addresses s1_r1 router
+check ovn-nbctl lsp-set-options s1_r1 router-port=r1_s1
+
+check ovn-nbctl ls-add s2
+check ovn-nbctl lsp-add s2 s2_r1
+check ovn-nbctl lsp-set-type s2_r1 router
+check ovn-nbctl lsp-set-addresses s2_r1 router
+check ovn-nbctl lsp-set-options s2_r1 router-port=r1_s2
+
+check ovn-nbctl lsp-add s1 vm1
+check ovn-nbctl lsp-set-addresses vm1 "00:de:ad:01:00:01 173.0.1.2"
+
+check ovn-nbctl lsp-add s2 vm2
+check ovn-nbctl lsp-set-addresses vm2 "00:de:ad:01:00:02 173.0.2.2"
+
+check ovn-nbctl lsp-add s2 vm3
+check ovn-nbctl lsp-set-addresses vm3 "00:de:ad:01:00:03 173.0.2.3"
+
+check ovn-nbctl lb-add lb1 30.0.0.1:80 173.0.2.2:80 udp
+check ovn-nbctl lb-add lb2 20.0.0.1:80 173.0.1.2:80 udp
+check ovn-nbctl lb-add lb1 30.0.0.1 173.0.2.2
+check ovn-nbctl lb-add lb2 173.0.2.250 173.0.1.3
+check ovn-nbctl ls-lb-add s1 lb1
+check ovn-nbctl ls-lb-add s2 lb2
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "173.0.1.2/24", "00:de:ad:01:00:01", \
+         "173.0.1.1")
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "173.0.2.2/24", "00:de:ad:01:00:02", \
+         "173.0.2.1")
+ADD_NAMESPACES(vm3)
+ADD_VETH(vm3, vm3, br-int, "173.0.2.250/24", "00:de:ad:01:00:03", \
+         "173.0.2.1")
+
+check ovn-nbctl acl-add s1 from-lport 1001 "ip" allow
+check ovn-nbctl acl-add s1 to-lport 1002 "ip" allow
+check ovn-nbctl acl-add s2 from-lport 1003 "ip" allow
+check ovn-nbctl acl-add s2 to-lport 1004 "ip" allow
+check ovn-nbctl --wait=hv sync
+AS_BOX([initial ping])
+# Send ping in the background. Same ping, same flow throughout the test.
+on_exit 'kill $(pidof ping)'
+NS_EXEC([vm1], [ping -c 10000 -i 0.1 30.0.0.1 > icmp.txt &])
+
+# Check for conntrack entries
+OVS_WAIT_FOR_OUTPUT([
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(173.0.1.2) | \
+      sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=173.0.2.2,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=2
+])
+
+# Now check that there is only a single ct commit flow for vm1's zone (no duplicate commits)
+ovs-appctl dpctl/dump-flows > dp_flows
+zone_id=$(ovn-appctl -t ovn-controller ct-zone-list | grep vm1 | cut -d ' ' -f2)
+AT_CHECK([test 1 = `cat dp_flows | grep "commit,zone=$zone_id" | wc -l`])
+
+check ovn-nbctl acl-del s1 from-lport 1001 "ip"
+check ovn-nbctl acl-del s1 to-lport 1002 "ip"
+check ovn-nbctl acl-del s2 from-lport 1003 "ip"
+check ovn-nbctl acl-del s2 to-lport 1004 "ip"
+
+AS_BOX([acl drop echo request])
+check ovn-nbctl --log --severity=alert --name=drop-flow-s1 acl-add s1 to-lport 2001 icmp4 drop
+# The to-lport drop ACL on s1 applies to traffic from s1 to vm1 and from s1 to r1.
+check ovn-nbctl --wait=hv sync
+
+# Check that traffic is blocked
+# Wait for some packets to hit the rule to avoid potential race conditions. Then count packets.
+OVS_WAIT_UNTIL([test `cat ovn-controller.log | grep acl_log | grep -c drop-flow-s1` -gt "0"])
+total_icmp_pkts=$(cat icmp.txt | grep ttl | wc -l)
+
+# Wait some time and check whether packets went through. In the worst-case race, the sleep is
+# too short and this test will still succeed.
+sleep 1
+OVS_WAIT_UNTIL([
+        total_icmp1_pkts=$(cat icmp.txt | grep ttl | wc -l)
+        test "${total_icmp1_pkts}" -eq "${total_icmp_pkts}"
+])
+
+AS_BOX([acl allow-related echo request])
+check ovn-nbctl acl-add s1 to-lport 2002 "icmp4 && ip4.src == 173.0.1.2" allow-related
+# This rule has a higher priority than the to-lport 2001 icmp4 drop.
+# So traffic from s1 (with src=173.0.1.2) to r1 should be accepted, and
+# traffic from s1 to vm1 (the echo replies) should be accepted as return traffic.
+check ovn-nbctl --wait=hv sync
+OVS_WAIT_UNTIL([
+        total_icmp1_pkts=$(cat icmp.txt | grep ttl | wc -l)
+        test "${total_icmp1_pkts}" -gt "${total_icmp_pkts}"
+])
+
+# Check that we did not break acl-drop handling for existing flows
+AS_BOX([acl drop echo request in s2])
+check ovn-nbctl acl-del s1 to-lport 2001 icmp4
+check ovn-nbctl --log --severity=alert --name=drop-flow-s2 acl-add s2 to-lport 2001 icmp4 drop
+check ovn-nbctl --wait=hv sync
+
+OVS_WAIT_UNTIL([test `cat ovn-controller.log | grep acl_log | grep -c drop-flow-s2` -gt "0"])
+
+OVS_WAIT_FOR_OUTPUT([
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
+      sed -e 's/zone=[[0-9]]*/zone=<cleared>/' | \
+      sed -e 's/mark=[[0-9]]*/mark=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=<cleared>
+])
+total_icmp_pkts=$(cat icmp.txt | grep ttl | wc -l)
+
+# Allow ping again
+AS_BOX([acl allow echo request in s2])
+check ovn-nbctl acl-add s2 to-lport 2005 icmp4 allow
+check ovn-nbctl --wait=hv sync
+OVS_WAIT_FOR_OUTPUT([
+    ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
+      sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=2
+])
+OVS_WAIT_UNTIL([
+        total_icmp1_pkts=$(cat icmp.txt | grep ttl | wc -l)
+        test "${total_icmp1_pkts}" -gt "${total_icmp_pkts}"
+])
+
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
 
 as ovn-sb
@@ -10706,3 +10935,605 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
 ])
+
+# This tests the Port_Binding up/down state and the ovn-installed external-id
+# after adding and removing ports and interfaces.
+# 3 Conditions x 3 tests:
+# - 3 Conditions:
+#   - In normal conditions
+#   - Remove interface while starting and stopping SB and Controller
+#   - Remove and add back interface while starting and stopping SB and Controller
+# - 3 tests:
+#   - Add/Remove Logical Port
+#   - Add/Remove iface-id
+#   - Add/Remove Interface
+# Each test/condition checks:
+# - Port_Binding->chassis
+# - Port up or down
+# - ovn-installed
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-install on slow ovsdb])
+AT_KEYWORDS([ovn-install])
+
+OVS_TRAFFIC_VSWITCHD_START()
+# Restart ovsdb-server, this time also listening on TCP
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+start_daemon ovsdb-server --remote=punix:"$OVS_RUNDIR"/db.sock --remote=ptcp:0:127.0.0.1
+
+ovn_start
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+PARSE_LISTENING_PORT([$ovs_base/ovsdb-server.log], [TCP_PORT])
+start_daemon ovn-controller tcp:127.0.0.1:$TCP_PORT
+
+check ovn-nbctl ls-add ls1
+check ovn-nbctl set Logical_Switch ls1 other_config:subnet=10.1.0.0/16
+
+check ovn-nbctl --wait=hv sync
+
+add_logical_ports() {
+  echo Adding logical ports
+  check ovn-nbctl lsp-add ls1 lsp1
+  check ovn-nbctl lsp-add ls1 lsp2
+}
+
+remove_logical_ports() {
+  echo Removing logical ports
+  check ovn-nbctl lsp-del lsp1
+  check ovn-nbctl lsp-del lsp2
+}
+
+add_ovs_interface() {
+  echo Adding interface $1 $2
+  ovs-vsctl --no-wait -- add-port br-int $1 \
+                      -- set Interface $1 external_ids:iface-id=$2 \
+                      -- set Interface $1 type=internal
+}
+add_ovs_interfaces() {
+  add_ovs_interface vif1 lsp1
+  add_ovs_interface vif2 lsp2
+}
+remove_ovs_interface() {
+  echo Removing interface $1
+  check ovs-vsctl --no-wait -- del-port $1
+}
+remove_ovs_interfaces() {
+  remove_ovs_interface vif1
+  remove_ovs_interface vif2
+}
+add_iface_ids() {
+  echo Adding iface-id vif1 lsp1
+  ovs-vsctl --no-wait -- set Interface vif1 external_ids:iface-id=lsp1
+  echo Adding iface-id vif2 lsp2
+  ovs-vsctl --no-wait -- set Interface vif2 external_ids:iface-id=lsp2
+}
+remove_iface_id() {
+  echo Removing iface-id $1
+  check ovs-vsctl remove Interface $1 external_ids iface-id
+}
+remove_iface_ids() {
+  remove_iface_id vif1
+  remove_iface_id vif2
+}
+wait_for_local_bindings() {
+  OVS_WAIT_UNTIL(
+      [test `ovs-appctl -t ovn-controller debug/dump-local-bindings | grep interface | wc -l` -eq 2],
+      [kill -CONT $(cat ovn-sb/ovsdb-server.pid)]
+  )
+}
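+# The helpers below pause and resume the SB ovsdb-server and the local
+# ovsdb-server with SIGSTOP/SIGCONT, and ovn-controller via its debug/pause
+# and debug/resume unixctl commands, to simulate slow database servers and a
+# slow controller.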
+sleep_sb() {
+  echo SB going to sleep
+  AT_CHECK([kill -STOP $(cat ovn-sb/ovsdb-server.pid)])
+}
+wake_up_sb() {
+  echo SB waking up
+  AT_CHECK([kill -CONT $(cat ovn-sb/ovsdb-server.pid)])
+}
+sleep_controller() {
+  echo Controller going to sleep
+  ovn-appctl debug/pause
+  OVS_WAIT_UNTIL([test x$(ovn-appctl -t ovn-controller debug/status) = "xpaused"])
+}
+
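+# Drop TCP traffic to the local ovsdb-server port so that ovn-controller can
+# no longer commit its changes and therefore sees the local OVSDB as read-only.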
+stop_ovsdb_controller_updates() {
+  TCP_PORT=$1
+  echo Stopping updates from ovn-controller to ovsdb using port $TCP_PORT
+  on_exit 'iptables -C INPUT -p tcp --destination-port $TCP_PORT -j DROP 2>/dev/null && iptables -D INPUT -p tcp --destination-port $TCP_PORT -j DROP'
+  iptables -A INPUT -p tcp --destination-port $TCP_PORT -j DROP
+}
+restart_ovsdb_controller_updates() {
+  TCP_PORT=$1
+  echo Restarting updates from ovn-controller to ovsdb
+  iptables -D INPUT -p tcp --destination-port $TCP_PORT  -j DROP
+}
+wake_up_controller() {
+  echo Controller waking up
+  ovn-appctl debug/resume
+}
+ensure_controller_run() {
+# We want to make sure the controller has run at least one full loop.
+# We can't use --wait=hv as the SB might be sleeping.
+# Use two ovn-appctl calls to guarantee that ovn-controller runs the full loop,
+# not just the unixctl handling.
+  OVS_WAIT_UNTIL([test x$(ovn-appctl -t ovn-controller debug/status) = "xrunning"])
+  OVS_WAIT_UNTIL([test x$(ovn-appctl -t ovn-controller debug/status) = "xrunning"])
+}
+sleep_ovsdb() {
+  echo OVSDB going to sleep
+  AT_CHECK([kill -STOP $(cat ovsdb-server.pid)])
+}
+wake_up_ovsdb() {
+  echo OVSDB waking up
+  AT_CHECK([kill -CONT $(cat ovsdb-server.pid)])
+}
+check_ovn_installed() {
+  OVS_WAIT_UNTIL([test `ovs-vsctl get Interface vif1 external_ids:ovn-installed` = '"true"'])
+  OVS_WAIT_UNTIL([test `ovs-vsctl get Interface vif2 external_ids:ovn-installed` = '"true"'])
+}
+check_ovn_uninstalled() {
+  OVS_WAIT_UNTIL([test x`ovs-vsctl get Interface vif2 external_ids:ovn-installed` = x])
+  OVS_WAIT_UNTIL([test x`ovs-vsctl get Interface vif1 external_ids:ovn-installed` = x])
+}
+check_ports_up() {
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp1 up` = 'true'])
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp2 up` = 'true'])
+}
+check_ports_down() {
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp1 up` = 'false'])
+  OVS_WAIT_UNTIL([test `ovn-sbctl get Port_Binding lsp2 up` = 'false'])
+}
+
+check_ports_bound() {
+  ch=$(fetch_column Chassis _uuid name=hv1)
+  wait_row_count Port_Binding 1 logical_port=lsp1 chassis=$ch
+  wait_row_count Port_Binding 1 logical_port=lsp2 chassis=$ch
+}
+check_ports_unbound() {
+  wait_column "" Port_Binding chassis logical_port=lsp1
+  wait_column "" Port_Binding chassis logical_port=lsp2
+}
+add_logical_ports
+add_ovs_interfaces
+wait_for_local_bindings
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+############################################################
+########## Remove interface while removing iface-id ########
+############################################################
+AS_BOX(["Remove interface while removing iface-id"])
+stop_ovsdb_controller_updates $TCP_PORT
+remove_iface_id vif1
+ensure_controller_run
+# OVSDB should be seen as read-only now
+remove_iface_id vif2
+ensure_controller_run
+# The controller delays removing ovn-installed for vif2 as OVSDB is read-only
+sleep_controller
+restart_ovsdb_controller_updates $TCP_PORT
+remove_ovs_interface vif2
+# vif2, for which we want to remove ovn-installed, is deleted
+wake_up_controller
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_ovs_interface vif2 lsp2
+add_iface_ids
+check_ovn_installed
+check_ports_up
+check_ports_bound
+############################################################
+################### Add/Remove iface-id ####################
+############################################################
+AS_BOX(["iface-id removal and added back (no sleeping sb or controller)"])
+remove_iface_ids
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_iface_ids
+check_ovn_installed
+check_ports_up
+check_ports_bound
+
+AS_BOX(["iface-id removal"])
+sleep_sb
+remove_iface_ids
+ensure_controller_run
+sleep_controller
+wake_up_sb
+wake_up_controller
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_iface_ids
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["iface-id removal 2"])
+# Block IDL from ovn-controller to OVSDB
+stop_ovsdb_controller_updates $TCP_PORT
+remove_iface_id vif2
+ensure_controller_run
+
+# OVSDB should now be seen as read-only by ovn-controller
+remove_iface_id vif1
+ensure_controller_run
+
+# Restart connection from ovn-controller to OVSDB
+restart_ovsdb_controller_updates $TCP_PORT
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+
+add_iface_ids
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["iface-id removal and added back"])
+sleep_sb
+remove_iface_ids
+ensure_controller_run
+sleep_controller
+add_iface_ids
+wake_up_sb
+wake_up_controller
+check_ovn_installed
+check_ports_up
+check_ports_bound
+############################################################
+###################### Add/Remove Interface ################
+############################################################
+AS_BOX(["Interface removal and added back (no sleeping sb or controller)"])
+remove_ovs_interfaces
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_ovs_interfaces
+check_ovn_installed
+check_ports_up
+check_ports_bound
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Interface removal"])
+sleep_sb
+remove_ovs_interfaces
+ensure_controller_run
+sleep_controller
+wake_up_sb
+wake_up_controller
+check_ovn_uninstalled
+check_ports_down
+check_ports_unbound
+add_ovs_interfaces
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Interface removal and added back"])
+sleep_sb
+remove_ovs_interfaces
+ensure_controller_run
+sleep_controller
+add_ovs_interfaces
+wake_up_sb
+wake_up_controller
+check_ovn_installed
+check_ports_up
+check_ports_bound
+check ovn-nbctl --wait=hv sync
+############################################################
+###################### Add/Remove Logical Port #############
+############################################################
+AS_BOX(["Logical port removal and added back (no sleeping sb or controller)"])
+remove_logical_ports
+check_ovn_uninstalled
+check_ports_unbound
+sleep_ovsdb
+add_logical_ports
+ensure_controller_run
+wake_up_ovsdb
+check_ovn_installed
+check_ports_up
+check_ports_bound
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Logical port removal"])
+sleep_sb
+remove_logical_ports
+ensure_controller_run
+sleep_controller
+wake_up_sb
+wake_up_controller
+check_ovn_uninstalled
+check_ports_unbound
+add_logical_ports
+check ovn-nbctl --wait=hv sync
+
+AS_BOX(["Logical port removal and added back"])
+sleep_sb
+remove_logical_ports
+ensure_controller_run
+sleep_controller
+add_logical_ports
+wake_up_sb
+wake_up_controller
+check_ovn_installed
+check_ports_up
+check_ports_bound
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn mirroring])
+AT_KEYWORDS([mirror])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-mirror])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+ovs-ofctl add-flow br-mirror action=normal
+
+ovn-nbctl create Logical_Router name=R1 options:chassis=hv1
+
+ovn-nbctl ls-add foo
+ovn-nbctl ls-add bar
+
+# Connect foo to R1
+ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 192.168.1.1/24 2001::1/64
+ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \
+    type=router options:router-port=foo addresses=\"00:00:01:01:02:03\"
+
+# Connect bar to R1
+ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 192.168.2.1/24 2002::1/64
+ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \
+    type=router options:router-port=bar addresses=\"00:00:01:01:02:04\"
+
+# Logical port 'foo1' in switch 'foo'.
+ADD_NAMESPACES(foo1)
+ADD_VETH(foo1, foo1, br-int, "2001::2/64", "f0:00:00:01:02:03", \
+         "2001::1", "nodad", "192.168.1.2/24", "192.168.1.1")
+ovn-nbctl lsp-add foo foo1 \
+-- lsp-set-addresses foo1 "f0:00:00:01:02:03 192.168.1.2 2001::2"
+
+# Logical port 'bar1' in switch 'bar'.
+ADD_NAMESPACES(bar1)
+ADD_VETH(bar1, bar1, br-int, "2002::2/64", "f0:00:00:01:02:05", \
+         "2002::1", "nodad", "192.168.2.2/24", "192.168.2.1")
+ovn-nbctl lsp-add bar bar1 \
+-- lsp-set-addresses bar1 "f0:00:00:01:02:05 192.168.2.2 2002::2"
+
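+# mirror-add arguments (as used below): mirror name, tunnel type (gre or
+# erspan), tunnel index (GRE key / ERSPAN index), traffic filter (to-lport),
+# and the mirror sink IP address.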
+ovn-nbctl mirror-add mirror0 gre 1 to-lport 172.16.0.100
+ovn-nbctl lsp-attach-mirror bar1 mirror0
+
+ADD_NAMESPACES(mirror)
+ADD_VETH(mirror, mirror, br-mirror, "2003::b/64", "f0:00:00:01:07:06", \
+         "2003::1", "nodad", "172.16.0.100/24", "172.16.0.1")
+AT_CHECK([ip addr add 172.16.0.101/24 dev br-mirror])
+AT_CHECK([ip addr add 2003::a/64 dev br-mirror nodad])
+AT_CHECK([ip link set dev br-mirror up])
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror proto GRE > gre_mirror4.pcap 2>gre_mirror4_error &])
+OVS_WAIT_UNTIL([grep "listening" gre_mirror4_error])
+
+NS_CHECK_EXEC([foo1], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "GRE" -c gre_mirror4.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+ovn-nbctl mirror-del mirror0
+ovn-nbctl mirror-add mirror1 gre 2 to-lport 2003::b
+ovn-nbctl lsp-attach-mirror bar1 mirror1
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror proto GRE > gre_mirror6.pcap 2>gre_mirror6_error &])
+OVS_WAIT_UNTIL([grep "listening" gre_mirror6_error])
+
+NS_CHECK_EXEC([foo1], [ping6 -q -c 3 -i 0.3 -w 2 2002::2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "GRE" -c gre_mirror6.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+ovn-nbctl mirror-del mirror1
+ovn-nbctl mirror-add mirror2 erspan 3 to-lport 172.16.0.100
+ovn-nbctl lsp-attach-mirror bar1 mirror2
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror ip[[22:2]]=0x88be > erspan_mirror4.pcap 2>erspan_mirror4_error &])
+OVS_WAIT_UNTIL([grep "listening" erspan_mirror4_error])
+
+NS_CHECK_EXEC([foo1], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "gre-proto-0x88be" -c erspan_mirror4.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+ovn-nbctl mirror-del mirror2
+ovn-nbctl mirror-add mirror3 erspan 4 to-lport 2003::b
+ovn-nbctl lsp-attach-mirror bar1 mirror3
+
+NS_CHECK_EXEC([mirror], [tcpdump -l -c 3 -neei mirror ip6[[42:2]]=0x88be > erspan_mirror6.pcap 2>erspan_mirror6_error &])
+OVS_WAIT_UNTIL([grep "listening" erspan_mirror6_error])
+
+NS_CHECK_EXEC([foo1], [ping6 -q -c 3 -i 0.3 -w 2 2002::2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "gre-proto-0x88be" -c erspan_mirror6.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+uuid=$(fetch_column nb:mirror _uuid name="mirror3")
+ovn-nbctl set mirror $uuid type=gre
+
+NS_CHECK_EXEC([mirror], [tcpdump -c 3 -l -neei mirror proto GRE > gre_mirror6.pcap 2>gre_mirror6_error &])
+OVS_WAIT_UNTIL([grep "listening" gre_mirror6_error])
+
+NS_CHECK_EXEC([foo1], [ping6 -q -c 3 -i 0.3 -w 2 2002::2 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+OVS_WAIT_UNTIL([
+    n_packets=$(grep "GRE" -c gre_mirror6.pcap)
+    test "${n_packets}" = "3"
+])
+
+killall tcpdump
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([load balancer with localnet port])
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-phys], [set Bridge br-phys fail-mode=standalone])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add ro
+check ovn-nbctl lrp-add ro ro-sw 00:00:00:00:00:01 192.168.0.1/24
+check ovn-nbctl lrp-add ro ro-pub 00:00:00:00:01:01 10.0.0.1/24
+
+check ovn-nbctl ls-add sw
+check ovn-nbctl lsp-add sw sw-vm1 \
+    -- lsp-set-addresses sw-vm1 "00:00:00:00:00:02 192.168.0.2"
+check ovn-nbctl lsp-add sw sw-ro \
+    -- lsp-set-type sw-ro router \
+    -- lsp-set-addresses sw-ro router \
+    -- lsp-set-options sw-ro router-port=ro-sw
+
+check ovn-nbctl ls-add pub
+check ovn-nbctl lsp-add pub sw-ln \
+    -- lsp-set-type sw-ln localnet \
+    -- lsp-set-addresses sw-ln unknown \
+    -- lsp-set-options sw-ln network_name=phys
+check ovn-nbctl lsp-add pub pub-ro \
+    -- lsp-set-type pub-ro router \
+    -- lsp-set-addresses pub-ro router \
+    -- lsp-set-options pub-ro router-port=ro-pub
+
+check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
+
+ADD_NAMESPACES(sw-vm1)
+ADD_VETH(sw-vm1, sw-vm1, br-int, "192.168.0.2/24", "00:00:00:00:00:02", \
+         "192.168.0.1")
+
+ADD_NAMESPACES(ln)
+ADD_VETH(ln, ln, br-phys, "10.0.0.2/24", "00:00:00:00:01:02", \
+         "10.0.0.1")
+
+# We have the basic network set up. Now let's add a load balancer
+# on the "pub" logical switch.
+
+check ovn-nbctl lb-add ln-lb 172.16.0.1:80 192.168.0.2:80 tcp
+check ovn-nbctl ls-lb-add pub ln-lb
+check ovn-nbctl --wait=hv sync
+
+# Add a route so that the localnet port can reach the load balancer
+# VIP.
+NS_CHECK_EXEC([ln], [ip route add 172.16.0.1 via 10.0.0.1])
+NS_CHECK_EXEC([ln], [ip route add 192.168.0.0/24 via 10.0.0.1])
+
+OVS_START_L7([sw-vm1], [http])
+
+NS_CHECK_EXEC([ln], [wget 172.16.0.1 -t 5 -T 1 --retry-connrefused -v -o wget.log])
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.1) | \
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=10.0.0.2,dst=172.16.0.1,sport=<cleared>,dport=<cleared>),reply=(src=192.168.0.2,dst=10.0.0.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+
+AT_CLEANUP
+])
diff --git a/utilities/containers/py-requirements.txt b/utilities/containers/py-requirements.txt
index d7bd21e0d..0d90765c9 100644
--- a/utilities/containers/py-requirements.txt
+++ b/utilities/containers/py-requirements.txt
@@ -1,5 +1,6 @@
 flake8
 hacking>=3.0
+scapy
 sphinx
 setuptools
 pyelftools
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index 45572fd30..9399f9462 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -3033,7 +3033,7 @@ nbctl_lb_add(struct ctl_context *ctx)
     }
 
     ovn_lb_vip_format(&lb_vip_parsed, &lb_vip_normalized, template);
-    ovn_lb_vip_backends_format(&lb_vip_parsed, &lb_ips_new, template);
+    ovn_lb_vip_backends_format(&lb_vip_parsed, &lb_ips_new);
     ovn_lb_vip_destroy(&lb_vip_parsed);
 
     const struct nbrec_load_balancer *lb = NULL;
@@ -4204,8 +4204,7 @@ print_routing_policy(const struct nbrec_logical_router_policy *policy,
                       policy->match, policy->action);
         for (int i = 0; i < policy->n_nexthops; i++) {
             char *next_hop = normalize_prefix_str(policy->nexthops[i]);
-            char *fmt = i ? ", %s" : " %25s";
-            ds_put_format(s, fmt, next_hop);
+            ds_put_format(s, i ? ", %s" : " %25s", next_hop ? next_hop : "");
             free(next_hop);
         }
     } else {
@@ -6586,18 +6585,17 @@ print_route(const struct nbrec_logical_router_static_route *route,
 {
 
     char *prefix = normalize_prefix_str(route->ip_prefix);
-    char *next_hop = "";
+    char *next_hop = NULL;
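+    /* normalize_prefix_str() may return NULL; both prefix and next_hop are
+     * printed as empty fields in that case. */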
 
     if (!strcmp(route->nexthop, "discard")) {
         next_hop = xasprintf("discard");
     } else if (route->nexthop[0]) {
         next_hop = normalize_prefix_str(route->nexthop);
     }
-    ds_put_format(s, "%25s %25s", prefix, next_hop);
+    ds_put_format(s, "%25s %25s", prefix ? prefix : "",
+                  next_hop ? next_hop : "");
     free(prefix);
-    if (next_hop[0]) {
-        free(next_hop);
-    }
+    free(next_hop);
 
     if (route->policy) {
         ds_put_format(s, " %s", route->policy);