Blob Blame History Raw
diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh
index 0e9f87fa8..731dcacb9 100755
--- a/.ci/linux-build.sh
+++ b/.ci/linux-build.sh
@@ -9,8 +9,7 @@ EXTRA_OPTS="--enable-Werror"
 
 function configure_ovs()
 {
-    git clone https://github.com/openvswitch/ovs.git ovs_src
-    pushd ovs_src
+    pushd ovs
     ./boot.sh && ./configure $* || { cat config.log; exit 1; }
     make -j4 || { cat config.log; exit 1; }
     popd
@@ -19,7 +18,7 @@ function configure_ovs()
 function configure_ovn()
 {
     configure_ovs $*
-    ./boot.sh && ./configure --with-ovs-source=$PWD/ovs_src $* || \
+    ./boot.sh && ./configure $* || \
     { cat config.log; exit 1; }
 }
 
@@ -43,7 +42,7 @@ if [ "$TESTSUITE" ]; then
     # Now we only need to prepare the Makefile without sparse-wrapped CC.
     configure_ovn
 
-    export DISTCHECK_CONFIGURE_FLAGS="$OPTS --with-ovs-source=$PWD/ovs_src"
+    export DISTCHECK_CONFIGURE_FLAGS="$OPTS"
     if ! make distcheck -j4 TESTSUITEFLAGS="-j4" RECHECK=yes; then
         # testsuite.log is necessary for debugging.
         cat */_build/sub/tests/testsuite.log
diff --git a/.ci/osx-build.sh b/.ci/osx-build.sh
index 6617f0b9d..4b78b66dd 100755
--- a/.ci/osx-build.sh
+++ b/.ci/osx-build.sh
@@ -7,8 +7,7 @@ EXTRA_OPTS=""
 
 function configure_ovs()
 {
-    git clone https://github.com/openvswitch/ovs.git ovs_src
-    pushd ovs_src
+    pushd ovs
     ./boot.sh && ./configure $*
     make -j4 || { cat config.log; exit 1; }
     popd
@@ -17,7 +16,7 @@ function configure_ovs()
 function configure_ovn()
 {
     configure_ovs $*
-    ./boot.sh && ./configure $* --with-ovs-source=$PWD/ovs_src
+    ./boot.sh && ./configure $*
 }
 
 configure_ovn $EXTRA_OPTS $*
@@ -32,7 +31,7 @@ if ! "$@"; then
     exit 1
 fi
 if [ "$TESTSUITE" ] && [ "$CC" != "clang" ]; then
-    export DISTCHECK_CONFIGURE_FLAGS="$EXTRA_OPTS --with-ovs-source=$PWD/ovs_src"
+    export DISTCHECK_CONFIGURE_FLAGS="$EXTRA_OPTS"
     if ! make distcheck RECHECK=yes; then
         # testsuite.log is necessary for debugging.
         cat */_build/sub/tests/testsuite.log
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 7be75ca36..d825e257c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -48,6 +48,8 @@ jobs:
     steps:
     - name: checkout
       uses: actions/checkout@v2
+      with:
+        submodules: recursive
 
     - name: install required dependencies
       run:  sudo apt install -y ${{ env.dependencies }}
@@ -99,6 +101,8 @@ jobs:
     steps:
     - name: checkout
       uses: actions/checkout@v2
+      with:
+        submodules: recursive
     - name: install dependencies
       run:  brew install automake libtool
     - name: prepare
diff --git a/.gitignore b/.gitignore
index 7ca9b3859..68333384e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -94,3 +94,5 @@ testsuite.tmp.orig
 /.venv
 /cxx-check
 /*.ovsschema.stamp
+/compile_ovn.sh
+
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..e083f6bde
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "ovs"]
+	path = ovs
+	url = https://github.com/openvswitch/ovs
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 000000000..27e8042ac
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,6 @@
+[gerrit]
+host=code.engineering.redhat.com
+port=22
+project=ovn.git
+defaultbranch=ovn2.13
+
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 5d926c11f..29c2c011c 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -155,6 +155,7 @@ Geoffrey Wossum                    gwossum@acm.org
 Gianluca Merlo                     gianluca.merlo@gmail.com
 Giuseppe Lettieri                  g.lettieri@iet.unipi.it
 Glen Gibb                          grg@stanford.edu
+Gongming Chen                      gmingchen@tencent.com
 Guoshuai Li                        ligs@dtdream.com
 Guolin Yang                        gyang@vmware.com
 Guru Chaitanya Perakam             gperakam@Brocade.com
diff --git a/Documentation/intro/install/general.rst b/Documentation/intro/install/general.rst
index 65b1f4a40..cee99c63d 100644
--- a/Documentation/intro/install/general.rst
+++ b/Documentation/intro/install/general.rst
@@ -66,6 +66,10 @@ To compile the userspace programs in the OVN distribution, you will
 need the following software:
 
 - Open vSwitch (https://docs.openvswitch.org/en/latest/intro/install/).
+  Open vSwitch is included as a submodule in the OVN source code. It is
+  kept at the minimum recommended version for OVN to operate optimally.
+  See below for instructions about how to use a different OVS source
+  location.
 
 - GNU make
 
@@ -140,27 +144,44 @@ Bootstrapping
 -------------
 
 This step is not needed if you have downloaded a released tarball. If
-you pulled the sources directly from an Open vSwitch Git tree or got a
-Git tree snapshot, then run boot.sh in the top source directory to build
+you pulled the sources directly from an OVN Git tree or got a Git tree
+snapshot, then run boot.sh in the top source directory to build
 the "configure" script::
 
     $ ./boot.sh
 
-Before configuring OVN, clone, configure and build Open vSwitch.
+Before configuring OVN, build Open vSwitch. The easiest way to do this
+is to use the included OVS submodule in the OVN source::
+
+    $ git submodule update --init
+    $ cd ovs
+    $ ./boot.sh
+    $ ./configure
+    $ make
+    $ cd ..
+
+It is not required to use the included OVS submodule; however the OVS
+submodule is guaranteed to be the minimum recommended version of OVS
+to ensure OVN's optimal operation. If you wish to use OVS source code
+from a different location on the file system, then be sure to configure
+and build OVS before building OVN.
 
 .. _general-configuring:
 
 Configuring
 -----------
 
-Configure the package by running the configure script. You need to
-invoke configure with atleast the argument --with-ovs-source.
-For example::
+Then configure the package by running the configure script::
+
+    $ ./configure
+
+If your OVS source directory is not the included OVS submodule, specify the
+location of the OVS source code using --with-ovs-source::
 
     $ ./configure --with-ovs-source=/path/to/ovs/source
 
-If you have built Open vSwitch in a separate directory, then you
-need to provide that path in the option - --with-ovs-build.
+If you have built Open vSwitch in a separate directory from its source
+code, then you need to provide that path in the option - --with-ovs-build.
 
 By default all files are installed under ``/usr/local``. OVN expects to find
 its database in ``/usr/local/etc/ovn`` by default.
diff --git a/Makefile.am b/Makefile.am
index 7ce3d27e4..04a6d7c63 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -48,6 +48,8 @@ AM_CFLAGS = -Wstrict-prototypes
 AM_CFLAGS += $(WARNING_FLAGS)
 AM_CFLAGS += $(OVS_CFLAGS)
 
+AM_DISTCHECK_CONFIGURE_FLAGS = --with-ovs-source=$(PWD)/ovs
+
 if NDEBUG
 AM_CPPFLAGS += -DNDEBUG
 AM_CFLAGS += -fomit-frame-pointer
@@ -105,7 +107,9 @@ EXTRA_DIST = \
 	ovn-ic-nb.ovsschema \
 	ovn-ic-nb.xml \
 	ovn-ic-sb.ovsschema \
-	ovn-ic-sb.xml
+	ovn-ic-sb.xml \
+	.gitreview \
+	compile_ovn.sh
 bin_PROGRAMS =
 sbin_PROGRAMS =
 bin_SCRIPTS =
@@ -157,6 +161,7 @@ noinst_HEADERS += $(EXTRA_DIST)
 
 ro_c = echo '/* -*- mode: c; buffer-read-only: t -*- */'
 ro_shell = printf '\043 Generated automatically -- do not modify!    -*- buffer-read-only: t -*-\n'
+submodules = $(shell grep 'path =' $(srcdir)/.gitmodules | sed -E 's/[\t ]*path =\s*(.*)/\1/g' | xargs)
 
 SUFFIXES += .in
 .in:
@@ -216,6 +221,8 @@ dist-hook-git: distfiles
 	@if test -e $(srcdir)/.git && (git --version) >/dev/null 2>&1; then \
 	  (cd $(srcdir) && git ls-files) | grep -v '\.gitignore$$' | \
 	    grep -v '\.gitattributes$$' | \
+	    grep -v '\.gitmodules$$' | \
+	    grep -v "$(submodules)" | \
 	    LC_ALL=C sort -u > all-gitfiles; \
 	  LC_ALL=C comm -1 -3 distfiles all-gitfiles > missing-distfiles; \
 	  if test -s missing-distfiles; then \
@@ -247,8 +254,8 @@ ALL_LOCAL += config-h-check
 config-h-check:
 	@cd $(srcdir); \
 	if test -e .git && (git --version) >/dev/null 2>&1 && \
-	  git --no-pager grep -L '#include <config\.h>' `git ls-files | grep '\.c$$' | \
-	    grep -vE '^ovs/datapath|^ovs/lib/sflow|^ovs/datapath-windows|^python|^ovs/python'`; \
+	  git --no-pager grep -L '#include <config\.h>' `git ls-files | grep -v $(submodules) | grep '\.c$$' | \
+	    grep -vE '^python'`; \
 	then \
 	  echo "See above for list of violations of the rule that"; \
 	  echo "every C source file must #include <config.h>."; \
@@ -261,8 +268,7 @@ ALL_LOCAL += printf-check
 printf-check:
 	@cd $(srcdir); \
 	if test -e .git && (git --version) >/dev/null 2>&1 && \
-	  git --no-pager grep -n -E -e '%[-+ #0-9.*]*([ztj]|hh)' --and --not -e 'ovs_scan' `git ls-files | grep '\.[ch]$$' | \
-	    grep -vE '^ovs/datapath|^ovs/lib/sflow'`; \
+	  git --no-pager grep -n -E -e '%[-+ #0-9.*]*([ztj]|hh)' --and --not -e 'ovs_scan' `git ls-files | grep -v $(submodules) | grep '\.[ch]$$'`; \
 	then \
 	  echo "See above for list of violations of the rule that"; \
 	  echo "'z', 't', 'j', 'hh' printf() type modifiers are"; \
@@ -288,7 +294,7 @@ ALL_LOCAL += check-assert-h-usage
 check-assert-h-usage:
 	@if test -e $(srcdir)/.git && (git --version) >/dev/null 2>&1 && \
 	  (cd $(srcdir) && git --no-pager grep -l -E '[<]assert.h[>]') | \
-	  $(EGREP) -v '^ovs/lib/(sflow_receiver|vlog).c$$|^ovs/tests/|^tests/'; \
+	  $(EGREP) -v '^tests/'; \
 	then \
 	  echo "Files listed above unexpectedly #include <""assert.h"">."; \
 	  echo "Please use ovs_assert (from util.h) instead of assert."; \
@@ -304,8 +310,7 @@ ALL_LOCAL += check-endian
 check-endian:
 	@if test -e $(srcdir)/.git && (git --version) >/dev/null 2>&1 && \
 	  (cd $(srcdir) && git --no-pager grep -l -E \
-	   -e 'BIG_ENDIAN|LITTLE_ENDIAN' --and --not -e 'BYTE_ORDER' | \
-	  $(EGREP) -v '^ovs/datapath/|^ovs/include/sparse/rte_'); \
+	   -e 'BIG_ENDIAN|LITTLE_ENDIAN' --and --not -e 'BYTE_ORDER'); \
 	then \
 	  echo "See above for list of files that misuse LITTLE""_ENDIAN"; \
 	  echo "or BIG""_ENDIAN.  Please use WORDS_BIGENDIAN instead."; \
@@ -329,9 +334,9 @@ check-tabs:
 	@cd $(srcdir); \
 	if test -e .git && (git --version) >/dev/null 2>&1 && \
 	  grep -ln "^	" \
-	    `git ls-files \
+	    `git ls-files | grep -v $(submodules) \
 	      | grep -v -f build-aux/initial-tab-whitelist` /dev/null \
-	      | $(EGREP) -v ':[ 	]*/?\*'; \
+	      | $(EGREP) -v ':[         ]*/?\*'; \
 	then \
 	  echo "See above for files that use tabs for indentation."; \
 	  echo "Please use spaces instead."; \
@@ -344,8 +349,7 @@ thread-safety-check:
 	@cd $(srcdir); \
 	if test -e .git && (git --version) >/dev/null 2>&1 && \
 	  grep -n -f build-aux/thread-safety-blacklist \
-	    `git ls-files | grep '\.[ch]$$' \
-	      | $(EGREP) -v '^ovs/datapath|^ovs/lib/sflow'` /dev/null \
+	    `git ls-files | grep -v $(submodules) | grep '\.[ch]$$'` /dev/null \
 	      | $(EGREP) -v ':[ 	]*/?\*'; \
 	then \
 	  echo "See above for list of calls to functions that are"; \
@@ -361,7 +365,7 @@ ALL_LOCAL += check-ifconfig
 check-ifconfig:
 	@if test -e $(srcdir)/.git && (git --version) >/dev/null 2>&1 && \
 	  (cd $(srcdir) && git --no-pager grep -l -E -e 'ifconfig' | \
-           $(EGREP) -v 'Makefile.am|ovs-vsctl-bashcomp|openvswitch-custom\.te'); \
+	   $(EGREP) -v 'Makefile.am|openvswitch-custom\.te'); \
 	then \
 	  echo "See above for list of files that use or reference"; \
           echo "'ifconfig'.  Please use 'ip' instead."; \
diff --git a/NEWS b/NEWS
index f71ec329c..57a9ba939 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,19 @@
+Post-v20.12.0
+-------------------------
+  - Support ECMP multiple nexthops for reroute router policies.
+  - BFD protocol support according to RFC 5880 [0]. Introduce next-hop BFD
+    availability check for OVN static routes.
+    [0] https://tools.ietf.org/html/rfc5880
+  - Change the semantic of the "Logical_Switch_Port.up" field such that it is
+    set to "true" only when all corresponding OVS openflow operations have
+    been processed.  This also introduces a new "OVS.Interface.external-id",
+    "ovn-installed".  This external-id is set by ovn-controller only after all
+    openflow operations corresponding to the OVS interface being added have
+    been processed.
+  - Add a new option to Load_Balancer.options, "hairpin_snat_ip", to allow
+    users to explicitly select which source IP should be used for load
+    balancer hairpin traffic.
+
 OVN v20.12.0 - 18 Dec 2020
 --------------------------
    - The "datapath" argument to ovn-trace is now optional, since the
diff --git a/acinclude.m4 b/acinclude.m4
index a797adc82..2ca15cb33 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -338,7 +338,7 @@ AC_DEFUN([OVN_CHECK_OVS], [
       AC_ERROR([$OVSDIR is not an OVS source directory])
     fi
   else
-    AC_ERROR([OVS source dir path needs to be specified (use --with-ovs-source)])
+    OVSDIR=$srcdir/ovs
   fi
 
   AC_MSG_RESULT([$OVSDIR])
diff --git a/build-aux/initial-tab-whitelist b/build-aux/initial-tab-whitelist
index 216cd2ed3..b2f5a0791 100644
--- a/build-aux/initial-tab-whitelist
+++ b/build-aux/initial-tab-whitelist
@@ -8,3 +8,4 @@
 ^xenserver/
 ^debian/rules.modules$
 ^debian/rules$
+^\.gitmodules$
diff --git a/compile_ovn.sh b/compile_ovn.sh
new file mode 100755
index 000000000..1b980df4f
--- /dev/null
+++ b/compile_ovn.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+git submodule update --init
+
+pushd ovs
+./boot.sh
+./configure --enable-Werror --enable-sparse
+make -j5
+popd
+
+./boot.sh
+./configure --enable-Werror --enable-sparse
+make -j5
+
diff --git a/controller-vtep/binding.c b/controller-vtep/binding.c
index 83377157e..01d5a16d2 100644
--- a/controller-vtep/binding.c
+++ b/controller-vtep/binding.c
@@ -109,7 +109,12 @@ update_pb_chassis(const struct sbrec_port_binding *port_binding_rec,
                      port_binding_rec->chassis->name,
                      chassis_rec->name);
         }
+
         sbrec_port_binding_set_chassis(port_binding_rec, chassis_rec);
+        if (port_binding_rec->n_up) {
+            bool up = true;
+            sbrec_port_binding_set_up(port_binding_rec, &up, 1);
+        }
     }
 }
 
diff --git a/controller/automake.mk b/controller/automake.mk
index 45e1bdd36..9b8debd2f 100644
--- a/controller/automake.mk
+++ b/controller/automake.mk
@@ -18,6 +18,8 @@ controller_ovn_controller_SOURCES = \
 	controller/lport.h \
 	controller/ofctrl.c \
 	controller/ofctrl.h \
+	controller/ofctrl-seqno.c \
+	controller/ofctrl-seqno.h \
 	controller/pinctrl.c \
 	controller/pinctrl.h \
 	controller/patch.c \
@@ -25,7 +27,10 @@ controller_ovn_controller_SOURCES = \
 	controller/ovn-controller.c \
 	controller/ovn-controller.h \
 	controller/physical.c \
-	controller/physical.h
+	controller/physical.h \
+	controller/mac-learn.c \
+	controller/mac-learn.h
+
 controller_ovn_controller_LDADD = lib/libovn.la $(OVS_LIBDIR)/libopenvswitch.la
 man_MANS += controller/ovn-controller.8
 EXTRA_DIST += controller/ovn-controller.8.xml
diff --git a/controller/binding.c b/controller/binding.c
index cb60c5d67..4e6c75696 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -18,6 +18,7 @@
 #include "ha-chassis.h"
 #include "lflow.h"
 #include "lport.h"
+#include "ofctrl-seqno.h"
 #include "patch.h"
 
 #include "lib/bitmap.h"
@@ -34,6 +35,38 @@
 
 VLOG_DEFINE_THIS_MODULE(binding);
 
+/* External ID to be set in the OVS.Interface record when the OVS interface
+ * is ready for use, i.e., is bound to an OVN port and its corresponding
+ * flows have been installed.
+ */
+#define OVN_INSTALLED_EXT_ID "ovn-installed"
+
+/* Set of OVS interface IDs that have been released in the most recent
+ * processing iterations.  This gets updated in release_lport() and is
+ * periodically emptied in binding_seqno_run().
+ */
+static struct sset binding_iface_released_set =
+    SSET_INITIALIZER(&binding_iface_released_set);
+
+/* Set of OVS interface IDs that have been bound in the most recent
+ * processing iterations.  This gets updated in release_lport() and is
+ * periodically emptied in binding_seqno_run().
+ */
+static struct sset binding_iface_bound_set =
+    SSET_INITIALIZER(&binding_iface_bound_set);
+
+static void
+binding_iface_released_add(const char *iface_id)
+{
+    sset_add(&binding_iface_released_set, iface_id);
+}
+
+static void
+binding_iface_bound_add(const char *iface_id)
+{
+    sset_add(&binding_iface_bound_set, iface_id);
+}
+
 #define OVN_QOS_TYPE "linux-htb"
 
 struct qos_queue {
@@ -688,6 +721,7 @@ local_binding_add_child(struct local_binding *lbinding,
                         struct local_binding *child)
 {
     local_binding_add(&lbinding->children, child);
+    child->parent = lbinding;
 }
 
 static struct local_binding *
@@ -697,6 +731,13 @@ local_binding_find_child(struct local_binding *lbinding,
     return local_binding_find(&lbinding->children, child_name);
 }
 
+static void
+local_binding_delete_child(struct local_binding *lbinding,
+                           struct local_binding *child)
+{
+    shash_find_and_delete(&lbinding->children, child->name);
+}
+
 static bool
 is_lport_vif(const struct sbrec_port_binding *pb)
 {
@@ -823,15 +864,52 @@ get_lport_type(const struct sbrec_port_binding *pb)
     return LP_UNKNOWN;
 }
 
+/* For newly claimed ports, if 'notify_up' is 'false':
+ * - set the 'pb.up' field to true if 'pb' has no 'parent_pb'.
+ * - set the 'pb.up' field to true if 'parent_pb.up' is 'true' (e.g., for
+ *   container and virtual ports).
+ * Otherwise request a notification to be sent when the OVS flows
+ * corresponding to 'pb' have been installed.
+ *
+ * Note:
+ *   Updates (directly or through a notification) the 'pb->up' field only if
+ *   it's explicitly set to 'false'.
+ *   This is to ensure compatibility with older versions of ovn-northd.
+ */
+static void
+claimed_lport_set_up(const struct sbrec_port_binding *pb,
+                     const struct sbrec_port_binding *parent_pb,
+                     const struct sbrec_chassis *chassis_rec,
+                     bool notify_up)
+{
+    if (!notify_up) {
+        bool up = true;
+        if (!parent_pb || (parent_pb->n_up && parent_pb->up[0])) {
+            sbrec_port_binding_set_up(pb, &up, 1);
+        }
+        return;
+    }
+
+    if (pb->chassis != chassis_rec || (pb->n_up && !pb->up[0])) {
+        binding_iface_bound_add(pb->logical_port);
+    }
+}
+
 /* Returns false if lport is not claimed due to 'sb_readonly'.
  * Returns true otherwise.
  */
 static bool
 claim_lport(const struct sbrec_port_binding *pb,
+            const struct sbrec_port_binding *parent_pb,
             const struct sbrec_chassis *chassis_rec,
             const struct ovsrec_interface *iface_rec,
-            bool sb_readonly, struct hmap *tracked_datapaths)
+            bool sb_readonly, bool notify_up,
+            struct hmap *tracked_datapaths)
 {
+    if (!sb_readonly) {
+        claimed_lport_set_up(pb, parent_pb, chassis_rec, notify_up);
+    }
+
     if (pb->chassis != chassis_rec) {
         if (sb_readonly) {
             return false;
@@ -900,7 +978,12 @@ release_lport(const struct sbrec_port_binding *pb, bool sb_readonly,
         sbrec_port_binding_set_virtual_parent(pb, NULL);
     }
 
+    if (pb->n_up) {
+        bool up = false;
+        sbrec_port_binding_set_up(pb, &up, 1);
+    }
     update_lport_tracking(pb, tracked_datapaths);
+    binding_iface_released_add(pb->logical_port);
     VLOG_INFO("Releasing lport %s from this chassis.", pb->logical_port);
     return true;
 }
@@ -958,8 +1041,7 @@ release_local_binding_children(const struct sbrec_chassis *chassis_rec,
             }
         }
 
-        /* Clear the local bindings' 'pb' and 'iface'. */
-        l->pb = NULL;
+        /* Clear the local bindings' 'iface'. */
         l->iface = NULL;
     }
 
@@ -998,8 +1080,12 @@ consider_vif_lport_(const struct sbrec_port_binding *pb,
     if (lbinding_set) {
         if (can_bind) {
             /* We can claim the lport. */
-            if (!claim_lport(pb, b_ctx_in->chassis_rec, lbinding->iface,
-                             !b_ctx_in->ovnsb_idl_txn,
+            const struct sbrec_port_binding *parent_pb =
+                lbinding->parent ? lbinding->parent->pb : NULL;
+
+            if (!claim_lport(pb, parent_pb, b_ctx_in->chassis_rec,
+                             lbinding->iface, !b_ctx_in->ovnsb_idl_txn,
+                             !lbinding->parent,
                              b_ctx_out->tracked_dp_bindings)){
                 return false;
             }
@@ -1203,8 +1289,8 @@ consider_nonvif_lport_(const struct sbrec_port_binding *pb,
                            b_ctx_out->tracked_dp_bindings);
 
         update_local_lport_ids(pb, b_ctx_out);
-        return claim_lport(pb, b_ctx_in->chassis_rec, NULL,
-                           !b_ctx_in->ovnsb_idl_txn,
+        return claim_lport(pb, NULL, b_ctx_in->chassis_rec, NULL,
+                           !b_ctx_in->ovnsb_idl_txn, false,
                            b_ctx_out->tracked_dp_bindings);
     } else if (pb->chassis == b_ctx_in->chassis_rec) {
         return release_lport(pb, !b_ctx_in->ovnsb_idl_txn,
@@ -2063,6 +2149,16 @@ handle_deleted_vif_lport(const struct sbrec_port_binding *pb,
      * when the interface change happens. */
     if (is_lport_container(pb)) {
         remove_local_lports(pb->logical_port, b_ctx_out);
+
+        /* If the container port is removed we should also remove it from
+         * its parent's children set.
+         */
+        if (lbinding) {
+            if (lbinding->parent) {
+                local_binding_delete_child(lbinding->parent, lbinding);
+            }
+            local_binding_destroy(lbinding);
+        }
     }
 
     handle_deleted_lport(pb, b_ctx_in, b_ctx_out);
@@ -2132,13 +2228,26 @@ bool
 binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
                                     struct binding_ctx_out *b_ctx_out)
 {
-    bool handled = true;
+    /* Run the tracked port binding loop twice to ensure correctness:
+     * 1. First to handle deleted changes.  This is split in four sub-parts
+     *    because child local bindings must be cleaned up first:
+     *    a. Container ports first.
+     *    b. Then virtual ports.
+     *    c. Then regular VIFs.
+     *    d. Last other ports.
+     * 2. Second to handle add/update changes.
+     */
+    struct shash deleted_container_pbs =
+        SHASH_INITIALIZER(&deleted_container_pbs);
+    struct shash deleted_virtual_pbs =
+        SHASH_INITIALIZER(&deleted_virtual_pbs);
+    struct shash deleted_vif_pbs =
+        SHASH_INITIALIZER(&deleted_vif_pbs);
+    struct shash deleted_other_pbs =
+        SHASH_INITIALIZER(&deleted_other_pbs);
     const struct sbrec_port_binding *pb;
+    bool handled = true;
 
-    /* Run the tracked port binding loop twice. One to handle deleted
-     * changes. And another to handle add/update changes.
-     * This will ensure correctness.
-     */
     SBREC_PORT_BINDING_TABLE_FOR_EACH_TRACKED (pb,
                                                b_ctx_in->port_binding_table) {
         if (!sbrec_port_binding_is_deleted(pb)) {
@@ -2146,18 +2255,60 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
         }
 
         enum en_lport_type lport_type = get_lport_type(pb);
-        if (lport_type == LP_VIF || lport_type == LP_VIRTUAL) {
-            handled = handle_deleted_vif_lport(pb, lport_type, b_ctx_in,
-                                                b_ctx_out);
+
+        if (lport_type == LP_VIF) {
+            if (is_lport_container(pb)) {
+                shash_add(&deleted_container_pbs, pb->logical_port, pb);
+            } else {
+                shash_add(&deleted_vif_pbs, pb->logical_port, pb);
+            }
+        } else if (lport_type == LP_VIRTUAL) {
+            shash_add(&deleted_virtual_pbs, pb->logical_port, pb);
         } else {
-            handle_deleted_lport(pb, b_ctx_in, b_ctx_out);
+            shash_add(&deleted_other_pbs, pb->logical_port, pb);
         }
+    }
 
+    struct shash_node *node;
+    struct shash_node *node_next;
+    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_container_pbs) {
+        handled = handle_deleted_vif_lport(node->data, LP_VIF, b_ctx_in,
+                                           b_ctx_out);
+        shash_delete(&deleted_container_pbs, node);
         if (!handled) {
-            break;
+            goto delete_done;
+        }
+    }
+
+    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_virtual_pbs) {
+        handled = handle_deleted_vif_lport(node->data, LP_VIRTUAL, b_ctx_in,
+                                           b_ctx_out);
+        shash_delete(&deleted_virtual_pbs, node);
+        if (!handled) {
+            goto delete_done;
         }
     }
 
+    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_vif_pbs) {
+        handled = handle_deleted_vif_lport(node->data, LP_VIF, b_ctx_in,
+                                           b_ctx_out);
+        shash_delete(&deleted_vif_pbs, node);
+        if (!handled) {
+            goto delete_done;
+        }
+    }
+
+    SHASH_FOR_EACH_SAFE (node, node_next, &deleted_other_pbs) {
+        handle_deleted_lport(node->data, b_ctx_in, b_ctx_out);
+        shash_delete(&deleted_other_pbs, node);
+    }
+
+delete_done:
+    shash_destroy(&deleted_container_pbs);
+    shash_destroy(&deleted_virtual_pbs);
+    shash_destroy(&deleted_vif_pbs);
+    shash_destroy(&deleted_other_pbs);
+
     if (!handled) {
         return false;
     }
@@ -2288,3 +2439,155 @@ binding_handle_port_binding_changes(struct binding_ctx_in *b_ctx_in,
     destroy_qos_map(&qos_map);
     return handled;
 }
+
+/* Registered ofctrl seqno type for port_binding flow installation. */
+static size_t binding_seq_type_pb_cfg;
+
+/* Binding specific seqno to be acked by ofctrl when flows for new interfaces
+ * have been installed.
+ */
+static uint32_t binding_iface_seqno = 0;
+
+/* Map indexed by iface-id containing the sequence numbers that when acked
+ * indicate that the OVS flows for the iface-id have been installed.
+ */
+static struct simap binding_iface_seqno_map =
+    SIMAP_INITIALIZER(&binding_iface_seqno_map);
+
+void
+binding_init(void)
+{
+    binding_seq_type_pb_cfg = ofctrl_seqno_add_type();
+}
+
+/* Processes new release/bind operations OVN ports.  For newly bound ports
+ * it creates ofctrl seqno update requests that will be acked when
+ * corresponding OVS flows have been installed.
+ *
+ * NOTE: Should be called only when valid SB and OVS transactions are
+ * available.
+ */
+void
+binding_seqno_run(struct shash *local_bindings)
+{
+    const char *iface_id;
+    const char *iface_id_next;
+
+    SSET_FOR_EACH_SAFE (iface_id, iface_id_next, &binding_iface_released_set) {
+        struct shash_node *lb_node = shash_find(local_bindings, iface_id);
+
+        /* If the local binding still exists (i.e., the OVS interface is
+         * still configured locally) then remove the external id and remove
+         * it from the in-flight seqno map.
+         */
+        if (lb_node) {
+            struct local_binding *lb = lb_node->data;
+
+            if (lb->iface && smap_get(&lb->iface->external_ids,
+                                      OVN_INSTALLED_EXT_ID)) {
+                ovsrec_interface_update_external_ids_delkey(
+                    lb->iface, OVN_INSTALLED_EXT_ID);
+            }
+        }
+        simap_find_and_delete(&binding_iface_seqno_map, iface_id);
+        sset_delete(&binding_iface_released_set,
+                    SSET_NODE_FROM_NAME(iface_id));
+    }
+
+    bool new_ifaces = false;
+    uint32_t new_seqno = binding_iface_seqno + 1;
+
+    SSET_FOR_EACH_SAFE (iface_id, iface_id_next, &binding_iface_bound_set) {
+        struct shash_node *lb_node = shash_find(local_bindings, iface_id);
+
+        struct local_binding *lb = lb_node ? lb_node->data : NULL;
+
+        /* Make sure the binding is still complete, i.e., both SB port_binding
+         * and OVS interface still exist.
+         *
+         * If so, then this is a newly bound interface, make sure we reset the
+         * Port_Binding 'up' field and the OVS Interface 'external-id'.
+         */
+        if (lb && lb->pb && lb->iface) {
+            new_ifaces = true;
+
+            if (smap_get(&lb->iface->external_ids, OVN_INSTALLED_EXT_ID)) {
+                ovsrec_interface_update_external_ids_delkey(
+                    lb->iface, OVN_INSTALLED_EXT_ID);
+            }
+            if (lb->pb->n_up) {
+                bool up = false;
+                sbrec_port_binding_set_up(lb->pb, &up, 1);
+            }
+            simap_put(&binding_iface_seqno_map, lb->name, new_seqno);
+        }
+        sset_delete(&binding_iface_bound_set, SSET_NODE_FROM_NAME(iface_id));
+    }
+
+    /* Request a seqno update when the flows for new interfaces have been
+     * installed in OVS.
+     */
+    if (new_ifaces) {
+        binding_iface_seqno = new_seqno;
+        ofctrl_seqno_update_create(binding_seq_type_pb_cfg, new_seqno);
+    }
+}
+
+/* Processes ofctrl seqno ACKs for new bindings.  Sets the
+ * 'OVN_INSTALLED_EXT_ID' external-id in the OVS interface and the
+ * Port_Binding.up field for all ports for which OVS flows have been
+ * installed.
+ *
+ * NOTE: Should be called only when valid SB and OVS transactions are
+ * available.
+ */
+void
+binding_seqno_install(struct shash *local_bindings)
+{
+    struct ofctrl_acked_seqnos *acked_seqnos =
+            ofctrl_acked_seqnos_get(binding_seq_type_pb_cfg);
+    struct simap_node *node;
+    struct simap_node *node_next;
+
+    SIMAP_FOR_EACH_SAFE (node, node_next, &binding_iface_seqno_map) {
+        struct shash_node *lb_node = shash_find(local_bindings, node->name);
+
+        if (!lb_node) {
+            goto del_seqno;
+        }
+
+        struct local_binding *lb = lb_node->data;
+        if (!lb->pb || !lb->iface) {
+            goto del_seqno;
+        }
+
+        if (!ofctrl_acked_seqnos_contains(acked_seqnos, node->data)) {
+            continue;
+        }
+
+        ovsrec_interface_update_external_ids_setkey(lb->iface,
+                                                    OVN_INSTALLED_EXT_ID,
+                                                    "true");
+        if (lb->pb->n_up) {
+            bool up = true;
+
+            sbrec_port_binding_set_up(lb->pb, &up, 1);
+            struct shash_node *child_node;
+            SHASH_FOR_EACH (child_node, &lb->children) {
+                struct local_binding *lb_child = child_node->data;
+                sbrec_port_binding_set_up(lb_child->pb, &up, 1);
+            }
+        }
+
+del_seqno:
+        simap_delete(&binding_iface_seqno_map, node);
+    }
+
+    ofctrl_acked_seqnos_destroy(acked_seqnos);
+}
+
+void
+binding_seqno_flush(void)
+{
+    simap_clear(&binding_iface_seqno_map);
+}
diff --git a/controller/binding.h b/controller/binding.h
index c9740560f..c9ebef4b1 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -100,6 +100,7 @@ struct local_binding {
 
     /* shash of 'struct local_binding' representing children. */
     struct shash children;
+    struct local_binding *parent;
 };
 
 static inline struct local_binding *
@@ -134,4 +135,9 @@ bool binding_handle_ovs_interface_changes(struct binding_ctx_in *,
 bool binding_handle_port_binding_changes(struct binding_ctx_in *,
                                          struct binding_ctx_out *);
 void binding_tracked_dp_destroy(struct hmap *tracked_datapaths);
+
+void binding_init(void);
+void binding_seqno_run(struct shash *local_bindings);
+void binding_seqno_install(struct shash *local_bindings);
+void binding_seqno_flush(void);
 #endif /* controller/binding.h */
diff --git a/controller/chassis.c b/controller/chassis.c
index b4d4b0e37..0937e33e6 100644
--- a/controller/chassis.c
+++ b/controller/chassis.c
@@ -28,6 +28,7 @@
 #include "lib/ovn-sb-idl.h"
 #include "ovn-controller.h"
 #include "lib/util.h"
+#include "ovn/features.h"
 
 VLOG_DEFINE_THIS_MODULE(chassis);
 
@@ -293,6 +294,7 @@ chassis_build_other_config(struct smap *config, const char *bridge_mappings,
     smap_replace(config, "iface-types", iface_types);
     smap_replace(config, "ovn-chassis-mac-mappings", chassis_macs);
     smap_replace(config, "is-interconn", is_interconn ? "true" : "false");
+    smap_replace(config, OVN_FEATURE_PORT_UP_NOTIF, "true");
 }
 
 /*
@@ -363,6 +365,11 @@ chassis_other_config_changed(const char *bridge_mappings,
         return true;
     }
 
+    if (!smap_get_bool(&chassis_rec->other_config, OVN_FEATURE_PORT_UP_NOTIF,
+                       false)) {
+        return true;
+    }
+
     return false;
 }
 
diff --git a/controller/lflow.c b/controller/lflow.c
index c02585b1e..76a4deaa0 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -88,6 +88,11 @@ static void lflow_resource_destroy_lflow(struct lflow_resource_ref *,
 static bool
 lookup_port_cb(const void *aux_, const char *port_name, unsigned int *portp)
 {
+    if (!strcmp(port_name, "none")) {
+        *portp = 0;
+        return true;
+    }
+
     const struct lookup_port_aux *aux = aux_;
 
     const struct sbrec_port_binding *pb
@@ -480,22 +485,19 @@ lflow_handle_changed_flows(struct lflow_ctx_in *l_ctx_in,
     struct controller_event_options controller_event_opts;
     controller_event_opts_init(&controller_event_opts);
 
-    /* Handle flow removing first (for deleted or updated lflows), and then
-     * handle reprocessing or adding flows, so that when the flows being
-     * removed and added with same match conditions can be processed in the
-     * proper order */
-
+    /* Flood remove the flows for all the tracked lflows.  It's possible that
+     * lflow_add_flows_for_datapath() may have been called before calling
+     * this function. */
     struct hmap flood_remove_nodes = HMAP_INITIALIZER(&flood_remove_nodes);
     struct ofctrl_flood_remove_node *ofrn, *next;
     SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_TRACKED (lflow,
                                                l_ctx_in->logical_flow_table) {
+        VLOG_DBG("delete lflow "UUID_FMT, UUID_ARGS(&lflow->header_.uuid));
+        ofrn = xmalloc(sizeof *ofrn);
+        ofrn->sb_uuid = lflow->header_.uuid;
+        hmap_insert(&flood_remove_nodes, &ofrn->hmap_node,
+                    uuid_hash(&ofrn->sb_uuid));
         if (!sbrec_logical_flow_is_new(lflow)) {
-            VLOG_DBG("delete lflow "UUID_FMT,
-                     UUID_ARGS(&lflow->header_.uuid));
-            ofrn = xmalloc(sizeof *ofrn);
-            ofrn->sb_uuid = lflow->header_.uuid;
-            hmap_insert(&flood_remove_nodes, &ofrn->hmap_node,
-                        uuid_hash(&ofrn->sb_uuid));
             if (l_ctx_out->lflow_cache_map) {
                 lflow_cache_delete(l_ctx_out->lflow_cache_map, lflow);
             }
@@ -525,21 +527,6 @@ lflow_handle_changed_flows(struct lflow_ctx_in *l_ctx_in,
     }
     hmap_destroy(&flood_remove_nodes);
 
-    /* Now handle new lflows only. */
-    SBREC_LOGICAL_FLOW_TABLE_FOR_EACH_TRACKED (lflow,
-                                               l_ctx_in->logical_flow_table) {
-        if (sbrec_logical_flow_is_new(lflow)) {
-            VLOG_DBG("add lflow "UUID_FMT,
-                     UUID_ARGS(&lflow->header_.uuid));
-            if (!consider_logical_flow(lflow, &dhcp_opts, &dhcpv6_opts,
-                                       &nd_ra_opts, &controller_event_opts,
-                                       l_ctx_in, l_ctx_out)) {
-                ret = false;
-                l_ctx_out->conj_id_overflow = true;
-                break;
-            }
-        }
-    }
     dhcp_opts_destroy(&dhcp_opts);
     dhcp_opts_destroy(&dhcpv6_opts);
     nd_ra_opts_destroy(&nd_ra_opts);
@@ -668,9 +655,8 @@ update_conj_id_ofs(uint32_t *conj_id_ofs, uint32_t n_conjs)
 static void
 add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
                           const struct sbrec_datapath_binding *dp,
-                          struct hmap *matches, size_t conj_id_ofs,
-                          uint8_t ptable, uint8_t output_ptable,
-                          struct ofpbuf *ovnacts,
+                          struct hmap *matches, uint8_t ptable,
+                          uint8_t output_ptable, struct ofpbuf *ovnacts,
                           bool ingress, struct lflow_ctx_in *l_ctx_in,
                           struct lflow_ctx_out *l_ctx_out)
 {
@@ -702,15 +688,14 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
         .lb_hairpin_ptable = OFTABLE_CHK_LB_HAIRPIN,
         .lb_hairpin_reply_ptable = OFTABLE_CHK_LB_HAIRPIN_REPLY,
         .ct_snat_vip_ptable = OFTABLE_CT_SNAT_FOR_VIP,
+        .fdb_ptable = OFTABLE_GET_FDB,
+        .fdb_lookup_ptable = OFTABLE_LOOKUP_FDB,
     };
     ovnacts_encode(ovnacts->data, ovnacts->size, &ep, &ofpacts);
 
     struct expr_match *m;
     HMAP_FOR_EACH (m, hmap_node, matches) {
         match_set_metadata(&m->match, htonll(dp->tunnel_key));
-        if (m->match.wc.masks.conj_id) {
-            m->match.flow.conj_id += conj_id_ofs;
-        }
         if (datapath_is_switch(dp)) {
             unsigned int reg_index
                 = (ingress ? MFF_LOG_INPORT : MFF_LOG_OUTPORT) - MFF_REG0;
@@ -744,7 +729,7 @@ add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
                 struct ofpact_conjunction *dst;
 
                 dst = ofpact_put_CONJUNCTION(&conj);
-                dst->id = src->id + conj_id_ofs;
+                dst->id = src->id;
                 dst->clause = src->clause;
                 dst->n_clauses = src->n_clauses;
             }
@@ -915,9 +900,9 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
             return true;
         }
 
-        add_matches_to_flow_table(lflow, dp, &matches, *l_ctx_out->conj_id_ofs,
-                                  ptable, output_ptable, &ovnacts, ingress,
-                                  l_ctx_in, l_ctx_out);
+        expr_matches_prepare(&matches, *l_ctx_out->conj_id_ofs);
+        add_matches_to_flow_table(lflow, dp, &matches, ptable, output_ptable,
+                                  &ovnacts, ingress, l_ctx_in, l_ctx_out);
 
         ovnacts_free(ovnacts.data, ovnacts.size);
         ofpbuf_uninit(&ovnacts);
@@ -930,10 +915,11 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
         lflow_cache_get(l_ctx_out->lflow_cache_map, lflow);
 
     if (lc && lc->type == LCACHE_T_MATCHES) {
-        /* 'matches' is cached. No need to do expr parsing.
+        /* 'matches' is cached. No need to do expr parsing and no need
+         * to call expr_matches_prepare() to update the conj ids.
          * Add matches to flow table and return. */
-        add_matches_to_flow_table(lflow, dp, lc->expr_matches, lc->conj_id_ofs,
-                                  ptable, output_ptable, &ovnacts, ingress,
+        add_matches_to_flow_table(lflow, dp, lc->expr_matches, ptable,
+                                  output_ptable, &ovnacts, ingress,
                                   l_ctx_in, l_ctx_out);
         ovnacts_free(ovnacts.data, ovnacts.size);
         ofpbuf_uninit(&ovnacts);
@@ -1009,10 +995,11 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
         }
     }
 
+    expr_matches_prepare(matches, lc->conj_id_ofs);
+
     /* Encode OVN logical actions into OpenFlow. */
-    add_matches_to_flow_table(lflow, dp, matches, lc->conj_id_ofs,
-                              ptable, output_ptable, &ovnacts, ingress,
-                              l_ctx_in, l_ctx_out);
+    add_matches_to_flow_table(lflow, dp, matches, ptable, output_ptable,
+                              &ovnacts, ingress, l_ctx_in, l_ctx_out);
     ovnacts_free(ovnacts.data, ovnacts.size);
     ofpbuf_uninit(&ovnacts);
 
@@ -1080,6 +1067,18 @@ put_load(const uint8_t *data, size_t len,
     bitwise_one(ofpact_set_field_mask(sf), sf->field->n_bytes, ofs, n_bits);
 }
 
+static void
+put_load64(uint64_t value, enum mf_field_id dst, int ofs, int n_bits,
+           struct ofpbuf *ofpacts)
+{
+    struct ofpact_set_field *sf = ofpact_put_set_field(ofpacts,
+                                                       mf_from_id(dst), NULL,
+                                                       NULL);
+    ovs_be64 n_value = htonll(value);
+    bitwise_copy(&n_value, 8, 0, sf->value, sf->field->n_bytes, ofs, n_bits);
+    bitwise_one(ofpact_set_field_mask(sf), sf->field->n_bytes, ofs, n_bits);
+}
+
 static void
 consider_neighbor_flow(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                        const struct hmap *local_datapaths,
@@ -1173,6 +1172,184 @@ add_neighbor_flows(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     }
 }
 
+/* Builds the "learn()" action to be triggered by packets initiating a
+ * hairpin session.
+ *
+ * This will generate flows in table OFTABLE_CHK_LB_HAIRPIN_REPLY of the form:
+ * - match:
+ *     metadata=<orig-pkt-metadata>,ip/ipv6,ip.src=<backend>,ip.dst=<vip>
+ *     nw_proto='lb_proto',tp_src_port=<backend-port>
+ * - action:
+ *     set MLF_LOOKUP_LB_HAIRPIN_BIT=1
+ */
+static void
+add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip,
+                                uint8_t lb_proto, bool has_l4_port,
+                                uint64_t cookie, struct ofpbuf *ofpacts)
+{
+    struct match match = MATCH_CATCHALL_INITIALIZER;
+    struct ofpact_learn *ol = ofpact_put_LEARN(ofpacts);
+    struct ofpact_learn_spec *ol_spec;
+    unsigned int imm_bytes;
+    uint8_t *src_imm;
+
+    /* Once learned, hairpin reply flows are permanent until the VIP/backend
+     * is removed.
+     */
+    ol->flags = NX_LEARN_F_DELETE_LEARNED;
+    ol->idle_timeout = OFP_FLOW_PERMANENT;
+    ol->hard_timeout = OFP_FLOW_PERMANENT;
+    ol->priority = OFP_DEFAULT_PRIORITY;
+    ol->table_id = OFTABLE_CHK_LB_HAIRPIN_REPLY;
+    ol->cookie = htonll(cookie);
+
+    /* Match on metadata of the packet that created the hairpin session. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+
+    ol_spec->dst.field = mf_from_id(MFF_METADATA);
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+    ol_spec->src.field = mf_from_id(MFF_METADATA);
+
+    /* Match on the same ETH type as the packet that created the hairpin
+     * session.
+     */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_ETH_TYPE);
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    union mf_value imm_eth_type = {
+        .be16 = !vip6 ? htons(ETH_TYPE_IP) : htons(ETH_TYPE_IPV6)
+    };
+    mf_write_subfield_value(&ol_spec->dst, &imm_eth_type, &match);
+
+    /* Push value last, as this may reallocate 'ol_spec'. */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    memcpy(src_imm, &imm_eth_type, imm_bytes);
+
+    /* Hairpin replies have ip.src == <backend-ip>. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    if (!vip6) {
+        ol_spec->dst.field = mf_from_id(MFF_IPV4_SRC);
+        ol_spec->src.field = mf_from_id(MFF_IPV4_SRC);
+    } else {
+        ol_spec->dst.field = mf_from_id(MFF_IPV6_SRC);
+        ol_spec->src.field = mf_from_id(MFF_IPV6_SRC);
+    }
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_FIELD;
+
+    /* Hairpin replies have ip.dst == <vip>. */
+    union mf_value imm_ip;
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    if (!vip6) {
+        ol_spec->dst.field = mf_from_id(MFF_IPV4_DST);
+        imm_ip = (union mf_value) {
+            .be32 = vip
+        };
+    } else {
+        ol_spec->dst.field = mf_from_id(MFF_IPV6_DST);
+        imm_ip = (union mf_value) {
+            .ipv6 = *vip6
+        };
+    }
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    mf_write_subfield_value(&ol_spec->dst, &imm_ip, &match);
+
+    /* Push value last, as this may reallocate 'ol_spec' */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    memcpy(src_imm, &imm_ip, imm_bytes);
+
+    /* Hairpin replies have the same nw_proto as packets that created the
+     * session.
+     */
+    union mf_value imm_proto = {
+        .u8 = lb_proto,
+    };
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_IP_PROTO);
+    ol_spec->src.field = mf_from_id(MFF_IP_PROTO);
+    ol_spec->dst.ofs = 0;
+    ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_MATCH;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    mf_write_subfield_value(&ol_spec->dst, &imm_proto, &match);
+
+    /* Push value last, as this may reallocate 'ol_spec' */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    memcpy(src_imm, &imm_proto, imm_bytes);
+
+    /* Hairpin replies have source port == <backend-port>. */
+    if (has_l4_port) {
+        ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+        switch (lb_proto) {
+        case IPPROTO_TCP:
+            ol_spec->dst.field = mf_from_id(MFF_TCP_SRC);
+            ol_spec->src.field = mf_from_id(MFF_TCP_DST);
+            break;
+        case IPPROTO_UDP:
+            ol_spec->dst.field = mf_from_id(MFF_UDP_SRC);
+            ol_spec->src.field = mf_from_id(MFF_UDP_DST);
+            break;
+        case IPPROTO_SCTP:
+            ol_spec->dst.field = mf_from_id(MFF_SCTP_SRC);
+            ol_spec->src.field = mf_from_id(MFF_SCTP_DST);
+            break;
+        default:
+            OVS_NOT_REACHED();
+            break;
+        }
+        ol_spec->dst.ofs = 0;
+        ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+        ol_spec->n_bits = ol_spec->dst.n_bits;
+        ol_spec->dst_type = NX_LEARN_DST_MATCH;
+        ol_spec->src_type = NX_LEARN_SRC_FIELD;
+    }
+
+    /* Set MLF_LOOKUP_LB_HAIRPIN_BIT for hairpin replies. */
+    ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+    ol_spec->dst.field = mf_from_id(MFF_LOG_FLAGS);
+    ol_spec->dst.ofs = MLF_LOOKUP_LB_HAIRPIN_BIT;
+    ol_spec->dst.n_bits = 1;
+    ol_spec->n_bits = ol_spec->dst.n_bits;
+    ol_spec->dst_type = NX_LEARN_DST_LOAD;
+    ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+    union mf_value imm_reg_value = {
+        .u8 = 1
+    };
+    mf_write_subfield_value(&ol_spec->dst, &imm_reg_value, &match);
+
+    /* Push value last, as this may reallocate 'ol_spec' */
+    imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+    src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+    memcpy(src_imm, &imm_reg_value, imm_bytes);
+
+    ofpact_finish_LEARN(ofpacts, &ol);
+}
+
+/* Adds flows to detect hairpin sessions.
+ *
+ * For backwards compatibility with older ovn-northd versions, uses
+ * ct_nw_dst(), ct_ipv6_dst(), ct_tp_dst(), otherwise uses the
+ * original destination tuple stored by ovn-northd.
+ */
 static void
 add_lb_vip_hairpin_flows(struct ovn_controller_lb *lb,
                          struct ovn_lb_vip *lb_vip,
@@ -1182,43 +1359,81 @@ add_lb_vip_hairpin_flows(struct ovn_controller_lb *lb,
 {
     uint64_t stub[1024 / 8];
     struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+    struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
 
     uint8_t value = 1;
     put_load(&value, sizeof value, MFF_LOG_FLAGS,
              MLF_LOOKUP_LB_HAIRPIN_BIT, 1, &ofpacts);
 
-    struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
-    struct match hairpin_reply_match = MATCH_CATCHALL_INITIALIZER;
+    /* Matching on ct_nw_dst()/ct_ipv6_dst()/ct_tp_dst() requires matching
+     * on ct_state first.
+     */
+    if (!lb->hairpin_orig_tuple) {
+        uint32_t ct_state = OVS_CS_F_TRACKED | OVS_CS_F_DST_NAT;
+        match_set_ct_state_masked(&hairpin_match, ct_state, ct_state);
+    }
 
     if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
-        ovs_be32 ip4 = in6_addr_get_mapped_ipv4(&lb_backend->ip);
+        ovs_be32 bip4 = in6_addr_get_mapped_ipv4(&lb_backend->ip);
+        ovs_be32 vip4 = in6_addr_get_mapped_ipv4(&lb_vip->vip);
+        ovs_be32 snat_vip4 = lb->hairpin_snat_ips.n_ipv4_addrs
+                        ? lb->hairpin_snat_ips.ipv4_addrs[0].addr
+                        : vip4;
 
         match_set_dl_type(&hairpin_match, htons(ETH_TYPE_IP));
-        match_set_nw_src(&hairpin_match, ip4);
-        match_set_nw_dst(&hairpin_match, ip4);
-
-        match_set_dl_type(&hairpin_reply_match,
-                          htons(ETH_TYPE_IP));
-        match_set_nw_src(&hairpin_reply_match, ip4);
-        match_set_nw_dst(&hairpin_reply_match,
-                         in6_addr_get_mapped_ipv4(&lb_vip->vip));
+        match_set_nw_src(&hairpin_match, bip4);
+        match_set_nw_dst(&hairpin_match, bip4);
+
+        if (!lb->hairpin_orig_tuple) {
+            match_set_ct_nw_dst(&hairpin_match, vip4);
+        } else {
+            match_set_reg(&hairpin_match,
+                          MFF_LOG_LB_ORIG_DIP_IPV4 - MFF_LOG_REG0,
+                          ntohl(vip4));
+        }
+
+        add_lb_vip_hairpin_reply_action(NULL, snat_vip4, lb_proto,
+                                        lb_backend->port,
+                                        lb->slb->header_.uuid.parts[0],
+                                        &ofpacts);
     } else {
+        struct in6_addr *bip6 = &lb_backend->ip;
+        struct in6_addr *snat_vip6 =
+            lb->hairpin_snat_ips.n_ipv6_addrs
+            ? &lb->hairpin_snat_ips.ipv6_addrs[0].addr
+            : &lb_vip->vip;
         match_set_dl_type(&hairpin_match, htons(ETH_TYPE_IPV6));
-        match_set_ipv6_src(&hairpin_match, &lb_backend->ip);
-        match_set_ipv6_dst(&hairpin_match, &lb_backend->ip);
+        match_set_ipv6_src(&hairpin_match, bip6);
+        match_set_ipv6_dst(&hairpin_match, bip6);
 
-        match_set_dl_type(&hairpin_reply_match,
-                          htons(ETH_TYPE_IPV6));
-        match_set_ipv6_src(&hairpin_reply_match, &lb_backend->ip);
-        match_set_ipv6_dst(&hairpin_reply_match, &lb_vip->vip);
+        if (!lb->hairpin_orig_tuple) {
+            match_set_ct_ipv6_dst(&hairpin_match, &lb_vip->vip);
+        } else {
+            ovs_be128 vip6_value;
+
+            memcpy(&vip6_value, &lb_vip->vip, sizeof vip6_value);
+            match_set_xxreg(&hairpin_match,
+                            MFF_LOG_LB_ORIG_DIP_IPV6 - MFF_LOG_XXREG0,
+                            ntoh128(vip6_value));
+        }
+
+        add_lb_vip_hairpin_reply_action(snat_vip6, 0, lb_proto,
+                                        lb_backend->port,
+                                        lb->slb->header_.uuid.parts[0],
+                                        &ofpacts);
     }
 
     if (lb_backend->port) {
         match_set_nw_proto(&hairpin_match, lb_proto);
         match_set_tp_dst(&hairpin_match, htons(lb_backend->port));
-
-        match_set_nw_proto(&hairpin_reply_match, lb_proto);
-        match_set_tp_src(&hairpin_reply_match, htons(lb_backend->port));
+        if (!lb->hairpin_orig_tuple) {
+            match_set_ct_nw_proto(&hairpin_match, lb_proto);
+            match_set_ct_tp_dst(&hairpin_match, htons(lb_vip->vip_port));
+        } else {
+            match_set_reg_masked(&hairpin_match,
+                                 MFF_LOG_LB_ORIG_TP_DPORT - MFF_REG0,
+                                 lb_vip->vip_port, UINT16_MAX);
+        }
     }
 
     /* In the original direction, only match on traffic that was already
@@ -1239,23 +1454,19 @@ add_lb_vip_hairpin_flows(struct ovn_controller_lb *lb,
     ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN, 100,
                     lb->slb->header_.uuid.parts[0], &hairpin_match,
                     &ofpacts, &lb->slb->header_.uuid);
-
-    for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
-        match_set_metadata(&hairpin_reply_match,
-                           htonll(lb->slb->datapaths[i]->tunnel_key));
-
-        ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN_REPLY, 100,
-                        lb->slb->header_.uuid.parts[0],
-                        &hairpin_reply_match,
-                        &ofpacts, &lb->slb->header_.uuid);
-    }
-
     ofpbuf_uninit(&ofpacts);
 }
 
+/* Adds flows to perform SNAT for hairpin sessions.
+ *
+ * For backwards compatibility with older ovn-northd versions, uses
+ * ct_nw_dst(), ct_ipv6_dst(), ct_tp_dst(), otherwise uses the
+ * original destination tuple stored by ovn-northd.
+ */
 static void
 add_lb_ct_snat_vip_flows(struct ovn_controller_lb *lb,
                          struct ovn_lb_vip *lb_vip,
+                         uint8_t lb_proto,
                          struct ovn_desired_flow_table *flow_table)
 {
     uint64_t stub[1024 / 8];
@@ -1279,25 +1490,65 @@ add_lb_ct_snat_vip_flows(struct ovn_controller_lb *lb,
 
     if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
         nat->range_af = AF_INET;
-        nat->range.addr.ipv4.min = in6_addr_get_mapped_ipv4(&lb_vip->vip);
+        nat->range.addr.ipv4.min =
+            lb->hairpin_snat_ips.n_ipv4_addrs
+            ? lb->hairpin_snat_ips.ipv4_addrs[0].addr
+            : in6_addr_get_mapped_ipv4(&lb_vip->vip);
     } else {
         nat->range_af = AF_INET6;
-        nat->range.addr.ipv6.min = lb_vip->vip;
+        nat->range.addr.ipv6.min
+            = lb->hairpin_snat_ips.n_ipv6_addrs
+            ? lb->hairpin_snat_ips.ipv6_addrs[0].addr
+            : lb_vip->vip;
     }
     ofpacts.header = ofpbuf_push_uninit(&ofpacts, nat_offset);
     ofpact_finish(&ofpacts, &ct->ofpact);
 
     struct match match = MATCH_CATCHALL_INITIALIZER;
+
+    /* Matching on ct_nw_dst()/ct_ipv6_dst()/ct_tp_dst() requires matching
+     * on ct_state first.
+     */
+    if (!lb->hairpin_orig_tuple) {
+        uint32_t ct_state = OVS_CS_F_TRACKED | OVS_CS_F_DST_NAT;
+        match_set_ct_state_masked(&match, ct_state, ct_state);
+    }
+
     if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
+        ovs_be32 vip4 = in6_addr_get_mapped_ipv4(&lb_vip->vip);
+
         match_set_dl_type(&match, htons(ETH_TYPE_IP));
-        match_set_ct_nw_dst(&match, nat->range.addr.ipv4.min);
+
+        if (!lb->hairpin_orig_tuple) {
+            match_set_ct_nw_dst(&match, vip4);
+        } else {
+            match_set_reg(&match, MFF_LOG_LB_ORIG_DIP_IPV4 - MFF_LOG_REG0,
+                          ntohl(vip4));
+        }
     } else {
         match_set_dl_type(&match, htons(ETH_TYPE_IPV6));
-        match_set_ct_ipv6_dst(&match, &lb_vip->vip);
+
+        if (!lb->hairpin_orig_tuple) {
+            match_set_ct_ipv6_dst(&match, &lb_vip->vip);
+        } else {
+            ovs_be128 vip6_value;
+
+            memcpy(&vip6_value, &lb_vip->vip, sizeof vip6_value);
+            match_set_xxreg(&match, MFF_LOG_LB_ORIG_DIP_IPV6 - MFF_LOG_XXREG0,
+                            ntoh128(vip6_value));
+        }
     }
 
-    uint32_t ct_state = OVS_CS_F_TRACKED | OVS_CS_F_DST_NAT;
-    match_set_ct_state_masked(&match, ct_state, ct_state);
+    match_set_nw_proto(&match, lb_proto);
+    if (lb_vip->vip_port) {
+        if (!lb->hairpin_orig_tuple) {
+            match_set_ct_nw_proto(&match, lb_proto);
+            match_set_ct_tp_dst(&match, htons(lb_vip->vip_port));
+        } else {
+            match_set_reg_masked(&match, MFF_LOG_LB_ORIG_TP_DPORT - MFF_REG0,
+                                 lb_vip->vip_port, UINT16_MAX);
+        }
+    }
 
     for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
         match_set_metadata(&match,
@@ -1351,7 +1602,7 @@ consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
                                      flow_table);
         }
 
-        add_lb_ct_snat_vip_flows(lb, lb_vip, flow_table);
+        add_lb_ct_snat_vip_flows(lb, lb_vip, lb_proto, flow_table);
     }
 
     ovn_controller_lb_destroy(lb);
@@ -1404,6 +1655,61 @@ lflow_handle_changed_neighbors(
     }
 }
 
+static void
+consider_fdb_flows(const struct sbrec_fdb *fdb,
+                   const struct hmap *local_datapaths,
+                   struct ovn_desired_flow_table *flow_table)
+{
+    if (!get_local_datapath(local_datapaths, fdb->dp_key)) {
+        return;
+    }
+
+    struct eth_addr mac;
+    if (!eth_addr_from_string(fdb->mac, &mac)) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "bad 'mac' %s", fdb->mac);
+        return;
+    }
+
+    struct match match = MATCH_CATCHALL_INITIALIZER;
+    match_set_metadata(&match, htonll(fdb->dp_key));
+    match_set_dl_dst(&match, mac);
+
+    uint64_t stub[1024 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+    put_load64(fdb->port_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_GET_FDB, 100,
+                    fdb->header_.uuid.parts[0], &match, &ofpacts,
+                    &fdb->header_.uuid);
+    ofpbuf_clear(&ofpacts);
+
+    uint8_t value = 1;
+    put_load(&value, sizeof value, MFF_LOG_FLAGS,
+             MLF_LOOKUP_FDB_BIT, 1, &ofpacts);
+
+    struct match lookup_match = MATCH_CATCHALL_INITIALIZER;
+    match_set_metadata(&lookup_match, htonll(fdb->dp_key));
+    match_set_dl_src(&lookup_match, mac);
+    match_set_reg(&lookup_match, MFF_LOG_INPORT - MFF_REG0, fdb->port_key);
+    ofctrl_add_flow(flow_table, OFTABLE_LOOKUP_FDB, 100,
+                    fdb->header_.uuid.parts[0], &lookup_match, &ofpacts,
+                    &fdb->header_.uuid);
+    ofpbuf_uninit(&ofpacts);
+}
+
+/* Adds OpenFlow flows to the FDB flow tables for each FDB entry in the OVN
+ * southbound database. */
+static void
+add_fdb_flows(const struct sbrec_fdb_table *fdb_table,
+              const struct hmap *local_datapaths,
+              struct ovn_desired_flow_table *flow_table)
+{
+    const struct sbrec_fdb *fdb;
+    SBREC_FDB_TABLE_FOR_EACH (fdb, fdb_table) {
+        consider_fdb_flows(fdb, local_datapaths, flow_table);
+    }
+}
+
 
 /* Translates logical flows in the Logical_Flow table in the OVN_SB database
  * into OpenFlow flows.  See ovn-architecture(7) for more information. */
@@ -1431,6 +1737,8 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
                        l_ctx_out->flow_table);
     add_lb_hairpin_flows(l_ctx_in->lb_table, l_ctx_in->local_datapaths,
                          l_ctx_out->flow_table);
+    add_fdb_flows(l_ctx_in->fdb_table, l_ctx_in->local_datapaths,
+                  l_ctx_out->flow_table);
 }
 
 void
@@ -1582,3 +1890,37 @@ lflow_handle_changed_lbs(struct lflow_ctx_in *l_ctx_in,
 
     return true;
 }
+
+bool
+lflow_handle_changed_fdbs(struct lflow_ctx_in *l_ctx_in,
+                         struct lflow_ctx_out *l_ctx_out)
+{
+    const struct sbrec_fdb *fdb;
+
+    SBREC_FDB_TABLE_FOR_EACH_TRACKED (fdb, l_ctx_in->fdb_table) {
+        if (sbrec_fdb_is_deleted(fdb)) {
+            VLOG_DBG("Remove fdb flows for deleted fdb "UUID_FMT,
+                     UUID_ARGS(&fdb->header_.uuid));
+            ofctrl_remove_flows(l_ctx_out->flow_table, &fdb->header_.uuid);
+        }
+    }
+
+    SBREC_FDB_TABLE_FOR_EACH_TRACKED (fdb, l_ctx_in->fdb_table) {
+        if (sbrec_fdb_is_deleted(fdb)) {
+            continue;
+        }
+
+        if (!sbrec_fdb_is_new(fdb)) {
+            VLOG_DBG("Remove fdb flows for updated fdb "UUID_FMT,
+                     UUID_ARGS(&fdb->header_.uuid));
+            ofctrl_remove_flows(l_ctx_out->flow_table, &fdb->header_.uuid);
+        }
+
+        VLOG_DBG("Add fdb flows for fdb "UUID_FMT,
+                 UUID_ARGS(&fdb->header_.uuid));
+        consider_fdb_flows(fdb, l_ctx_in->local_datapaths,
+                           l_ctx_out->flow_table);
+    }
+
+    return true;
+}
diff --git a/controller/lflow.h b/controller/lflow.h
index ba79cc374..2eb2cb112 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -60,9 +60,9 @@ struct uuid;
  * you make any changes. */
 #define OFTABLE_PHY_TO_LOG            0
 #define OFTABLE_LOG_INGRESS_PIPELINE  8 /* First of LOG_PIPELINE_LEN tables. */
-#define OFTABLE_REMOTE_OUTPUT        32
-#define OFTABLE_LOCAL_OUTPUT         33
-#define OFTABLE_CHECK_LOOPBACK       34
+#define OFTABLE_REMOTE_OUTPUT        37
+#define OFTABLE_LOCAL_OUTPUT         38
+#define OFTABLE_CHECK_LOOPBACK       39
 #define OFTABLE_LOG_EGRESS_PIPELINE  40 /* First of LOG_PIPELINE_LEN tables. */
 #define OFTABLE_SAVE_INPORT          64
 #define OFTABLE_LOG_TO_PHY           65
@@ -71,9 +71,8 @@ struct uuid;
 #define OFTABLE_CHK_LB_HAIRPIN       68
 #define OFTABLE_CHK_LB_HAIRPIN_REPLY 69
 #define OFTABLE_CT_SNAT_FOR_VIP      70
-
-/* The number of tables for the ingress and egress pipelines. */
-#define LOG_PIPELINE_LEN 24
+#define OFTABLE_GET_FDB              71
+#define OFTABLE_LOOKUP_FDB           72
 
 enum ref_type {
     REF_TYPE_ADDRSET,
@@ -136,6 +135,7 @@ struct lflow_ctx_in {
     const struct sbrec_logical_flow_table *logical_flow_table;
     const struct sbrec_logical_dp_group_table *logical_dp_group_table;
     const struct sbrec_multicast_group_table *mc_group_table;
+    const struct sbrec_fdb_table *fdb_table;
     const struct sbrec_chassis *chassis;
     const struct sbrec_load_balancer_table *lb_table;
     const struct hmap *local_datapaths;
@@ -167,6 +167,7 @@ void lflow_handle_changed_neighbors(
     const struct hmap *local_datapaths,
     struct ovn_desired_flow_table *);
 bool lflow_handle_changed_lbs(struct lflow_ctx_in *, struct lflow_ctx_out *);
+bool lflow_handle_changed_fdbs(struct lflow_ctx_in *, struct lflow_ctx_out *);
 void lflow_destroy(void);
 
 void lflow_cache_init(struct hmap *);
diff --git a/controller/mac-learn.c b/controller/mac-learn.c
new file mode 100644
index 000000000..27634dca8
--- /dev/null
+++ b/controller/mac-learn.c
@@ -0,0 +1,180 @@
+/* Copyright (c) 2020, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "mac-learn.h"
+
+/* OpenvSwitch lib includes. */
+#include "openvswitch/vlog.h"
+#include "lib/packets.h"
+#include "lib/smap.h"
+
+VLOG_DEFINE_THIS_MODULE(mac_learn);
+
+#define MAX_MAC_BINDINGS 1000
+#define MAX_FDB_ENTRIES  1000
+
+static size_t mac_binding_hash(uint32_t dp_key, uint32_t port_key,
+                               struct in6_addr *);
+static struct mac_binding *mac_binding_find(struct hmap *mac_bindings,
+                                            uint32_t dp_key,
+                                            uint32_t port_key,
+                                            struct in6_addr *ip, size_t hash);
+static size_t fdb_entry_hash(uint32_t dp_key, struct eth_addr *);
+
+static struct fdb_entry *fdb_entry_find(struct hmap *fdbs, uint32_t dp_key,
+                                        struct eth_addr *mac, size_t hash);
+
+/* mac_binding functions. */
+void
+ovn_mac_bindings_init(struct hmap *mac_bindings)
+{
+    hmap_init(mac_bindings);
+}
+
+void
+ovn_mac_bindings_flush(struct hmap *mac_bindings)
+{
+    struct mac_binding *mb;
+    HMAP_FOR_EACH_POP (mb, hmap_node, mac_bindings) {
+        free(mb);
+    }
+}
+
+void
+ovn_mac_bindings_destroy(struct hmap *mac_bindings)
+{
+    ovn_mac_bindings_flush(mac_bindings);
+    hmap_destroy(mac_bindings);
+}
+
+struct mac_binding *
+ovn_mac_binding_add(struct hmap *mac_bindings, uint32_t dp_key,
+                    uint32_t port_key, struct in6_addr *ip,
+                    struct eth_addr mac)
+{
+    uint32_t hash = mac_binding_hash(dp_key, port_key, ip);
+
+    struct mac_binding *mb =
+        mac_binding_find(mac_bindings, dp_key, port_key, ip, hash);
+    if (!mb) {
+        if (hmap_count(mac_bindings) >= MAX_MAC_BINDINGS) {
+            return NULL;
+        }
+
+        mb = xmalloc(sizeof *mb);
+        mb->dp_key = dp_key;
+        mb->port_key = port_key;
+        mb->ip = *ip;
+        hmap_insert(mac_bindings, &mb->hmap_node, hash);
+    }
+    mb->mac = mac;
+
+    return mb;
+}
+
+/* fdb functions. */
+void
+ovn_fdb_init(struct hmap *fdbs)
+{
+    hmap_init(fdbs);
+}
+
+void
+ovn_fdbs_flush(struct hmap *fdbs)
+{
+    struct fdb_entry *fdb_e;
+    HMAP_FOR_EACH_POP (fdb_e, hmap_node, fdbs) {
+        free(fdb_e);
+    }
+}
+
+void
+ovn_fdbs_destroy(struct hmap *fdbs)
+{
+   ovn_fdbs_flush(fdbs);
+   hmap_destroy(fdbs);
+}
+
+struct fdb_entry *
+ovn_fdb_add(struct hmap *fdbs, uint32_t dp_key, struct eth_addr mac,
+            uint32_t port_key)
+{
+    uint32_t hash = fdb_entry_hash(dp_key, &mac);
+
+    struct fdb_entry *fdb_e =
+        fdb_entry_find(fdbs, dp_key, &mac, hash);
+    if (!fdb_e) {
+        if (hmap_count(fdbs) >= MAX_FDB_ENTRIES) {
+            return NULL;
+        }
+
+        fdb_e = xzalloc(sizeof *fdb_e);
+        fdb_e->dp_key = dp_key;
+        fdb_e->mac = mac;
+        hmap_insert(fdbs, &fdb_e->hmap_node, hash);
+    }
+    fdb_e->port_key = port_key;
+
+    return fdb_e;
+
+}
+
+/* mac_binding related static functions. */
+
+static size_t
+mac_binding_hash(uint32_t dp_key, uint32_t port_key, struct in6_addr *ip)
+{
+    return hash_bytes(ip, sizeof *ip, hash_2words(dp_key, port_key));
+}
+
+static struct mac_binding *
+mac_binding_find(struct hmap *mac_bindings, uint32_t dp_key,
+                   uint32_t port_key, struct in6_addr *ip, size_t hash)
+{
+    struct mac_binding *mb;
+    HMAP_FOR_EACH_WITH_HASH (mb, hmap_node, hash, mac_bindings) {
+        if (mb->dp_key == dp_key && mb->port_key == port_key &&
+            IN6_ARE_ADDR_EQUAL(&mb->ip, ip)) {
+            return mb;
+        }
+    }
+
+    return NULL;
+}
+
+/* fdb related static functions. */
+
+static size_t
+fdb_entry_hash(uint32_t dp_key, struct eth_addr *mac)
+{
+    uint64_t mac64 = eth_addr_to_uint64(*mac);
+    return hash_2words(dp_key, hash_uint64(mac64));
+}
+
+static struct fdb_entry *
+fdb_entry_find(struct hmap *fdbs, uint32_t dp_key,
+               struct eth_addr *mac, size_t hash)
+{
+    struct fdb_entry *fdb_e;
+    HMAP_FOR_EACH_WITH_HASH (fdb_e, hmap_node, hash, fdbs) {
+        if (fdb_e->dp_key == dp_key && eth_addr_equals(fdb_e->mac, *mac)) {
+            return fdb_e;
+        }
+    }
+
+    return NULL;
+}
diff --git a/controller/mac-learn.h b/controller/mac-learn.h
new file mode 100644
index 000000000..e7e8ba2d3
--- /dev/null
+++ b/controller/mac-learn.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OVN_MAC_LEARN_H
+#define OVN_MAC_LEARN_H 1
+
+#include <sys/types.h>
+#include <netinet/in.h>
+#include "openvswitch/hmap.h"
+
+struct mac_binding {
+    struct hmap_node hmap_node; /* In a hmap. */
+
+    /* Key. */
+    uint32_t dp_key;
+    uint32_t port_key; /* Port from where this mac_binding is learnt. */
+    struct in6_addr ip;
+
+    /* Value. */
+    struct eth_addr mac;
+};
+
+void ovn_mac_bindings_init(struct hmap *mac_bindings);
+void ovn_mac_bindings_flush(struct hmap *mac_bindings);
+void ovn_mac_bindings_destroy(struct hmap *mac_bindings);
+
+struct mac_binding *ovn_mac_binding_add(struct hmap *mac_bindings,
+                                        uint32_t dp_key, uint32_t port_key,
+                                        struct in6_addr *ip,
+                                        struct eth_addr mac);
+
+
+
+struct fdb_entry {
+    struct hmap_node hmap_node; /* In a hmap. */
+
+    /* Key. */
+    uint32_t dp_key;
+    struct eth_addr mac;
+
+    /* value. */
+    uint32_t port_key;
+};
+
+void ovn_fdb_init(struct hmap *fdbs);
+void ovn_fdbs_flush(struct hmap *fdbs);
+void ovn_fdbs_destroy(struct hmap *fdbs);
+
+struct fdb_entry *ovn_fdb_add(struct hmap *fdbs,
+                                uint32_t dp_key, struct eth_addr mac,
+                                uint32_t port_key);
+
+#endif /* OVN_MAC_LEARN_H */
diff --git a/controller/ofctrl-seqno.c b/controller/ofctrl-seqno.c
new file mode 100644
index 000000000..c9334b078
--- /dev/null
+++ b/controller/ofctrl-seqno.c
@@ -0,0 +1,254 @@
+/* Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "hash.h"
+#include "ofctrl-seqno.h"
+#include "openvswitch/list.h"
+#include "util.h"
+
+/* A sequence number update request, i.e., when the barrier corresponding to
+ * the 'flow_cfg' sequence number is replied to by OVS then it is safe
+ * to inform the application that the 'req_cfg' seqno has been processed.
+ */
+struct ofctrl_seqno_update {
+    struct ovs_list list_node; /* In 'ofctrl_seqno_updates'. */
+    size_t seqno_type;         /* Application specific seqno type.
+                                * Relevant only for 'req_cfg'.
+                                */
+    uint64_t flow_cfg;         /* The seqno that needs to be acked by OVS
+                                * before 'req_cfg' can be acked for the
+                                * application.
+                                */
+    uint64_t req_cfg;          /* Application specific seqno. */
+};
+
+/* List of in flight sequence number updates. */
+static struct ovs_list ofctrl_seqno_updates;
+
+/* Last sequence number request sent to OVS. */
+static uint64_t ofctrl_req_seqno;
+
+/* State of seqno requests for a given application seqno type. */
+struct ofctrl_seqno_state {
+    struct ovs_list acked_cfgs; /* Acked requests since the last time the
+                                 * application consumed acked requests.
+                                 */
+    uint64_t cur_cfg;           /* Last acked application seqno. */
+    uint64_t req_cfg;           /* Last requested application seqno. */
+};
+
+/* Per application seqno type states. */
+static size_t n_ofctrl_seqno_states;
+static struct ofctrl_seqno_state *ofctrl_seqno_states;
+
+/* ofctrl_acked_seqnos related static function prototypes. */
+static void ofctrl_acked_seqnos_init(struct ofctrl_acked_seqnos *seqnos,
+                                     uint64_t last_acked);
+static void ofctrl_acked_seqnos_add(struct ofctrl_acked_seqnos *seqnos,
+                                    uint32_t val);
+
+/* ofctrl_seqno_update related static function prototypes. */
+static void ofctrl_seqno_update_create__(size_t seqno_type, uint64_t req_cfg);
+static void ofctrl_seqno_update_list_destroy(struct ovs_list *seqno_list);
+static void ofctrl_seqno_cfg_run(size_t seqno_type,
+                                 struct ofctrl_seqno_update *update);
+
+/* Returns the collection of acked ofctrl_seqno_update requests of type
+ * 'seqno_type'.  It's the responsibility of the caller to free memory by
+ * calling ofctrl_acked_seqnos_destroy().
+ */
+struct ofctrl_acked_seqnos *
+ofctrl_acked_seqnos_get(size_t seqno_type)
+{
+    ovs_assert(seqno_type < n_ofctrl_seqno_states);
+    struct ofctrl_acked_seqnos *acked_seqnos = xmalloc(sizeof *acked_seqnos);
+    struct ofctrl_seqno_state *state = &ofctrl_seqno_states[seqno_type];
+    struct ofctrl_seqno_update *update;
+
+    ofctrl_acked_seqnos_init(acked_seqnos, state->cur_cfg);
+
+    LIST_FOR_EACH_POP (update, list_node, &state->acked_cfgs) {
+        ofctrl_acked_seqnos_add(acked_seqnos, update->req_cfg);
+        free(update);
+    }
+    return acked_seqnos;
+}
+
+void
+ofctrl_acked_seqnos_destroy(struct ofctrl_acked_seqnos *seqnos)
+{
+    if (!seqnos) {
+        return;
+    }
+
+    struct ofctrl_ack_seqno *seqno_node;
+    HMAP_FOR_EACH_POP (seqno_node, node, &seqnos->acked) {
+        free(seqno_node);
+    }
+    hmap_destroy(&seqnos->acked);
+    free(seqnos);
+}
+
+/* Returns true if 'val' is one of the acked sequence numbers in 'seqnos'. */
+bool
+ofctrl_acked_seqnos_contains(const struct ofctrl_acked_seqnos *seqnos,
+                             uint32_t val)
+{
+    struct ofctrl_ack_seqno *sn;
+
+    HMAP_FOR_EACH_WITH_HASH (sn, node, hash_int(val, 0), &seqnos->acked) {
+        if (sn->seqno == val) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void
+ofctrl_seqno_init(void)
+{
+    ovs_list_init(&ofctrl_seqno_updates);
+}
+
+/* Adds a new type of application specific seqno updates. */
+size_t
+ofctrl_seqno_add_type(void)
+{
+    size_t new_type = n_ofctrl_seqno_states;
+    n_ofctrl_seqno_states++;
+
+    struct ofctrl_seqno_state *new_states =
+        xzalloc(n_ofctrl_seqno_states * sizeof *new_states);
+
+    for (size_t i = 0; i < n_ofctrl_seqno_states - 1; i++) {
+        ovs_list_move(&new_states[i].acked_cfgs,
+                      &ofctrl_seqno_states[i].acked_cfgs);
+    }
+    ovs_list_init(&new_states[new_type].acked_cfgs);
+
+    free(ofctrl_seqno_states);
+    ofctrl_seqno_states = new_states;
+    return new_type;
+}
+
+/* Creates a new seqno update request for an application specific
+ * 'seqno_type'.
+ */
+void
+ofctrl_seqno_update_create(size_t seqno_type, uint64_t new_cfg)
+{
+    ovs_assert(seqno_type < n_ofctrl_seqno_states);
+
+    struct ofctrl_seqno_state *state = &ofctrl_seqno_states[seqno_type];
+
+    /* If new_cfg didn't change since the last request there should already
+     * be an update pending.
+     */
+    if (new_cfg == state->req_cfg) {
+        return;
+    }
+
+    state->req_cfg = new_cfg;
+    ofctrl_seqno_update_create__(seqno_type, new_cfg);
+}
+
+/* Should be called when the application is certain that all OVS flow updates
+ * corresponding to 'flow_cfg' were processed.  Populates the application
+ * specific lists of acked requests in 'ofctrl_seqno_states'.
+ */
+void
+ofctrl_seqno_run(uint64_t flow_cfg)
+{
+    struct ofctrl_seqno_update *update, *prev;
+    LIST_FOR_EACH_SAFE (update, prev, list_node, &ofctrl_seqno_updates) {
+        if (flow_cfg < update->flow_cfg) {
+            break;
+        }
+
+        ovs_list_remove(&update->list_node);
+        ofctrl_seqno_cfg_run(update->seqno_type, update);
+    }
+}
+
+/* Returns the seqno to be used when sending a barrier request to OVS. */
+uint64_t
+ofctrl_seqno_get_req_cfg(void)
+{
+    return ofctrl_req_seqno;
+}
+
+/* Should be called whenever the openflow connection to OVS is lost.  Flushes
+ * all pending 'ofctrl_seqno_updates'.
+ */
+void
+ofctrl_seqno_flush(void)
+{
+    for (size_t i = 0; i < n_ofctrl_seqno_states; i++) {
+        ofctrl_seqno_update_list_destroy(&ofctrl_seqno_states[i].acked_cfgs);
+    }
+    ofctrl_seqno_update_list_destroy(&ofctrl_seqno_updates);
+    ofctrl_req_seqno = 0;
+}
+
+static void
+ofctrl_acked_seqnos_init(struct ofctrl_acked_seqnos *seqnos,
+                         uint64_t last_acked)
+{
+    hmap_init(&seqnos->acked);
+    seqnos->last_acked = last_acked;
+}
+
+static void
+ofctrl_acked_seqnos_add(struct ofctrl_acked_seqnos *seqnos, uint32_t val)
+{
+    seqnos->last_acked = val;
+
+    struct ofctrl_ack_seqno *sn = xmalloc(sizeof *sn);
+    hmap_insert(&seqnos->acked, &sn->node, hash_int(val, 0));
+    sn->seqno = val;
+}
+
+static void
+ofctrl_seqno_update_create__(size_t seqno_type, uint64_t req_cfg)
+{
+    struct ofctrl_seqno_update *update = xmalloc(sizeof *update);
+
+    ofctrl_req_seqno++;
+    ovs_list_push_back(&ofctrl_seqno_updates, &update->list_node);
+    update->seqno_type = seqno_type;
+    update->flow_cfg = ofctrl_req_seqno;
+    update->req_cfg = req_cfg;
+}
+
+static void
+ofctrl_seqno_update_list_destroy(struct ovs_list *seqno_list)
+{
+    struct ofctrl_seqno_update *update;
+
+    LIST_FOR_EACH_POP (update, list_node, seqno_list) {
+        free(update);
+    }
+}
+
+static void
+ofctrl_seqno_cfg_run(size_t seqno_type, struct ofctrl_seqno_update *update)
+{
+    ovs_assert(seqno_type < n_ofctrl_seqno_states);
+    ovs_list_push_back(&ofctrl_seqno_states[seqno_type].acked_cfgs,
+                       &update->list_node);
+    ofctrl_seqno_states[seqno_type].cur_cfg = update->req_cfg;
+}
diff --git a/controller/ofctrl-seqno.h b/controller/ofctrl-seqno.h
new file mode 100644
index 000000000..876947c26
--- /dev/null
+++ b/controller/ofctrl-seqno.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OFCTRL_SEQNO_H
+#define OFCTRL_SEQNO_H 1
+
+#include <stdint.h>
+
+#include <openvswitch/hmap.h>
+
+/* Collection of acked ofctrl_seqno_update requests and the most recent
+ * 'last_acked' value.
+ */
+struct ofctrl_acked_seqnos {
+    struct hmap acked;
+    uint64_t last_acked;
+};
+
+/* Acked application specific seqno.  Stored in ofctrl_acked_seqnos.acked. */
+struct ofctrl_ack_seqno {
+    struct hmap_node node;
+    uint64_t seqno;
+};
+
+struct ofctrl_acked_seqnos *ofctrl_acked_seqnos_get(size_t seqno_type);
+void ofctrl_acked_seqnos_destroy(struct ofctrl_acked_seqnos *seqnos);
+bool ofctrl_acked_seqnos_contains(const struct ofctrl_acked_seqnos *seqnos,
+                                  uint32_t val);
+
+void ofctrl_seqno_init(void);
+size_t ofctrl_seqno_add_type(void);
+void ofctrl_seqno_update_create(size_t seqno_type, uint64_t new_cfg);
+void ofctrl_seqno_run(uint64_t flow_cfg);
+uint64_t ofctrl_seqno_get_req_cfg(void);
+void ofctrl_seqno_flush(void);
+
+#endif /* controller/ofctrl-seqno.h */
diff --git a/controller/ofctrl.c b/controller/ofctrl.c
index a1ac69531..415d9b7e1 100644
--- a/controller/ofctrl.c
+++ b/controller/ofctrl.c
@@ -268,13 +268,14 @@ enum ofctrl_state {
 /* An in-flight update to the switch's flow table.
  *
  * When we receive a barrier reply from the switch with the given 'xid', we
- * know that the switch is caught up to northbound database sequence number
- * 'nb_cfg' (and make that available to the client via ofctrl_get_cur_cfg(), so
- * that it can store it into our Chassis record's nb_cfg column). */
+ * know that the switch is caught up to the requested sequence number
+ * 'req_cfg' (and make that available to the client via ofctrl_get_cur_cfg(),
+ * so that it can store it into external state, e.g., our Chassis record's
+ * nb_cfg column). */
 struct ofctrl_flow_update {
     struct ovs_list list_node;  /* In 'flow_updates'. */
     ovs_be32 xid;               /* OpenFlow transaction ID for barrier. */
-    int64_t nb_cfg;             /* Northbound database sequence number. */
+    uint64_t req_cfg;           /* Requested sequence number. */
 };
 
 static struct ofctrl_flow_update *
@@ -286,8 +287,8 @@ ofctrl_flow_update_from_list_node(const struct ovs_list *list_node)
 /* Currently in-flight updates. */
 static struct ovs_list flow_updates;
 
-/* nb_cfg of latest committed flow update. */
-static int64_t cur_cfg;
+/* req_cfg of latest committed flow update. */
+static uint64_t cur_cfg;
 
 /* Current state. */
 static enum ofctrl_state state;
@@ -632,8 +633,8 @@ recv_S_UPDATE_FLOWS(const struct ofp_header *oh, enum ofptype type,
         struct ofctrl_flow_update *fup = ofctrl_flow_update_from_list_node(
             ovs_list_front(&flow_updates));
         if (fup->xid == oh->xid) {
-            if (fup->nb_cfg >= cur_cfg) {
-                cur_cfg = fup->nb_cfg;
+            if (fup->req_cfg >= cur_cfg) {
+                cur_cfg = fup->req_cfg;
             }
             ovs_list_remove(&fup->list_node);
             free(fup);
@@ -763,7 +764,7 @@ ofctrl_destroy(void)
     shash_destroy(&symtab);
 }
 
-int64_t
+uint64_t
 ofctrl_get_cur_cfg(void)
 {
     return cur_cfg;
@@ -1246,10 +1247,22 @@ ofctrl_flood_remove_flows(struct ovn_desired_flow_table *flow_table,
                           struct hmap *flood_remove_nodes)
 {
     struct ofctrl_flood_remove_node *ofrn;
+    int i, n = 0;
+
+    /* flood_remove_flows_for_sb_uuid() will modify the 'flood_remove_nodes'
+     * hash map by inserting new items, so we can't use it for iteration.
+     * Copying the sb_uuids into an array. */
+    struct uuid *sb_uuids;
+    sb_uuids = xmalloc(hmap_count(flood_remove_nodes) * sizeof *sb_uuids);
     HMAP_FOR_EACH (ofrn, hmap_node, flood_remove_nodes) {
-        flood_remove_flows_for_sb_uuid(flow_table, &ofrn->sb_uuid,
+        sb_uuids[n++] = ofrn->sb_uuid;
+    }
+
+    for (i = 0; i < n; i++) {
+        flood_remove_flows_for_sb_uuid(flow_table, &sb_uuids[i],
                                        flood_remove_nodes);
     }
+    free(sb_uuids);
 
     /* remove any related group and meter info */
     HMAP_FOR_EACH (ofrn, hmap_node, flood_remove_nodes) {
@@ -1975,7 +1989,7 @@ update_installed_flows_by_track(struct ovn_desired_flow_table *flow_table,
                  * tracked, so it must have been modified. */
                 installed_flow_mod(&i->flow, &f->flow, msgs);
                 ovn_flow_log(&i->flow, "updating installed (tracked)");
-            } else {
+            } else if (!f->installed_flow) {
                 /* Adding a new flow that conflicts with an existing installed
                  * flow, so add it to the link.  If this flow becomes active,
                  * e.g., it is less restrictive than the previous active flow
@@ -2024,28 +2038,28 @@ void
 ofctrl_put(struct ovn_desired_flow_table *flow_table,
            struct shash *pending_ct_zones,
            const struct sbrec_meter_table *meter_table,
-           int64_t nb_cfg,
+           uint64_t req_cfg,
            bool flow_changed)
 {
     static bool skipped_last_time = false;
-    static int64_t old_nb_cfg = 0;
+    static uint64_t old_req_cfg = 0;
     bool need_put = false;
     if (flow_changed || skipped_last_time || need_reinstall_flows) {
         need_put = true;
-        old_nb_cfg = nb_cfg;
-    } else if (nb_cfg != old_nb_cfg) {
-        /* nb_cfg changed since last ofctrl_put() call */
-        if (cur_cfg == old_nb_cfg) {
+        old_req_cfg = req_cfg;
+    } else if (req_cfg != old_req_cfg) {
+        /* req_cfg changed since last ofctrl_put() call */
+        if (cur_cfg == old_req_cfg) {
             /* If there are no updates pending, we were up-to-date already,
-             * update with the new nb_cfg.
+             * update with the new req_cfg.
              */
             if (ovs_list_is_empty(&flow_updates)) {
-                cur_cfg = nb_cfg;
-                old_nb_cfg = nb_cfg;
+                cur_cfg = req_cfg;
+                old_req_cfg = req_cfg;
             }
         } else {
             need_put = true;
-            old_nb_cfg = nb_cfg;
+            old_req_cfg = req_cfg;
         }
     }
 
@@ -2187,24 +2201,23 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
         /* Track the flow update. */
         struct ofctrl_flow_update *fup, *prev;
         LIST_FOR_EACH_REVERSE_SAFE (fup, prev, list_node, &flow_updates) {
-            if (nb_cfg < fup->nb_cfg) {
+            if (req_cfg < fup->req_cfg) {
                 /* This ofctrl_flow_update is for a configuration later than
-                 * 'nb_cfg'.  This should not normally happen, because it means
-                 * that 'nb_cfg' in the SB_Global table of the southbound
-                 * database decreased, and it should normally be monotonically
-                 * increasing. */
-                VLOG_WARN("nb_cfg regressed from %"PRId64" to %"PRId64,
-                          fup->nb_cfg, nb_cfg);
+                 * 'req_cfg'.  This should not normally happen, because it
+                 * means that the local seqno decreased and it should normally
+                 * be monotonically increasing. */
+                VLOG_WARN("req_cfg regressed from %"PRIu64" to %"PRIu64,
+                          fup->req_cfg, req_cfg);
                 ovs_list_remove(&fup->list_node);
                 free(fup);
-            } else if (nb_cfg == fup->nb_cfg) {
+            } else if (req_cfg == fup->req_cfg) {
                 /* This ofctrl_flow_update is for the same configuration as
-                 * 'nb_cfg'.  Probably, some change to the physical topology
+                 * 'req_cfg'.  Probably, some change to the physical topology
                  * means that we had to revise the OpenFlow flow table even
                  * though the logical topology did not change.  Update fp->xid,
                  * so that we don't send a notification that we're up-to-date
                  * until we're really caught up. */
-                VLOG_DBG("advanced xid target for nb_cfg=%"PRId64, nb_cfg);
+                VLOG_DBG("advanced xid target for req_cfg=%"PRIu64, req_cfg);
                 fup->xid = xid_;
                 goto done;
             } else {
@@ -2216,18 +2229,18 @@ ofctrl_put(struct ovn_desired_flow_table *flow_table,
         fup = xmalloc(sizeof *fup);
         ovs_list_push_back(&flow_updates, &fup->list_node);
         fup->xid = xid_;
-        fup->nb_cfg = nb_cfg;
+        fup->req_cfg = req_cfg;
     done:;
     } else if (!ovs_list_is_empty(&flow_updates)) {
-        /* Getting up-to-date with 'nb_cfg' didn't require any extra flow table
-         * changes, so whenever we get up-to-date with the most recent flow
-         * table update, we're also up-to-date with 'nb_cfg'. */
+        /* Getting up-to-date with 'req_cfg' didn't require any extra flow
+         * table changes, so whenever we get up-to-date with the most recent
+         * flow table update, we're also up-to-date with 'req_cfg'. */
         struct ofctrl_flow_update *fup = ofctrl_flow_update_from_list_node(
             ovs_list_back(&flow_updates));
-        fup->nb_cfg = nb_cfg;
+        fup->req_cfg = req_cfg;
     } else {
         /* We were completely up-to-date before and still are. */
-        cur_cfg = nb_cfg;
+        cur_cfg = req_cfg;
     }
 
     flow_table->change_tracked = true;
diff --git a/controller/ofctrl.h b/controller/ofctrl.h
index 64b0ea5dd..88769566a 100644
--- a/controller/ofctrl.h
+++ b/controller/ofctrl.h
@@ -55,12 +55,12 @@ enum mf_field_id ofctrl_get_mf_field_id(void);
 void ofctrl_put(struct ovn_desired_flow_table *,
                 struct shash *pending_ct_zones,
                 const struct sbrec_meter_table *,
-                int64_t nb_cfg,
+                uint64_t nb_cfg,
                 bool flow_changed);
 bool ofctrl_can_put(void);
 void ofctrl_wait(void);
 void ofctrl_destroy(void);
-int64_t ofctrl_get_cur_cfg(void);
+uint64_t ofctrl_get_cur_cfg(void);
 
 void ofctrl_ct_flush_zone(uint16_t zone_id);
 
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 366fc9c06..288e2e12d 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -39,6 +39,7 @@
 #include "lib/vswitch-idl.h"
 #include "lport.h"
 #include "ofctrl.h"
+#include "ofctrl-seqno.h"
 #include "openvswitch/vconn.h"
 #include "openvswitch/vlog.h"
 #include "ovn/actions.h"
@@ -98,6 +99,9 @@ struct pending_pkt {
     char *flow_s;
 };
 
+/* Registered ofctrl seqno type for nb_cfg propagation. */
+static size_t ofctrl_seq_type_nb_cfg;
+
 struct local_datapath *
 get_local_datapath(const struct hmap *local_datapaths, uint32_t tunnel_key)
 {
@@ -583,7 +587,18 @@ add_pending_ct_zone_entry(struct shash *pending_ct_zones,
     pending->state = state; /* Skip flushing zone. */
     pending->zone = zone;
     pending->add = add;
-    shash_add(pending_ct_zones, name, pending);
+
+    /* Its important that we add only one entry for the key 'name'.
+     * Replace 'pending' with 'existing' and free up 'existing'.
+     * Otherwise, we may end up in a continuous loop of adding
+     * and deleting the zone entry in the 'external_ids' of
+     * integration bridge.
+     */
+    struct ct_zone_pending_entry *existing =
+        shash_replace(pending_ct_zones, name, pending);
+    if (existing) {
+        free(existing);
+    }
 }
 
 static void
@@ -798,11 +813,11 @@ restore_ct_zones(const struct ovsrec_bridge_table *bridge_table,
     }
 }
 
-static int64_t
+static uint64_t
 get_nb_cfg(const struct sbrec_sb_global_table *sb_global_table,
            unsigned int cond_seqno, unsigned int expected_cond_seqno)
 {
-    static int64_t nb_cfg = 0;
+    static uint64_t nb_cfg = 0;
 
     /* Delay getting nb_cfg if there are monitor condition changes
      * in flight.  It might be that those changes would instruct the
@@ -825,11 +840,14 @@ static void
 store_nb_cfg(struct ovsdb_idl_txn *sb_txn, struct ovsdb_idl_txn *ovs_txn,
              const struct sbrec_chassis_private *chassis,
              const struct ovsrec_bridge *br_int,
-             unsigned int delay_nb_cfg_report,
-             int64_t cur_cfg)
+             unsigned int delay_nb_cfg_report)
 {
+    struct ofctrl_acked_seqnos *acked_nb_cfg_seqnos =
+        ofctrl_acked_seqnos_get(ofctrl_seq_type_nb_cfg);
+    uint64_t cur_cfg = acked_nb_cfg_seqnos->last_acked;
+
     if (!cur_cfg) {
-        return;
+        goto done;
     }
 
     if (sb_txn && chassis && cur_cfg != chassis->nb_cfg) {
@@ -850,6 +868,9 @@ store_nb_cfg(struct ovsdb_idl_txn *sb_txn, struct ovsdb_idl_txn *ovs_txn,
                                                  cur_cfg_str);
         free(cur_cfg_str);
     }
+
+done:
+    ofctrl_acked_seqnos_destroy(acked_nb_cfg_seqnos);
 }
 
 static const char *
@@ -911,7 +932,8 @@ ctrl_register_ovs_idl(struct ovsdb_idl *ovs_idl)
     SB_NODE(dhcp_options, "dhcp_options") \
     SB_NODE(dhcpv6_options, "dhcpv6_options") \
     SB_NODE(dns, "dns") \
-    SB_NODE(load_balancer, "load_balancer")
+    SB_NODE(load_balancer, "load_balancer") \
+    SB_NODE(fdb, "fdb")
 
 enum sb_engine_node {
 #define SB_NODE(NAME, NAME_STR) SB_##NAME,
@@ -967,6 +989,12 @@ en_ofctrl_is_connected_run(struct engine_node *node, void *data)
     struct ed_type_ofctrl_is_connected *of_data = data;
     if (of_data->connected != ofctrl_is_connected()) {
         of_data->connected = !of_data->connected;
+
+        /* Flush ofctrl seqno requests when the ofctrl connection goes down. */
+        if (!of_data->connected) {
+            ofctrl_seqno_flush();
+            binding_seqno_flush();
+        }
         engine_set_node_state(node, EN_UPDATED);
         return;
     }
@@ -1836,6 +1864,10 @@ static void init_lflow_ctx(struct engine_node *node,
         (struct sbrec_load_balancer_table *)EN_OVSDB_GET(
             engine_get_input("SB_load_balancer", node));
 
+    struct sbrec_fdb_table *fdb_table =
+        (struct sbrec_fdb_table *)EN_OVSDB_GET(
+            engine_get_input("SB_fdb", node));
+
     struct ovsrec_open_vswitch_table *ovs_table =
         (struct ovsrec_open_vswitch_table *)EN_OVSDB_GET(
             engine_get_input("OVS_open_vswitch", node));
@@ -1873,6 +1905,7 @@ static void init_lflow_ctx(struct engine_node *node,
     l_ctx_in->logical_flow_table = logical_flow_table;
     l_ctx_in->logical_dp_group_table = logical_dp_group_table;
     l_ctx_in->mc_group_table = multicast_group_table;
+    l_ctx_in->fdb_table = fdb_table;
     l_ctx_in->chassis = chassis;
     l_ctx_in->lb_table = lb_table;
     l_ctx_in->local_datapaths = &rt_data->local_datapaths;
@@ -2313,6 +2346,23 @@ flow_output_sb_load_balancer_handler(struct engine_node *node, void *data)
     return handled;
 }
 
+static bool
+flow_output_sb_fdb_handler(struct engine_node *node, void *data)
+{
+    struct ed_type_runtime_data *rt_data =
+        engine_get_input_data("runtime_data", node);
+
+    struct ed_type_flow_output *fo = data;
+    struct lflow_ctx_in l_ctx_in;
+    struct lflow_ctx_out l_ctx_out;
+    init_lflow_ctx(node, rt_data, fo, &l_ctx_in, &l_ctx_out);
+
+    bool handled = lflow_handle_changed_fdbs(&l_ctx_in, &l_ctx_out);
+
+    engine_set_node_state(node, EN_UPDATED);
+    return handled;
+}
+
 struct ovn_controller_exit_args {
     bool *exiting;
     bool *restart;
@@ -2389,6 +2439,10 @@ main(int argc, char *argv[])
 
     daemonize_complete();
 
+    /* Register ofctrl seqno types. */
+    ofctrl_seq_type_nb_cfg = ofctrl_seqno_add_type();
+
+    binding_init();
     patch_init();
     pinctrl_init();
     lflow_init();
@@ -2440,6 +2494,10 @@ main(int argc, char *argv[])
         = ip_mcast_index_create(ovnsb_idl_loop.idl);
     struct ovsdb_idl_index *sbrec_igmp_group
         = igmp_group_index_create(ovnsb_idl_loop.idl);
+    struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac
+        = ovsdb_idl_index_create2(ovnsb_idl_loop.idl,
+                                  &sbrec_fdb_col_mac,
+                                  &sbrec_fdb_col_dp_key);
 
     ovsdb_idl_track_add_all(ovnsb_idl_loop.idl);
     ovsdb_idl_omit_alert(ovnsb_idl_loop.idl,
@@ -2566,6 +2624,8 @@ main(int argc, char *argv[])
     engine_add_input(&en_flow_output, &en_sb_dns, NULL);
     engine_add_input(&en_flow_output, &en_sb_load_balancer,
                      flow_output_sb_load_balancer_handler);
+    engine_add_input(&en_flow_output, &en_sb_fdb,
+                     flow_output_sb_fdb_handler);
 
     engine_add_input(&en_ct_zones, &en_ovs_open_vswitch, NULL);
     engine_add_input(&en_ct_zones, &en_ovs_bridge, NULL);
@@ -2624,6 +2684,7 @@ main(int argc, char *argv[])
     ofctrl_init(&flow_output_data->group_table,
                 &flow_output_data->meter_table,
                 get_ofctrl_probe_interval(ovs_idl_loop.idl));
+    ofctrl_seqno_init();
 
     unixctl_command_register("group-table-list", "", 0, 0,
                              extend_table_list,
@@ -2832,11 +2893,13 @@ main(int argc, char *argv[])
                                     sbrec_mac_binding_by_lport_ip,
                                     sbrec_igmp_group,
                                     sbrec_ip_multicast,
+                                    sbrec_fdb_by_dp_key_mac,
                                     sbrec_dns_table_get(ovnsb_idl_loop.idl),
                                     sbrec_controller_event_table_get(
                                         ovnsb_idl_loop.idl),
                                     sbrec_service_monitor_table_get(
                                         ovnsb_idl_loop.idl),
+                                    sbrec_bfd_table_get(ovnsb_idl_loop.idl),
                                     br_int, chassis,
                                     &runtime_data->local_datapaths,
                                     &runtime_data->active_tunnels);
@@ -2852,17 +2915,29 @@ main(int argc, char *argv[])
                                     sb_monitor_all);
                         }
                     }
+
+                    ofctrl_seqno_update_create(
+                        ofctrl_seq_type_nb_cfg,
+                        get_nb_cfg(sbrec_sb_global_table_get(
+                                       ovnsb_idl_loop.idl),
+                                   ovnsb_cond_seqno,
+                                   ovnsb_expected_cond_seqno));
+                    if (runtime_data && ovs_idl_txn && ovnsb_idl_txn) {
+                        binding_seqno_run(&runtime_data->local_bindings);
+                    }
+
                     flow_output_data = engine_get_data(&en_flow_output);
                     if (flow_output_data && ct_zones_data) {
                         ofctrl_put(&flow_output_data->flow_table,
                                    &ct_zones_data->pending,
                                    sbrec_meter_table_get(ovnsb_idl_loop.idl),
-                                   get_nb_cfg(sbrec_sb_global_table_get(
-                                                   ovnsb_idl_loop.idl),
-                                              ovnsb_cond_seqno,
-                                              ovnsb_expected_cond_seqno),
+                                   ofctrl_seqno_get_req_cfg(),
                                    engine_node_changed(&en_flow_output));
                     }
+                    ofctrl_seqno_run(ofctrl_get_cur_cfg());
+                    if (runtime_data && ovs_idl_txn && ovnsb_idl_txn) {
+                        binding_seqno_install(&runtime_data->local_bindings);
+                    }
                 }
 
             }
@@ -2888,7 +2963,7 @@ main(int argc, char *argv[])
             }
 
             store_nb_cfg(ovnsb_idl_txn, ovs_idl_txn, chassis_private,
-                         br_int, delay_nb_cfg_report, ofctrl_get_cur_cfg());
+                         br_int, delay_nb_cfg_report);
 
             if (pending_pkt.conn) {
                 struct ed_type_addr_sets *as_data =
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 7e3abf0a4..3dc10389d 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -26,6 +26,7 @@
 #include "flow.h"
 #include "ha-chassis.h"
 #include "lport.h"
+#include "mac-learn.h"
 #include "nx-match.h"
 #include "latch.h"
 #include "lib/packets.h"
@@ -38,6 +39,7 @@
 #include "openvswitch/ofp-util.h"
 #include "openvswitch/vlog.h"
 #include "lib/random.h"
+#include "lib/crc32c.h"
 
 #include "lib/dhcp.h"
 #include "ovn-controller.h"
@@ -192,7 +194,6 @@ static void run_put_mac_bindings(
     struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip)
     OVS_REQUIRES(pinctrl_mutex);
 static void wait_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn);
-static void flush_put_mac_bindings(void);
 static void send_mac_binding_buffered_pkts(struct rconn *swconn)
     OVS_REQUIRES(pinctrl_mutex);
 
@@ -323,6 +324,39 @@ put_load(uint64_t value, enum mf_field_id dst, int ofs, int n_bits,
 static void notify_pinctrl_main(void);
 static void notify_pinctrl_handler(void);
 
+static bool bfd_monitor_should_inject(void);
+static void bfd_monitor_wait(long long int timeout);
+static void bfd_monitor_init(void);
+static void bfd_monitor_destroy(void);
+static void bfd_monitor_send_msg(struct rconn *swconn, long long int *bfd_time)
+                                 OVS_REQUIRES(pinctrl_mutex);
+static void
+pinctrl_handle_bfd_msg(struct rconn *swconn, const struct flow *ip_flow,
+                       struct dp_packet *pkt_in)
+                       OVS_REQUIRES(pinctrl_mutex);
+static void bfd_monitor_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
+                            const struct sbrec_bfd_table *bfd_table,
+                            struct ovsdb_idl_index *sbrec_port_binding_by_name,
+                            const struct sbrec_chassis *chassis,
+                            const struct sset *active_tunnels)
+                            OVS_REQUIRES(pinctrl_mutex);
+static void init_fdb_entries(void);
+static void destroy_fdb_entries(void);
+static const struct sbrec_fdb *fdb_lookup(
+    struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac,
+    uint32_t dp_key, const char *mac);
+static void run_put_fdb(struct ovsdb_idl_txn *ovnsb_idl_txn,
+                        struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac,
+                        const struct fdb_entry *fdb_e)
+                        OVS_REQUIRES(pinctrl_mutex);
+static void run_put_fdbs(struct ovsdb_idl_txn *ovnsb_idl_txn,
+                        struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac)
+                        OVS_REQUIRES(pinctrl_mutex);
+static void wait_put_fdbs(struct ovsdb_idl_txn *ovnsb_idl_txn);
+static void pinctrl_handle_put_fdb(const struct flow *md,
+                                   const struct flow *headers)
+                                   OVS_REQUIRES(pinctrl_mutex);
+
 COVERAGE_DEFINE(pinctrl_drop_put_mac_binding);
 COVERAGE_DEFINE(pinctrl_drop_buffered_packets_map);
 COVERAGE_DEFINE(pinctrl_drop_controller_event);
@@ -487,6 +521,8 @@ pinctrl_init(void)
     ip_mcast_snoop_init();
     init_put_vport_bindings();
     init_svc_monitors();
+    bfd_monitor_init();
+    init_fdb_entries();
     pinctrl.br_int_name = NULL;
     pinctrl_handler_seq = seq_create();
     pinctrl_main_seq = seq_create();
@@ -1380,6 +1416,11 @@ buffered_push_packet(struct buffered_packets *bp,
     ofpbuf_init(&bi->ofpacts, 4096);
 
     reload_metadata(&bi->ofpacts, md);
+    /* reload pkt_mark field */
+    const struct mf_field *pkt_mark_field = mf_from_id(MFF_PKT_MARK);
+    union mf_value pkt_mark_value;
+    mf_get_value(pkt_mark_field, &md->flow, &pkt_mark_value);
+    ofpact_put_set_field(&bi->ofpacts, pkt_mark_field, &pkt_mark_value, NULL);
     bi->ofp_port = md->flow.in_port.ofp_port;
 
     struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&bi->ofpacts);
@@ -1763,6 +1804,116 @@ pinctrl_handle_tcp_reset(struct rconn *swconn, const struct flow *ip_flow,
     dp_packet_uninit(&packet);
 }
 
+static void dp_packet_put_sctp_abort(struct dp_packet *packet,
+                                     bool reflect_tag)
+{
+    struct sctp_chunk_header abort = {
+        .sctp_chunk_type = SCTP_CHUNK_TYPE_ABORT,
+        .sctp_chunk_flags = reflect_tag ? SCTP_ABORT_CHUNK_FLAG_T : 0,
+        .sctp_chunk_len = htons(SCTP_CHUNK_HEADER_LEN),
+    };
+
+    dp_packet_put(packet, &abort, sizeof abort);
+}
+
+static void
+pinctrl_handle_sctp_abort(struct rconn *swconn, const struct flow *ip_flow,
+                         struct dp_packet *pkt_in,
+                         const struct match *md, struct ofpbuf *userdata,
+                         bool loopback)
+{
+    if (ip_flow->nw_proto != IPPROTO_SCTP) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl, "SCTP_ABORT action on non-SCTP packet");
+        return;
+    }
+
+    struct sctp_header *sh_in = dp_packet_l4(pkt_in);
+    if (!sh_in) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl, "SCTP_ABORT action on malformed SCTP packet");
+        return;
+    }
+
+    const struct sctp_chunk_header *sh_in_chunk =
+        dp_packet_get_sctp_payload(pkt_in);
+    if (!sh_in_chunk) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl, "SCTP_ABORT action on SCTP packet with no chunks");
+        return;
+    }
+
+    if (sh_in_chunk->sctp_chunk_type == SCTP_CHUNK_TYPE_ABORT) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl, "sctp_abort action on incoming SCTP ABORT.");
+        return;
+    }
+
+    const struct sctp_init_chunk *sh_in_init = NULL;
+    if (sh_in_chunk->sctp_chunk_type == SCTP_CHUNK_TYPE_INIT) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        sh_in_init = dp_packet_at(pkt_in, pkt_in->l4_ofs +
+                                          SCTP_HEADER_LEN +
+                                          SCTP_CHUNK_HEADER_LEN,
+                                  SCTP_INIT_CHUNK_LEN);
+        if (!sh_in_init) {
+            VLOG_WARN_RL(&rl, "Incomplete SCTP INIT chunk. Ignoring packet.");
+            return;
+        }
+    }
+
+    uint64_t packet_stub[128 / 8];
+    struct dp_packet packet;
+
+    dp_packet_use_stub(&packet, packet_stub, sizeof packet_stub);
+
+    struct eth_addr eth_src = loopback ? ip_flow->dl_dst : ip_flow->dl_src;
+    struct eth_addr eth_dst = loopback ? ip_flow->dl_src : ip_flow->dl_dst;
+
+    if (get_dl_type(ip_flow) == htons(ETH_TYPE_IPV6)) {
+        const struct in6_addr *ip6_src =
+            loopback ? &ip_flow->ipv6_dst : &ip_flow->ipv6_src;
+        const struct in6_addr *ip6_dst =
+            loopback ? &ip_flow->ipv6_src : &ip_flow->ipv6_dst;
+        pinctrl_compose_ipv6(&packet, eth_src, eth_dst,
+                             (struct in6_addr *) ip6_src,
+                             (struct in6_addr *) ip6_dst,
+                             IPPROTO_SCTP, 63, SCTP_HEADER_LEN +
+                                               SCTP_CHUNK_HEADER_LEN);
+    } else {
+        ovs_be32 nw_src = loopback ? ip_flow->nw_dst : ip_flow->nw_src;
+        ovs_be32 nw_dst = loopback ? ip_flow->nw_src : ip_flow->nw_dst;
+        pinctrl_compose_ipv4(&packet, eth_src, eth_dst, nw_src, nw_dst,
+                             IPPROTO_SCTP, 63, SCTP_HEADER_LEN +
+                                               SCTP_CHUNK_HEADER_LEN);
+    }
+
+    struct sctp_header *sh = dp_packet_put_zeros(&packet, sizeof *sh);
+    dp_packet_set_l4(&packet, sh);
+    sh->sctp_dst = ip_flow->tp_src;
+    sh->sctp_src = ip_flow->tp_dst;
+    put_16aligned_be32(&sh->sctp_csum, 0);
+
+    bool tag_reflected;
+    if (get_16aligned_be32(&sh_in->sctp_vtag) == 0 && sh_in_init) {
+        /* See RFC 4960 Section 8.4, item 3. */
+        put_16aligned_be32(&sh->sctp_vtag, sh_in_init->initiate_tag);
+        tag_reflected = false;
+    } else {
+        /* See RFC 4960 Section 8.4, item 8. */
+        sh->sctp_vtag = sh_in->sctp_vtag;
+        tag_reflected = true;
+    }
+
+    dp_packet_put_sctp_abort(&packet, tag_reflected);
+
+    put_16aligned_be32(&sh->sctp_csum, crc32c((void *) sh,
+                                              dp_packet_l4_size(&packet)));
+
+    set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+    dp_packet_uninit(&packet);
+}
+
 static void
 pinctrl_handle_reject(struct rconn *swconn, const struct flow *ip_flow,
                       struct dp_packet *pkt_in,
@@ -1770,6 +1921,8 @@ pinctrl_handle_reject(struct rconn *swconn, const struct flow *ip_flow,
 {
     if (ip_flow->nw_proto == IPPROTO_TCP) {
         pinctrl_handle_tcp_reset(swconn, ip_flow, pkt_in, md, userdata, true);
+    } else if (ip_flow->nw_proto == IPPROTO_SCTP) {
+        pinctrl_handle_sctp_abort(swconn, ip_flow, pkt_in, md, userdata, true);
     } else {
         pinctrl_handle_icmp(swconn, ip_flow, pkt_in, md, userdata, true, true);
     }
@@ -2884,6 +3037,12 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
         ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
+    case ACTION_OPCODE_PUT_FDB:
+        ovs_mutex_lock(&pinctrl_mutex);
+        pinctrl_handle_put_fdb(&pin.flow_metadata.flow, &headers);
+        ovs_mutex_unlock(&pinctrl_mutex);
+        break;
+
     case ACTION_OPCODE_PUT_DHCPV6_OPTS:
         pinctrl_handle_put_dhcpv6_opts(swconn, &packet, &pin, &userdata,
                                        &continuation);
@@ -2926,6 +3085,11 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
                                  &userdata, false);
         break;
 
+    case ACTION_OPCODE_SCTP_ABORT:
+        pinctrl_handle_sctp_abort(swconn, &headers, &packet,
+                                  &pin.flow_metadata, &userdata, false);
+        break;
+
     case ACTION_OPCODE_REJECT:
         pinctrl_handle_reject(swconn, &headers, &packet, &pin.flow_metadata,
                               &userdata);
@@ -2962,6 +3126,12 @@ process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
         ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
+    case ACTION_OPCODE_BFD_MSG:
+        ovs_mutex_lock(&pinctrl_mutex);
+        pinctrl_handle_bfd_msg(swconn, &headers, &packet);
+        ovs_mutex_unlock(&pinctrl_mutex);
+        break;
+
     default:
         VLOG_WARN_RL(&rl, "unrecognized packet-in opcode %"PRIu32,
                      ntohl(ah->opcode));
@@ -3053,6 +3223,8 @@ pinctrl_handler(void *arg_)
     swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP15_VERSION);
 
     while (!latch_is_set(&pctrl->pinctrl_thread_exit)) {
+        long long int bfd_time = LLONG_MAX;
+
         ovs_mutex_lock(&pinctrl_mutex);
         pinctrl_rconn_setup(swconn, pctrl->br_int_name);
         ip_mcast_snoop_run();
@@ -3085,6 +3257,7 @@ pinctrl_handler(void *arg_)
                 send_ipv6_ras(swconn, &send_ipv6_ra_time);
                 send_ipv6_prefixd(swconn, &send_prefixd_time);
                 send_mac_binding_buffered_pkts(swconn);
+                bfd_monitor_send_msg(swconn, &bfd_time);
                 ovs_mutex_unlock(&pinctrl_mutex);
 
                 ip_mcast_querier_run(swconn, &send_mcast_query_time);
@@ -3102,6 +3275,7 @@ pinctrl_handler(void *arg_)
         ip_mcast_querier_wait(send_mcast_query_time);
         svc_monitors_wait(svc_monitors_next_run_time);
         ipv6_prefixd_wait(send_prefixd_time);
+        bfd_monitor_wait(bfd_time);
 
         new_seq = seq_read(pinctrl_handler_seq);
         seq_wait(pinctrl_handler_seq, new_seq);
@@ -3146,9 +3320,11 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
             struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
             struct ovsdb_idl_index *sbrec_igmp_groups,
             struct ovsdb_idl_index *sbrec_ip_multicast_opts,
+            struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac,
             const struct sbrec_dns_table *dns_table,
             const struct sbrec_controller_event_table *ce_table,
             const struct sbrec_service_monitor_table *svc_mon_table,
+            const struct sbrec_bfd_table *bfd_table,
             const struct ovsrec_bridge *br_int,
             const struct sbrec_chassis *chassis,
             const struct hmap *local_datapaths,
@@ -3179,6 +3355,9 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
                          local_datapaths);
     sync_svc_monitors(ovnsb_idl_txn, svc_mon_table, sbrec_port_binding_by_name,
                       chassis);
+    bfd_monitor_run(ovnsb_idl_txn, bfd_table, sbrec_port_binding_by_name,
+                    chassis, active_tunnels);
+    run_put_fdbs(ovnsb_idl_txn, sbrec_fdb_by_dp_key_mac);
     ovs_mutex_unlock(&pinctrl_mutex);
 }
 
@@ -3702,6 +3881,7 @@ pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn)
     wait_put_vport_bindings(ovnsb_idl_txn);
     int64_t new_seq = seq_read(pinctrl_main_seq);
     seq_wait(pinctrl_main_seq, new_seq);
+    wait_put_fdbs(ovnsb_idl_txn);
 }
 
 /* Called by ovn-controller. */
@@ -3722,6 +3902,8 @@ pinctrl_destroy(void)
     destroy_dns_cache();
     ip_mcast_snoop_destroy();
     destroy_svc_monitors();
+    bfd_monitor_destroy();
+    destroy_fdb_entries();
     seq_destroy(pinctrl_main_seq);
     seq_destroy(pinctrl_handler_seq);
 }
@@ -3738,47 +3920,20 @@ pinctrl_destroy(void)
  * available. */
 
 /* Buffered "put_mac_binding" operation. */
-struct put_mac_binding {
-    struct hmap_node hmap_node; /* In 'put_mac_bindings'. */
-
-    /* Key. */
-    uint32_t dp_key;
-    uint32_t port_key;
-    struct in6_addr ip_key;
 
-    /* Value. */
-    struct eth_addr mac;
-};
-
-/* Contains "struct put_mac_binding"s. */
+/* Contains "struct mac_binding"s. */
 static struct hmap put_mac_bindings;
 
 static void
 init_put_mac_bindings(void)
 {
-    hmap_init(&put_mac_bindings);
+    ovn_mac_bindings_init(&put_mac_bindings);
 }
 
 static void
 destroy_put_mac_bindings(void)
 {
-    flush_put_mac_bindings();
-    hmap_destroy(&put_mac_bindings);
-}
-
-static struct put_mac_binding *
-pinctrl_find_put_mac_binding(uint32_t dp_key, uint32_t port_key,
-                             const struct in6_addr *ip_key, uint32_t hash)
-{
-    struct put_mac_binding *pa;
-    HMAP_FOR_EACH_WITH_HASH (pa, hmap_node, hash, &put_mac_bindings) {
-        if (pa->dp_key == dp_key
-            && pa->port_key == port_key
-            && IN6_ARE_ADDR_EQUAL(&pa->ip_key, ip_key)) {
-            return pa;
-        }
-    }
-    return NULL;
+    ovn_mac_bindings_destroy(&put_mac_bindings);
 }
 
 /* Called with in the pinctrl_handler thread context. */
@@ -3798,23 +3953,14 @@ pinctrl_handle_put_mac_binding(const struct flow *md,
         ovs_be128 ip6 = hton128(flow_get_xxreg(md, 0));
         memcpy(&ip_key, &ip6, sizeof ip_key);
     }
-    uint32_t hash = hash_bytes(&ip_key, sizeof ip_key,
-                               hash_2words(dp_key, port_key));
-    struct put_mac_binding *pmb
-        = pinctrl_find_put_mac_binding(dp_key, port_key, &ip_key, hash);
-    if (!pmb) {
-        if (hmap_count(&put_mac_bindings) >= 1000) {
-            COVERAGE_INC(pinctrl_drop_put_mac_binding);
-            return;
-        }
 
-        pmb = xmalloc(sizeof *pmb);
-        hmap_insert(&put_mac_bindings, &pmb->hmap_node, hash);
-        pmb->dp_key = dp_key;
-        pmb->port_key = port_key;
-        pmb->ip_key = ip_key;
+    struct mac_binding *mb = ovn_mac_binding_add(&put_mac_bindings, dp_key,
+                                                 port_key, &ip_key,
+                                                 headers->dl_src);
+    if (!mb) {
+        COVERAGE_INC(pinctrl_drop_put_mac_binding);
+        return;
     }
-    pmb->mac = headers->dl_src;
 
     /* We can send the buffered packet once the main ovn-controller
      * thread calls pinctrl_run() and it writes the mac_bindings stored
@@ -3857,12 +4003,12 @@ mac_binding_lookup(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
 /* Update or add an IP-MAC binding for 'logical_port'.
  * Caller should make sure that 'ovnsb_idl_txn' is valid. */
 static void
-mac_binding_add(struct ovsdb_idl_txn *ovnsb_idl_txn,
-                struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
-                const char *logical_port,
-                const struct sbrec_datapath_binding *dp,
-                struct eth_addr ea, const char *ip,
-                bool update_only)
+mac_binding_add_to_sb(struct ovsdb_idl_txn *ovnsb_idl_txn,
+                      struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
+                      const char *logical_port,
+                      const struct sbrec_datapath_binding *dp,
+                      struct eth_addr ea, const char *ip,
+                      bool update_only)
 {
     /* Convert ethernet argument to string form for database. */
     char mac_string[ETH_ADDR_STRLEN + 1];
@@ -3918,9 +4064,9 @@ send_garp_locally(struct ovsdb_idl_txn *ovnsb_idl_txn,
         struct ds ip_s = DS_EMPTY_INITIALIZER;
 
         ip_format_masked(ip, OVS_BE32_MAX, &ip_s);
-        mac_binding_add(ovnsb_idl_txn, sbrec_mac_binding_by_lport_ip,
-                        remote->logical_port, remote->datapath,
-                        ea, ds_cstr(&ip_s), update_only);
+        mac_binding_add_to_sb(ovnsb_idl_txn, sbrec_mac_binding_by_lport_ip,
+                              remote->logical_port, remote->datapath,
+                              ea, ds_cstr(&ip_s), update_only);
         ds_destroy(&ip_s);
     }
 }
@@ -3930,30 +4076,30 @@ run_put_mac_binding(struct ovsdb_idl_txn *ovnsb_idl_txn,
                     struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
                     struct ovsdb_idl_index *sbrec_port_binding_by_key,
                     struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
-                    const struct put_mac_binding *pmb)
+                    const struct mac_binding *mb)
 {
     /* Convert logical datapath and logical port key into lport. */
     const struct sbrec_port_binding *pb = lport_lookup_by_key(
         sbrec_datapath_binding_by_key, sbrec_port_binding_by_key,
-        pmb->dp_key, pmb->port_key);
+        mb->dp_key, mb->port_key);
     if (!pb) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
         VLOG_WARN_RL(&rl, "unknown logical port with datapath %"PRIu32" "
-                     "and port %"PRIu32, pmb->dp_key, pmb->port_key);
+                     "and port %"PRIu32, mb->dp_key, mb->port_key);
         return;
     }
 
     /* Convert ethernet argument to string form for database. */
     char mac_string[ETH_ADDR_STRLEN + 1];
     snprintf(mac_string, sizeof mac_string,
-             ETH_ADDR_FMT, ETH_ADDR_ARGS(pmb->mac));
+             ETH_ADDR_FMT, ETH_ADDR_ARGS(mb->mac));
 
     struct ds ip_s = DS_EMPTY_INITIALIZER;
-    ipv6_format_mapped(&pmb->ip_key, &ip_s);
-    mac_binding_add(ovnsb_idl_txn, sbrec_mac_binding_by_lport_ip,
-                    pb->logical_port, pb->datapath, pmb->mac, ds_cstr(&ip_s),
-                    false);
+    ipv6_format_mapped(&mb->ip, &ip_s);
+    mac_binding_add_to_sb(ovnsb_idl_txn, sbrec_mac_binding_by_lport_ip,
+                          pb->logical_port, pb->datapath, mb->mac,
+                          ds_cstr(&ip_s), false);
     ds_destroy(&ip_s);
 }
 
@@ -3970,14 +4116,14 @@ run_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn,
         return;
     }
 
-    const struct put_mac_binding *pmb;
-    HMAP_FOR_EACH (pmb, hmap_node, &put_mac_bindings) {
+    const struct mac_binding *mb;
+    HMAP_FOR_EACH (mb, hmap_node, &put_mac_bindings) {
         run_put_mac_binding(ovnsb_idl_txn, sbrec_datapath_binding_by_key,
                             sbrec_port_binding_by_key,
                             sbrec_mac_binding_by_lport_ip,
-                            pmb);
+                            mb);
     }
-    flush_put_mac_bindings();
+    ovn_mac_bindings_flush(&put_mac_bindings);
 }
 
 static void
@@ -4033,14 +4179,6 @@ wait_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn)
     }
 }
 
-static void
-flush_put_mac_bindings(void)
-{
-    struct put_mac_binding *pmb;
-    HMAP_FOR_EACH_POP (pmb, hmap_node, &put_mac_bindings) {
-        free(pmb);
-    }
-}
 
 /*
  * Send gratuitous/reverse ARP for vif on localnet.
@@ -5525,7 +5663,8 @@ may_inject_pkts(void)
             !shash_is_empty(&send_garp_rarp_data) ||
             ipv6_prefixd_should_inject() ||
             !ovs_list_is_empty(&mcast_query_list) ||
-            !ovs_list_is_empty(&buffered_mac_bindings));
+            !ovs_list_is_empty(&buffered_mac_bindings) ||
+            bfd_monitor_should_inject());
 }
 
 static void
@@ -6312,6 +6451,665 @@ sync_svc_monitors(struct ovsdb_idl_txn *ovnsb_idl_txn,
 
 }
 
+enum bfd_state {
+    BFD_STATE_ADMIN_DOWN,
+    BFD_STATE_DOWN,
+    BFD_STATE_INIT,
+    BFD_STATE_UP,
+};
+
+enum bfd_flags {
+    BFD_FLAG_MULTIPOINT = 1 << 0,
+    BFD_FLAG_DEMAND = 1 << 1,
+    BFD_FLAG_AUTH = 1 << 2,
+    BFD_FLAG_CTL = 1 << 3,
+    BFD_FLAG_FINAL = 1 << 4,
+    BFD_FLAG_POLL = 1 << 5
+};
+
+#define BFD_FLAGS_MASK  0x3f
+
+static char *
+bfd_get_status(enum bfd_state state)
+{
+    switch (state) {
+    case BFD_STATE_ADMIN_DOWN:
+        return "admin_down";
+    case BFD_STATE_DOWN:
+        return "down";
+    case BFD_STATE_INIT:
+        return "init";
+    case BFD_STATE_UP:
+        return "up";
+    default:
+        return "";
+    }
+}
+
+static struct hmap bfd_monitor_map;
+
+#define BFD_UPDATE_BATCH_TH     10
+static uint16_t bfd_pending_update;
+#define BFD_UPDATE_TIMEOUT      5000LL
+static long long bfd_last_update;
+
+struct bfd_entry {
+    struct hmap_node node;
+    bool erase;
+
+    /* L2 source address */
+    struct eth_addr src_mac;
+    /* IP source address */
+    struct in6_addr ip_src;
+    /* IP destination address */
+    struct in6_addr ip_dst;
+    /* RFC 5881 section 4
+     * The source port MUST be in the range 49152 through 65535.
+     * The same UDP source port number MUST be used for all BFD
+     * Control packets associated with a particular session.
+     * The source port number SHOULD be unique among all BFD
+     * sessions on the system
+     */
+    uint16_t udp_src;
+    ovs_be32 local_disc;
+    ovs_be32 remote_disc;
+
+    uint32_t local_min_tx;
+    uint32_t local_min_rx;
+    uint32_t remote_min_rx;
+
+    bool remote_demand_mode;
+
+    uint8_t local_mult;
+
+    int64_t port_key;
+    int64_t metadata;
+
+    enum bfd_state state;
+    bool change_state;
+
+    uint32_t detection_timeout;
+    long long int last_rx;
+    long long int next_tx;
+};
+
+static void
+bfd_monitor_init(void)
+{
+    hmap_init(&bfd_monitor_map);
+    bfd_last_update = time_msec();
+}
+
+static void
+bfd_monitor_destroy(void)
+{
+    struct bfd_entry *entry;
+    HMAP_FOR_EACH_POP (entry, node, &bfd_monitor_map) {
+        free(entry);
+    }
+    hmap_destroy(&bfd_monitor_map);
+}
+
+static struct bfd_entry *
+pinctrl_find_bfd_monitor_entry_by_port(char *ip, uint16_t port)
+{
+    struct bfd_entry *entry;
+    HMAP_FOR_EACH_WITH_HASH (entry, node, hash_string(ip, 0),
+                             &bfd_monitor_map) {
+        if (entry->udp_src == port) {
+            return entry;
+        }
+    }
+    return NULL;
+}
+
+static struct bfd_entry *
+pinctrl_find_bfd_monitor_entry_by_disc(char *ip, ovs_be32 disc)
+{
+    struct bfd_entry *ret = NULL, *entry;
+
+    HMAP_FOR_EACH_WITH_HASH (entry, node, hash_string(ip, 0),
+                             &bfd_monitor_map) {
+        if (entry->local_disc == disc) {
+            ret = entry;
+            break;
+        }
+    }
+    return ret;
+}
+
+static bool
+bfd_monitor_should_inject(void)
+{
+    long long int cur_time = time_msec();
+    struct bfd_entry *entry;
+
+    HMAP_FOR_EACH (entry, node, &bfd_monitor_map) {
+        if (entry->next_tx < cur_time) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void
+bfd_monitor_wait(long long int timeout)
+{
+    if (!hmap_is_empty(&bfd_monitor_map)) {
+        poll_timer_wait_until(timeout);
+    }
+}
+
+static void
+bfd_monitor_put_bfd_msg(struct bfd_entry *entry, struct dp_packet *packet,
+                        bool final)
+{
+    int payload_len = sizeof(struct udp_header) + sizeof(struct bfd_msg);
+
+    /* Properly align after the ethernet header */
+    dp_packet_reserve(packet, 2);
+    if (IN6_IS_ADDR_V4MAPPED(&entry->ip_src)) {
+        ovs_be32 ip_src = in6_addr_get_mapped_ipv4(&entry->ip_src);
+        ovs_be32 ip_dst = in6_addr_get_mapped_ipv4(&entry->ip_dst);
+        pinctrl_compose_ipv4(packet, entry->src_mac, eth_addr_broadcast,
+                             ip_src, ip_dst, IPPROTO_UDP, MAXTTL, payload_len);
+    } else {
+        pinctrl_compose_ipv6(packet, entry->src_mac, eth_addr_broadcast,
+                             &entry->ip_src, &entry->ip_dst, IPPROTO_UDP,
+                             MAXTTL, payload_len);
+    }
+
+    struct udp_header *udp = dp_packet_put_zeros(packet, sizeof *udp);
+    udp->udp_len = htons(payload_len);
+    udp->udp_csum = 0;
+    udp->udp_src = htons(entry->udp_src);
+    udp->udp_dst = htons(BFD_DEST_PORT);
+
+    struct bfd_msg *msg = dp_packet_put_zeros(packet, sizeof *msg);
+    msg->vers_diag = (BFD_VERSION << 5);
+    msg->mult = entry->local_mult;
+    msg->length = BFD_PACKET_LEN;
+    msg->flags = final ? BFD_FLAG_FINAL : 0;
+    msg->flags |= entry->state << 6;
+    msg->my_disc = entry->local_disc;
+    msg->your_disc = entry->remote_disc;
+    /* min_tx and min_rx are in us - RFC 5880 page 9 */
+    msg->min_tx = htonl(entry->local_min_tx * 1000);
+    msg->min_rx = htonl(entry->local_min_rx * 1000);
+
+    if (!IN6_IS_ADDR_V4MAPPED(&entry->ip_src)) {
+        /* IPv6 needs UDP checksum calculated */
+        uint32_t csum = packet_csum_pseudoheader6(dp_packet_l3(packet));
+        int len = (uint8_t *)udp - (uint8_t *)dp_packet_eth(packet);
+        csum = csum_continue(csum, udp, dp_packet_size(packet) - len);
+        udp->udp_csum = csum_finish(csum);
+        if (!udp->udp_csum) {
+            udp->udp_csum = htons(0xffff);
+        }
+    }
+}
+
+static void
+pinctrl_send_bfd_tx_msg(struct rconn *swconn, struct bfd_entry *entry,
+                        bool final)
+{
+    uint64_t packet_stub[256 / 8];
+    struct dp_packet packet;
+    dp_packet_use_stub(&packet, packet_stub, sizeof packet_stub);
+    bfd_monitor_put_bfd_msg(entry, &packet, final);
+
+    uint64_t ofpacts_stub[4096 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(ofpacts_stub);
+
+    /* Set MFF_LOG_DATAPATH and MFF_LOG_INPORT. */
+    uint32_t dp_key = entry->metadata;
+    uint32_t port_key = entry->port_key;
+    put_load(dp_key, MFF_LOG_DATAPATH, 0, 64, &ofpacts);
+    put_load(port_key, MFF_LOG_INPORT, 0, 32, &ofpacts);
+    put_load(1, MFF_LOG_FLAGS, MLF_LOCAL_ONLY_BIT, 1, &ofpacts);
+    struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&ofpacts);
+    resubmit->in_port = OFPP_CONTROLLER;
+    resubmit->table_id = OFTABLE_LOG_INGRESS_PIPELINE;
+
+    struct ofputil_packet_out po = {
+        .packet = dp_packet_data(&packet),
+        .packet_len = dp_packet_size(&packet),
+        .buffer_id = UINT32_MAX,
+        .ofpacts = ofpacts.data,
+        .ofpacts_len = ofpacts.size,
+    };
+
+    match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
+    enum ofp_version version = rconn_get_version(swconn);
+    enum ofputil_protocol proto =
+        ofputil_protocol_from_ofp_version(version);
+    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    dp_packet_uninit(&packet);
+    ofpbuf_uninit(&ofpacts);
+}
+
+
+static bool
+bfd_monitor_need_update(void)
+{
+    long long int cur_time = time_msec();
+
+    if (bfd_pending_update == BFD_UPDATE_BATCH_TH) {
+        goto update;
+    }
+
+    if (bfd_pending_update &&
+        bfd_last_update + BFD_UPDATE_TIMEOUT < cur_time) {
+        goto update;
+    }
+    return false;
+
+update:
+    bfd_last_update = cur_time;
+    bfd_pending_update = 0;
+    return true;
+}
+
+static void
+bfd_check_detection_timeout(struct bfd_entry *entry)
+{
+    if (entry->state == BFD_STATE_ADMIN_DOWN) {
+        return;
+    }
+
+    if (!entry->detection_timeout) {
+        return;
+    }
+
+    long long int cur_time = time_msec();
+    if (cur_time < entry->last_rx + entry->detection_timeout) {
+        return;
+    }
+
+    entry->state = BFD_STATE_DOWN;
+    entry->change_state = true;
+    bfd_last_update = cur_time;
+    bfd_pending_update = 0;
+    notify_pinctrl_main();
+}
+
+static void
+bfd_monitor_send_msg(struct rconn *swconn, long long int *bfd_time)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    long long int cur_time = time_msec();
+    struct bfd_entry *entry;
+
+    if (bfd_monitor_need_update()) {
+        notify_pinctrl_main();
+    }
+
+    HMAP_FOR_EACH (entry, node, &bfd_monitor_map) {
+        unsigned long tx_timeout;
+
+        bfd_check_detection_timeout(entry);
+
+        if (cur_time < entry->next_tx) {
+            goto next;
+        }
+
+        if (!entry->remote_min_rx) {
+            continue;
+        }
+
+        if (entry->state == BFD_STATE_ADMIN_DOWN) {
+            continue;
+        }
+
+        if (entry->remote_demand_mode) {
+            continue;
+        }
+
+        pinctrl_send_bfd_tx_msg(swconn, entry, false);
+
+        tx_timeout = MAX(entry->local_min_tx, entry->remote_min_rx);
+        tx_timeout -= random_range((tx_timeout * 25) / 100);
+        entry->next_tx = cur_time + tx_timeout;
+next:
+        if (*bfd_time > entry->next_tx) {
+            *bfd_time = entry->next_tx;
+        }
+    }
+}
+
+static bool
+pinctrl_check_bfd_msg(const struct flow *ip_flow, struct dp_packet *pkt_in)
+{
+    if (ip_flow->dl_type != htons(ETH_TYPE_IP) &&
+        ip_flow->dl_type != htons(ETH_TYPE_IPV6)) {
+        return false;
+    }
+
+    if (ip_flow->nw_proto != IPPROTO_UDP) {
+        return false;
+    }
+
+    struct udp_header *udp_hdr = dp_packet_l4(pkt_in);
+    if (udp_hdr->udp_dst != htons(BFD_DEST_PORT)) {
+        return false;
+    }
+
+    const struct bfd_msg *msg = dp_packet_get_udp_payload(pkt_in);
+    uint8_t version = msg->vers_diag >> 5;
+    if (version != BFD_VERSION) {
+        return false;
+    }
+
+    enum bfd_flags flags = msg->flags & BFD_FLAGS_MASK;
+    if (flags & BFD_FLAG_AUTH) {
+        /* AUTH not supported yet */
+        return false;
+    }
+
+    if (msg->length < BFD_PACKET_LEN) {
+        return false;
+    }
+
+    if (!msg->mult) {
+        return false;
+    }
+
+    if (flags & BFD_FLAG_MULTIPOINT) {
+        return false;
+    }
+
+    if (!msg->my_disc) {
+        return false;
+    }
+
+    if ((flags & BFD_FLAG_FINAL) && (flags & BFD_FLAG_POLL)) {
+        return false;
+    }
+
+    enum bfd_state peer_state = msg->flags >> 6;
+    if (peer_state >= BFD_STATE_INIT && !msg->your_disc) {
+        return false;
+    }
+
+    return true;
+}
+
+static void
+pinctrl_handle_bfd_msg(struct rconn *swconn, const struct flow *ip_flow,
+                       struct dp_packet *pkt_in)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    if (!pinctrl_check_bfd_msg(ip_flow, pkt_in)) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl, "BFD packet discarded");
+        return;
+    }
+
+    char *ip_src;
+    if (ip_flow->dl_type == htons(ETH_TYPE_IP)) {
+        ip_src = normalize_ipv4_prefix(ip_flow->nw_src, 32);
+    } else {
+        ip_src = normalize_ipv6_prefix(&ip_flow->ipv6_src, 128);
+    }
+
+    const struct bfd_msg *msg = dp_packet_get_udp_payload(pkt_in);
+    struct bfd_entry *entry =
+        pinctrl_find_bfd_monitor_entry_by_disc(ip_src, msg->your_disc);
+    free(ip_src);
+
+    if (!entry) {
+        return;
+    }
+
+    bool change_state = false;
+    entry->remote_disc = msg->my_disc;
+    uint32_t remote_min_tx = ntohl(msg->min_tx) / 1000;
+    entry->remote_min_rx = ntohl(msg->min_rx) / 1000;
+    entry->detection_timeout = msg->mult * MAX(remote_min_tx,
+                                               entry->local_min_rx);
+
+    enum bfd_state peer_state = msg->flags >> 6;
+    if (peer_state == BFD_STATE_ADMIN_DOWN &&
+        entry->state >= BFD_STATE_INIT) {
+        entry->state = BFD_STATE_DOWN;
+        entry->last_rx = time_msec();
+        change_state = true;
+        goto out;
+    }
+
+    /* bfd state machine */
+    switch (entry->state) {
+    case BFD_STATE_DOWN:
+        if (peer_state == BFD_STATE_DOWN) {
+            entry->state = BFD_STATE_INIT;
+            change_state = true;
+        }
+        if (peer_state == BFD_STATE_INIT) {
+            entry->state = BFD_STATE_UP;
+            change_state = true;
+        }
+        entry->last_rx = time_msec();
+        break;
+    case BFD_STATE_INIT:
+        if (peer_state == BFD_STATE_INIT ||
+            peer_state == BFD_STATE_UP) {
+            entry->state = BFD_STATE_UP;
+            change_state = true;
+        }
+        if (peer_state == BFD_STATE_ADMIN_DOWN) {
+            entry->state = BFD_STATE_DOWN;
+            change_state = true;
+        }
+        entry->last_rx = time_msec();
+        break;
+    case BFD_STATE_UP:
+        if (peer_state == BFD_STATE_ADMIN_DOWN ||
+            peer_state == BFD_STATE_DOWN) {
+            entry->state = BFD_STATE_DOWN;
+            change_state = true;
+        }
+        entry->last_rx = time_msec();
+        break;
+    case BFD_STATE_ADMIN_DOWN:
+    default:
+        break;
+    }
+
+    if (entry->state == BFD_STATE_UP &&
+        (msg->flags & BFD_FLAG_DEMAND)) {
+        entry->remote_demand_mode = true;
+    }
+
+    if (msg->flags & BFD_FLAG_POLL) {
+        pinctrl_send_bfd_tx_msg(swconn, entry, true);
+    }
+
+out:
+    /* let's try to batch db updates */
+    if (change_state) {
+        entry->change_state = true;
+        bfd_pending_update++;
+    }
+    if (bfd_monitor_need_update()) {
+        notify_pinctrl_main();
+    }
+}
+
+static void
+bfd_monitor_check_sb_conf(const struct sbrec_bfd *sb_bt,
+                          struct bfd_entry *entry)
+{
+    struct lport_addresses dst_addr;
+
+    if (extract_ip_addresses(sb_bt->dst_ip, &dst_addr)) {
+        struct in6_addr addr;
+
+        if (dst_addr.n_ipv6_addrs > 0) {
+            addr = dst_addr.ipv6_addrs[0].addr;
+        } else {
+            addr = in6_addr_mapped_ipv4(dst_addr.ipv4_addrs[0].addr);
+        }
+
+        if (!ipv6_addr_equals(&addr, &entry->ip_dst)) {
+            entry->ip_dst = addr;
+        }
+        destroy_lport_addresses(&dst_addr);
+    }
+
+    if (sb_bt->min_tx != entry->local_min_tx) {
+        entry->local_min_tx = sb_bt->min_tx;
+    }
+
+    if (sb_bt->min_rx != entry->local_min_rx) {
+        entry->local_min_rx = sb_bt->min_rx;
+    }
+
+    if (sb_bt->detect_mult != entry->local_mult) {
+        entry->local_mult = sb_bt->detect_mult;
+    }
+}
+
+static void
+bfd_monitor_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
+                const struct sbrec_bfd_table *bfd_table,
+                struct ovsdb_idl_index *sbrec_port_binding_by_name,
+                const struct sbrec_chassis *chassis,
+                const struct sset *active_tunnels)
+    OVS_REQUIRES(pinctrl_mutex)
+{
+    struct bfd_entry *entry, *next_entry;
+    long long int cur_time = time_msec();
+    bool changed = false;
+
+    HMAP_FOR_EACH (entry, node, &bfd_monitor_map) {
+        entry->erase = true;
+    }
+
+    const struct sbrec_bfd *bt;
+    SBREC_BFD_TABLE_FOR_EACH (bt, bfd_table) {
+        const struct sbrec_port_binding *pb
+            = lport_lookup_by_name(sbrec_port_binding_by_name,
+                                   bt->logical_port);
+        if (!pb) {
+            continue;
+        }
+
+        const char *peer_s = smap_get(&pb->options, "peer");
+        if (!peer_s) {
+            continue;
+        }
+
+        const struct sbrec_port_binding *peer
+            = lport_lookup_by_name(sbrec_port_binding_by_name, peer_s);
+        if (!peer) {
+            continue;
+        }
+
+        char *redirect_name = xasprintf("cr-%s", pb->logical_port);
+        bool resident = lport_is_chassis_resident(
+                sbrec_port_binding_by_name, chassis, active_tunnels,
+                redirect_name);
+        free(redirect_name);
+        if ((strcmp(pb->type, "l3gateway") || pb->chassis != chassis) &&
+            !resident) {
+            continue;
+        }
+
+        entry = pinctrl_find_bfd_monitor_entry_by_port(
+                bt->dst_ip, bt->src_port);
+        if (!entry) {
+            struct eth_addr ea = eth_addr_zero;
+            struct lport_addresses dst_addr;
+            struct in6_addr ip_src, ip_dst;
+            int i;
+
+            ip_dst = in6_addr_mapped_ipv4(htonl(BFD_DEFAULT_DST_IP));
+            ip_src = in6_addr_mapped_ipv4(htonl(BFD_DEFAULT_SRC_IP));
+
+            if (!extract_ip_addresses(bt->dst_ip, &dst_addr)) {
+                continue;
+            }
+
+            for (i = 0; i < pb->n_mac; i++) {
+                struct lport_addresses laddrs;
+
+                if (!extract_lsp_addresses(pb->mac[i], &laddrs)) {
+                    continue;
+                }
+
+                ea = laddrs.ea;
+                if (dst_addr.n_ipv6_addrs > 0 && laddrs.n_ipv6_addrs > 0) {
+                    ip_dst = dst_addr.ipv6_addrs[0].addr;
+                    ip_src = laddrs.ipv6_addrs[0].addr;
+                    destroy_lport_addresses(&laddrs);
+                    break;
+                } else if (laddrs.n_ipv4_addrs > 0) {
+                    ip_dst = in6_addr_mapped_ipv4(dst_addr.ipv4_addrs[0].addr);
+                    ip_src = in6_addr_mapped_ipv4(laddrs.ipv4_addrs[0].addr);
+                    destroy_lport_addresses(&laddrs);
+                    break;
+                }
+                destroy_lport_addresses(&laddrs);
+            }
+            destroy_lport_addresses(&dst_addr);
+
+            if (eth_addr_is_zero(ea)) {
+                continue;
+            }
+
+            entry = xzalloc(sizeof *entry);
+            entry->src_mac = ea;
+            entry->ip_src = ip_src;
+            entry->ip_dst = ip_dst;
+            entry->udp_src = bt->src_port;
+            entry->local_disc = htonl(bt->disc);
+            entry->next_tx = cur_time;
+            entry->last_rx = cur_time;
+            entry->detection_timeout = 30000;
+            entry->metadata = pb->datapath->tunnel_key;
+            entry->port_key = pb->tunnel_key;
+            entry->state = BFD_STATE_ADMIN_DOWN;
+            entry->local_min_tx = bt->min_tx;
+            entry->local_min_rx = bt->min_rx;
+            entry->remote_min_rx = 1; /* RFC5880 page 29 */
+            entry->local_mult = bt->detect_mult;
+
+            uint32_t hash = hash_string(bt->dst_ip, 0);
+            hmap_insert(&bfd_monitor_map, &entry->node, hash);
+        } else if (!strcmp(bt->status, "admin_down") &&
+                   entry->state != BFD_STATE_ADMIN_DOWN) {
+            entry->state = BFD_STATE_ADMIN_DOWN;
+            entry->change_state = false;
+            entry->remote_disc = 0;
+        } else if (strcmp(bt->status, "admin_down") &&
+                   entry->state == BFD_STATE_ADMIN_DOWN) {
+            entry->state = BFD_STATE_DOWN;
+            entry->change_state = false;
+            entry->remote_disc = 0;
+            changed = true;
+        } else if (entry->change_state && ovnsb_idl_txn) {
+            if (entry->state == BFD_STATE_DOWN) {
+                entry->remote_disc = 0;
+            }
+            sbrec_bfd_set_status(bt, bfd_get_status(entry->state));
+            entry->change_state = false;
+        }
+        bfd_monitor_check_sb_conf(bt, entry);
+        entry->erase = false;
+    }
+
+    HMAP_FOR_EACH_SAFE (entry, next_entry, node, &bfd_monitor_map) {
+        if (entry->erase) {
+            hmap_remove(&bfd_monitor_map, &entry->node);
+            free(entry);
+        }
+    }
+
+    if (changed) {
+        notify_pinctrl_handler();
+    }
+}
+
 static uint16_t
 get_random_src_port(void)
 {
@@ -6724,3 +7522,94 @@ pinctrl_handle_svc_check(struct rconn *swconn, const struct flow *ip_flow,
         svc_mon->next_send_time = time_msec() + svc_mon->interval;
     }
 }
+
+static struct hmap put_fdbs;
+
+/* MAC learning (fdb) related functions.  Runs within the main
+ * ovn-controller thread context. */
+
+static void
+init_fdb_entries(void)
+{
+    ovn_fdb_init(&put_fdbs);
+}
+
+static void
+destroy_fdb_entries(void)
+{
+    ovn_fdbs_destroy(&put_fdbs);
+}
+
+static const struct sbrec_fdb *
+fdb_lookup(struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac, uint32_t dp_key,
+           const char *mac)
+{
+    struct sbrec_fdb *fdb = sbrec_fdb_index_init_row(sbrec_fdb_by_dp_key_mac);
+    sbrec_fdb_index_set_dp_key(fdb, dp_key);
+    sbrec_fdb_index_set_mac(fdb, mac);
+
+    const struct sbrec_fdb *retval
+        = sbrec_fdb_index_find(sbrec_fdb_by_dp_key_mac, fdb);
+
+    sbrec_fdb_index_destroy_row(fdb);
+
+    return retval;
+}
+
+static void
+run_put_fdb(struct ovsdb_idl_txn *ovnsb_idl_txn,
+            struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac,
+            const struct fdb_entry *fdb_e)
+{
+    /* Convert ethernet argument to string form for database. */
+    char mac_string[ETH_ADDR_STRLEN + 1];
+    snprintf(mac_string, sizeof mac_string,
+             ETH_ADDR_FMT, ETH_ADDR_ARGS(fdb_e->mac));
+
+    /* Update or add an FDB entry. */
+    const struct sbrec_fdb *sb_fdb =
+        fdb_lookup(sbrec_fdb_by_dp_key_mac, fdb_e->dp_key, mac_string);
+    if (!sb_fdb) {
+        sb_fdb = sbrec_fdb_insert(ovnsb_idl_txn);
+        sbrec_fdb_set_dp_key(sb_fdb, fdb_e->dp_key);
+        sbrec_fdb_set_mac(sb_fdb, mac_string);
+    }
+    sbrec_fdb_set_port_key(sb_fdb, fdb_e->port_key);
+}
+
+static void
+run_put_fdbs(struct ovsdb_idl_txn *ovnsb_idl_txn,
+             struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac)
+             OVS_REQUIRES(pinctrl_mutex)
+{
+    if (!ovnsb_idl_txn) {
+        return;
+    }
+
+    const struct fdb_entry *fdb_e;
+    HMAP_FOR_EACH (fdb_e, hmap_node, &put_fdbs) {
+        run_put_fdb(ovnsb_idl_txn, sbrec_fdb_by_dp_key_mac, fdb_e);
+    }
+    ovn_fdbs_flush(&put_fdbs);
+}
+
+
+static void
+wait_put_fdbs(struct ovsdb_idl_txn *ovnsb_idl_txn)
+{
+    if (ovnsb_idl_txn && !hmap_is_empty(&put_fdbs)) {
+        poll_immediate_wake();
+    }
+}
+
+/* Called with in the pinctrl_handler thread context. */
+static void
+pinctrl_handle_put_fdb(const struct flow *md, const struct flow *headers)
+                       OVS_REQUIRES(pinctrl_mutex)
+{
+    uint32_t dp_key = ntohll(md->metadata);
+    uint32_t port_key = md->regs[MFF_LOG_INPORT - MFF_REG0];
+
+    ovn_fdb_add(&put_fdbs, dp_key, headers->dl_src, port_key);
+    notify_pinctrl_main();
+}
diff --git a/controller/pinctrl.h b/controller/pinctrl.h
index 4b101ec92..cc0a51984 100644
--- a/controller/pinctrl.h
+++ b/controller/pinctrl.h
@@ -31,6 +31,7 @@ struct sbrec_chassis;
 struct sbrec_dns_table;
 struct sbrec_controller_event_table;
 struct sbrec_service_monitor_table;
+struct sbrec_bfd_table;
 
 void pinctrl_init(void);
 void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
@@ -41,9 +42,11 @@ void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
                  struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
                  struct ovsdb_idl_index *sbrec_igmp_groups,
                  struct ovsdb_idl_index *sbrec_ip_multicast_opts,
+                 struct ovsdb_idl_index *sbrec_fdb_by_dp_key_mac,
                  const struct sbrec_dns_table *,
                  const struct sbrec_controller_event_table *,
                  const struct sbrec_service_monitor_table *,
+                 const struct sbrec_bfd_table *,
                  const struct ovsrec_bridge *, const struct sbrec_chassis *,
                  const struct hmap *local_datapaths,
                  const struct sset *active_tunnels);
diff --git a/controller/test-ofctrl-seqno.c b/controller/test-ofctrl-seqno.c
new file mode 100644
index 000000000..fce88d4bd
--- /dev/null
+++ b/controller/test-ofctrl-seqno.c
@@ -0,0 +1,194 @@
+/* Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "tests/ovstest.h"
+#include "sort.h"
+#include "util.h"
+
+#include "ofctrl-seqno.h"
+
+static void
+test_init(void)
+{
+    ofctrl_seqno_init();
+}
+
+static bool
+test_read_uint_value(struct ovs_cmdl_context *ctx, unsigned int index,
+                     const char *descr, unsigned int *result)
+{
+    if (index >= ctx->argc) {
+        fprintf(stderr, "Missing %s argument\n", descr);
+        return false;
+    }
+
+    const char *arg = ctx->argv[index];
+    if (!str_to_uint(arg, 10, result)) {
+        fprintf(stderr, "Invalid %s: %s\n", descr, arg);
+        return false;
+    }
+    return true;
+}
+
+static int
+test_seqno_compare(size_t a, size_t b, void *values_)
+{
+    uint64_t *values = values_;
+
+    return values[a] == values[b] ? 0 : (values[a] < values[b] ? -1 : 1);
+}
+
+static void
+test_seqno_swap(size_t a, size_t b, void *values_)
+{
+    uint64_t *values = values_;
+    uint64_t tmp = values[a];
+
+    values[a] = values[b];
+    values[b] = tmp;
+}
+
+static void
+test_dump_acked_seqnos(size_t seqno_type)
+{
+    struct ofctrl_acked_seqnos * acked_seqnos =
+        ofctrl_acked_seqnos_get(seqno_type);
+
+    printf("ofctrl-seqno-type: %"PRIuSIZE"\n", seqno_type);
+    printf("  last-acked %"PRIu64"\n", acked_seqnos->last_acked);
+
+    size_t n_acked = hmap_count(&acked_seqnos->acked);
+    uint64_t *acked = xmalloc(n_acked * sizeof *acked);
+    struct ofctrl_ack_seqno *ack_seqno;
+    size_t i = 0;
+
+    /* A bit hacky, but, ignoring overflows, the "total of all seqno + 1" should
+     * be a number that is not part of the acked seqnos.
+     */
+    uint64_t total_seqno = 1;
+    HMAP_FOR_EACH (ack_seqno, node, &acked_seqnos->acked) {
+        ovs_assert(ofctrl_acked_seqnos_contains(acked_seqnos,
+                                                ack_seqno->seqno));
+        total_seqno += ack_seqno->seqno;
+        acked[i++] = ack_seqno->seqno;
+    }
+    ovs_assert(!ofctrl_acked_seqnos_contains(acked_seqnos, total_seqno));
+
+    sort(n_acked, test_seqno_compare, test_seqno_swap, acked);
+
+    for (i = 0; i < n_acked; i++) {
+        printf("  %"PRIu64"\n", acked[i]);
+    }
+
+    free(acked);
+    ofctrl_acked_seqnos_destroy(acked_seqnos);
+}
+
+static void
+test_ofctrl_seqno_add_type(struct ovs_cmdl_context *ctx)
+{
+    unsigned int n_types;
+
+    test_init();
+
+    if (!test_read_uint_value(ctx, 1, "n_types", &n_types)) {
+        return;
+    }
+    for (unsigned int i = 0; i < n_types; i++) {
+        printf("%"PRIuSIZE"\n", ofctrl_seqno_add_type());
+    }
+}
+
+static void
+test_ofctrl_seqno_ack_seqnos(struct ovs_cmdl_context *ctx)
+{
+    unsigned int n_reqs = 0;
+    unsigned int shift = 2;
+    unsigned int n_types;
+    unsigned int n_acks;
+
+    test_init();
+    bool batch_acks = !strcmp(ctx->argv[1], "true");
+
+    if (!test_read_uint_value(ctx, shift++, "n_types", &n_types)) {
+        return;
+    }
+
+    for (unsigned int i = 0; i < n_types; i++) {
+        ovs_assert(ofctrl_seqno_add_type() == i);
+
+        /* Read number of app specific seqnos. */
+        unsigned int n_app_seqnos;
+
+        if (!test_read_uint_value(ctx, shift++, "n_app_seqnos",
+                                  &n_app_seqnos)) {
+            return;
+        }
+
+        for (unsigned int j = 0; j < n_app_seqnos; j++, n_reqs++) {
+            unsigned int app_seqno;
+
+            if (!test_read_uint_value(ctx, shift++, "app_seqno", &app_seqno)) {
+                return;
+            }
+            ofctrl_seqno_update_create(i, app_seqno);
+        }
+    }
+    printf("ofctrl-seqno-req-cfg: %u\n", n_reqs);
+
+    if (!test_read_uint_value(ctx, shift++, "n_acks", &n_acks)) {
+        return;
+    }
+    for (unsigned int i = 0; i < n_acks; i++) {
+        unsigned int ack_seqno;
+
+        if (!test_read_uint_value(ctx, shift++, "ack_seqno", &ack_seqno)) {
+            return;
+        }
+        ofctrl_seqno_run(ack_seqno);
+
+        if (!batch_acks) {
+            for (unsigned int st = 0; st < n_types; st++) {
+                test_dump_acked_seqnos(st);
+            }
+        }
+    }
+    if (batch_acks) {
+        for (unsigned int st = 0; st < n_types; st++) {
+            test_dump_acked_seqnos(st);
+        }
+    }
+}
+
+static void
+test_ofctrl_seqno_main(int argc, char *argv[])
+{
+    set_program_name(argv[0]);
+    static const struct ovs_cmdl_command commands[] = {
+        {"ofctrl_seqno_add_type", NULL, 1, 1,
+         test_ofctrl_seqno_add_type, OVS_RO},
+        {"ofctrl_seqno_ack_seqnos", NULL, 2, INT_MAX,
+         test_ofctrl_seqno_ack_seqnos, OVS_RO},
+        {NULL, NULL, 0, 0, NULL, OVS_RO},
+    };
+    struct ovs_cmdl_context ctx;
+    ctx.argc = argc - 1;
+    ctx.argv = argv + 1;
+    ovs_cmdl_run_command(&ctx, commands);
+}
+
+OVSTEST_REGISTER("test-ofctrl-seqno", test_ofctrl_seqno_main);
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index 9c1ebf4aa..040213177 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -105,6 +105,11 @@ struct ovn_extend_table;
     OVNACT(CHK_LB_HAIRPIN,    ovnact_result)          \
     OVNACT(CHK_LB_HAIRPIN_REPLY, ovnact_result)       \
     OVNACT(CT_SNAT_TO_VIP,    ovnact_null)            \
+    OVNACT(BFD_MSG,           ovnact_null)            \
+    OVNACT(SCTP_ABORT,        ovnact_nest)            \
+    OVNACT(PUT_FDB,           ovnact_put_fdb)         \
+    OVNACT(GET_FDB,           ovnact_get_fdb)         \
+    OVNACT(LOOKUP_FDB,        ovnact_lookup_fdb)      \
 
 /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
 enum OVS_PACKED_ENUM ovnact_type {
@@ -413,6 +418,28 @@ struct ovnact_fwd_group {
     uint8_t ltable;           /* Logical table ID of next table. */
 };
 
+/* OVNACT_PUT_FDB. */
+struct ovnact_put_fdb {
+    struct ovnact ovnact;
+    struct expr_field port;     /* Logical port name. */
+    struct expr_field mac;      /* 48-bit Ethernet address. */
+};
+
+/* OVNACT_GET_FDB. */
+struct ovnact_get_fdb {
+    struct ovnact ovnact;
+    struct expr_field mac;     /* 48-bit Ethernet address. */
+    struct expr_field dst;     /* 32-bit destination field. */
+};
+
+/* OVNACT_LOOKUP_FDB. */
+struct ovnact_lookup_fdb {
+    struct ovnact ovnact;
+    struct expr_field mac;     /* 48-bit Ethernet address. */
+    struct expr_field port;    /* Logical port name. */
+    struct expr_field dst;     /* 1-bit destination field. */
+};
+
 /* Internal use by the helpers below. */
 void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
 void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
@@ -627,6 +654,22 @@ enum action_opcode {
      * The actions, in OpenFlow 1.3 format, follow the action_header.
      */
     ACTION_OPCODE_REJECT,
+
+    /* "handle_bfd_msg { ...actions... }".
+     *
+     *  The actions, in OpenFlow 1.3 format, follow the action_header.
+     */
+    ACTION_OPCODE_BFD_MSG,
+
+    /* "sctp_abort { ...actions... }".
+     *
+     * The actions, in OpenFlow 1.3 format, follow the action_header.
+     */
+    ACTION_OPCODE_SCTP_ABORT,
+
+    /* put_fdb(inport, eth.src).
+     */
+    ACTION_OPCODE_PUT_FDB,
 };
 
 /* Header. */
@@ -748,6 +791,10 @@ struct ovnact_encode_params {
                                        * 'chk_lb_hairpin_reply' to resubmit. */
     uint8_t ct_snat_vip_ptable;  /* OpenFlow table for
                                   * 'ct_snat_to_vip' to resubmit. */
+    uint8_t fdb_ptable; /* OpenFlow table for
+                         * 'get_fdb' to resubmit. */
+    uint8_t fdb_lookup_ptable; /* OpenFlow table for
+                                * 'lookup_fdb' to resubmit. */
 };
 
 void ovnacts_encode(const struct ovnact[], size_t ovnacts_len,
diff --git a/include/ovn/automake.mk b/include/ovn/automake.mk
index 54b0e2c0e..582241a57 100644
--- a/include/ovn/automake.mk
+++ b/include/ovn/automake.mk
@@ -2,5 +2,6 @@ ovnincludedir = $(includedir)/ovn
 ovninclude_HEADERS = \
 	include/ovn/actions.h \
 	include/ovn/expr.h \
+	include/ovn/features.h \
 	include/ovn/lex.h  \
 	include/ovn/logical-fields.h
diff --git a/include/ovn/expr.h b/include/ovn/expr.h
index 0a83ec7a8..c2c821818 100644
--- a/include/ovn/expr.h
+++ b/include/ovn/expr.h
@@ -477,6 +477,7 @@ uint32_t expr_to_matches(const struct expr *,
                          const void *aux,
                          struct hmap *matches);
 void expr_matches_destroy(struct hmap *matches);
+void expr_matches_prepare(struct hmap *matches, uint32_t conj_id_ofs);
 void expr_matches_print(const struct hmap *matches, FILE *);
 
 /* Action parsing helper. */
diff --git a/include/ovn/features.h b/include/ovn/features.h
new file mode 100644
index 000000000..10ee46fcd
--- /dev/null
+++ b/include/ovn/features.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2021, Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OVN_FEATURES_H
+#define OVN_FEATURES_H 1
+
+/* ovn-controller supported feature names. */
+#define OVN_FEATURE_PORT_UP_NOTIF "port-up-notif"
+
+#endif
diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h
index aee474856..017176f98 100644
--- a/include/ovn/logical-fields.h
+++ b/include/ovn/logical-fields.h
@@ -44,7 +44,13 @@ enum ovn_controller_event {
 /* Logical registers.
  *
  * Make sure these don't overlap with the logical fields! */
-#define MFF_LOG_REG0 MFF_REG0
+#define MFF_LOG_REG0             MFF_REG0
+#define MFF_LOG_LB_ORIG_DIP_IPV4 MFF_REG1
+#define MFF_LOG_LB_ORIG_TP_DPORT MFF_REG2
+
+#define MFF_LOG_XXREG0           MFF_XXREG0
+#define MFF_LOG_LB_ORIG_DIP_IPV6 MFF_XXREG1
+
 #define MFF_N_LOG_REGS 10
 
 void ovn_init_symtab(struct shash *symtab);
@@ -59,6 +65,7 @@ enum mff_log_flags_bits {
     MLF_NESTED_CONTAINER_BIT = 5,
     MLF_LOOKUP_MAC_BIT = 6,
     MLF_LOOKUP_LB_HAIRPIN_BIT = 7,
+    MLF_LOOKUP_FDB_BIT = 8,
 };
 
 /* MFF_LOG_FLAGS_REG flag assignments */
@@ -92,6 +99,9 @@ enum mff_log_flags {
     MLF_LOOKUP_MAC = (1 << MLF_LOOKUP_MAC_BIT),
 
     MLF_LOOKUP_LB_HAIRPIN = (1 << MLF_LOOKUP_LB_HAIRPIN_BIT),
+
+    /* Indicate that the lookup in the fdb table was successful. */
+    MLF_LOOKUP_FDB = (1 << MLF_LOOKUP_FDB_BIT),
 };
 
 /* OVN logical fields
diff --git a/lib/actions.c b/lib/actions.c
index fbaeb34bc..b3433f49e 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -1490,6 +1490,12 @@ parse_TCP_RESET(struct action_context *ctx)
     parse_nested_action(ctx, OVNACT_TCP_RESET, "tcp", ctx->scope);
 }
 
+static void
+parse_SCTP_ABORT(struct action_context *ctx)
+{
+    parse_nested_action(ctx, OVNACT_SCTP_ABORT, "sctp", ctx->scope);
+}
+
 static void
 parse_ND_NA(struct action_context *ctx)
 {
@@ -1571,6 +1577,12 @@ format_TCP_RESET(const struct ovnact_nest *nest, struct ds *s)
     format_nested_action(nest, "tcp_reset", s);
 }
 
+static void
+format_SCTP_ABORT(const struct ovnact_nest *nest, struct ds *s)
+{
+    format_nested_action(nest, "sctp_abort", s);
+}
+
 static void
 format_ND_NA(const struct ovnact_nest *nest, struct ds *s)
 {
@@ -1700,6 +1712,14 @@ encode_TCP_RESET(const struct ovnact_nest *on,
     encode_nested_actions(on, ep, ACTION_OPCODE_TCP_RESET, ofpacts);
 }
 
+static void
+encode_SCTP_ABORT(const struct ovnact_nest *on,
+                  const struct ovnact_encode_params *ep,
+                  struct ofpbuf *ofpacts)
+{
+    encode_nested_actions(on, ep, ACTION_OPCODE_SCTP_ABORT, ofpacts);
+}
+
 static void
 encode_REJECT(const struct ovnact_nest *on,
               const struct ovnact_encode_params *ep,
@@ -2742,6 +2762,31 @@ encode_DHCP6_REPLY(const struct ovnact_null *a OVS_UNUSED,
     encode_controller_op(ACTION_OPCODE_DHCP6_SERVER, ofpacts);
 }
 
+static void
+format_BFD_MSG(const struct ovnact_null *a OVS_UNUSED, struct ds *s)
+{
+    ds_put_cstr(s, "handle_bfd_msg();");
+}
+
+static void
+encode_BFD_MSG(const struct ovnact_null *a OVS_UNUSED,
+               const struct ovnact_encode_params *ep OVS_UNUSED,
+               struct ofpbuf *ofpacts)
+{
+    encode_controller_op(ACTION_OPCODE_BFD_MSG, ofpacts);
+}
+
+static void
+parse_handle_bfd_msg(struct action_context *ctx OVS_UNUSED)
+{
+     if (!lexer_force_match(ctx->lexer, LEX_T_LPAREN)) {
+        return;
+    }
+
+    ovnact_put_BFD_MSG(ctx->ovnacts);
+    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
+}
+
 static void
 parse_SET_QUEUE(struct action_context *ctx)
 {
@@ -3698,6 +3743,172 @@ encode_CT_SNAT_TO_VIP(const struct ovnact_null *null OVS_UNUSED,
     emit_resubmit(ofpacts, ep->ct_snat_vip_ptable);
 }
 
+static void
+format_PUT_FDB(const struct ovnact_put_fdb *put_fdb, struct ds *s)
+{
+    ds_put_cstr(s, "put_fdb(");
+    expr_field_format(&put_fdb->port, s);
+    ds_put_cstr(s, ", ");
+    expr_field_format(&put_fdb->mac, s);
+    ds_put_cstr(s, ");");
+}
+
+static void
+encode_PUT_FDB(const struct ovnact_put_fdb *put_fdb,
+               const struct ovnact_encode_params *ep OVS_UNUSED,
+               struct ofpbuf *ofpacts)
+{
+    const struct arg args[] = {
+        { expr_resolve_field(&put_fdb->port), MFF_LOG_INPORT },
+        { expr_resolve_field(&put_fdb->mac), MFF_ETH_SRC }
+    };
+    encode_setup_args(args, ARRAY_SIZE(args), ofpacts);
+    encode_controller_op(ACTION_OPCODE_PUT_FDB, ofpacts);
+    encode_restore_args(args, ARRAY_SIZE(args), ofpacts);
+}
+
+static void
+parse_put_fdb(struct action_context *ctx, struct ovnact_put_fdb *put_fdb)
+{
+    lexer_force_match(ctx->lexer, LEX_T_LPAREN);
+    action_parse_field(ctx, 0, false, &put_fdb->port);
+    lexer_force_match(ctx->lexer, LEX_T_COMMA);
+    action_parse_field(ctx, 48, false, &put_fdb->mac);
+    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
+}
+
+static void
+ovnact_put_fdb_free(struct ovnact_put_fdb *put_fdb OVS_UNUSED)
+{
+}
+
+static void
+format_GET_FDB(const struct ovnact_get_fdb *get_fdb, struct ds *s)
+{
+    expr_field_format(&get_fdb->dst, s);
+    ds_put_cstr(s, " = get_fdb(");
+    expr_field_format(&get_fdb->mac, s);
+    ds_put_cstr(s, ");");
+}
+
+static void
+encode_GET_FDB(const struct ovnact_get_fdb *get_fdb,
+               const struct ovnact_encode_params *ep,
+               struct ofpbuf *ofpacts)
+{
+    struct mf_subfield dst = expr_resolve_field(&get_fdb->dst);
+    ovs_assert(dst.field);
+
+    const struct arg args[] = {
+        { expr_resolve_field(&get_fdb->mac), MFF_ETH_DST },
+    };
+    encode_setup_args(args, ARRAY_SIZE(args), ofpacts);
+    put_load(0, MFF_LOG_OUTPORT, 0, 32, ofpacts);
+    emit_resubmit(ofpacts, ep->fdb_ptable);
+    encode_restore_args(args, ARRAY_SIZE(args), ofpacts);
+
+    if (dst.field->id != MFF_LOG_OUTPORT) {
+        struct ofpact_reg_move *orm = ofpact_put_REG_MOVE(ofpacts);
+        orm->dst = dst;
+        orm->src.field = mf_from_id(MFF_LOG_OUTPORT);
+        orm->src.ofs = 0;
+        orm->src.n_bits = 32;
+    }
+}
+
+static void
+parse_get_fdb(struct action_context *ctx,
+              struct expr_field *dst,
+              struct ovnact_get_fdb *get_fdb)
+{
+    lexer_get(ctx->lexer); /* Skip get_fdb. */
+    lexer_get(ctx->lexer); /* Skip '('. */
+
+    /* Validate that the destination is a 32-bit, modifiable field if it
+       is not a string field (i.e. 'inport' or 'outport'). */
+    if (dst->n_bits) {
+        char *error = expr_type_check(dst, 32, true, ctx->scope);
+        if (error) {
+            lexer_error(ctx->lexer, "%s", error);
+            free(error);
+            return;
+        }
+    }
+    get_fdb->dst = *dst;
+
+    action_parse_field(ctx, 48, false, &get_fdb->mac);
+    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
+}
+
+static void
+ovnact_get_fdb_free(struct ovnact_get_fdb *get_fdb OVS_UNUSED)
+{
+}
+
+static void
+format_LOOKUP_FDB(const struct ovnact_lookup_fdb *lookup_fdb, struct ds *s)
+{
+    expr_field_format(&lookup_fdb->dst, s);
+    ds_put_cstr(s, " = lookup_fdb(");
+    expr_field_format(&lookup_fdb->port, s);
+    ds_put_cstr(s, ", ");
+    expr_field_format(&lookup_fdb->mac, s);
+    ds_put_cstr(s, ");");
+}
+
+static void
+encode_LOOKUP_FDB(const struct ovnact_lookup_fdb *lookup_fdb,
+                  const struct ovnact_encode_params *ep,
+                  struct ofpbuf *ofpacts)
+{
+    const struct arg args[] = {
+        { expr_resolve_field(&lookup_fdb->port), MFF_LOG_INPORT },
+        { expr_resolve_field(&lookup_fdb->mac), MFF_ETH_SRC },
+    };
+    encode_setup_args(args, ARRAY_SIZE(args), ofpacts);
+
+    struct mf_subfield dst = expr_resolve_field(&lookup_fdb->dst);
+    ovs_assert(dst.field);
+
+    put_load(0, MFF_LOG_FLAGS, MLF_LOOKUP_FDB_BIT, 1, ofpacts);
+    emit_resubmit(ofpacts, ep->fdb_lookup_ptable);
+    encode_restore_args(args, ARRAY_SIZE(args), ofpacts);
+
+    struct ofpact_reg_move *orm = ofpact_put_REG_MOVE(ofpacts);
+    orm->dst = dst;
+    orm->src.field = mf_from_id(MFF_LOG_FLAGS);
+    orm->src.ofs = MLF_LOOKUP_FDB_BIT;
+    orm->src.n_bits = 1;
+}
+
+static void
+parse_lookup_fdb(struct action_context *ctx,
+                 struct expr_field *dst,
+                 struct ovnact_lookup_fdb *lookup_fdb)
+{
+    lexer_get(ctx->lexer); /* Skip lookup_fdb. */
+    lexer_get(ctx->lexer); /* Skip '('. */
+
+    /* Validate that the destination is a 1-bit, modifiable field. */
+    char *error = expr_type_check(dst, 1, true, ctx->scope);
+    if (error) {
+        lexer_error(ctx->lexer, "%s", error);
+        free(error);
+        return;
+    }
+    lookup_fdb->dst = *dst;
+
+    action_parse_field(ctx, 0, false, &lookup_fdb->port);
+    lexer_force_match(ctx->lexer, LEX_T_COMMA);
+    action_parse_field(ctx, 48, false, &lookup_fdb->mac);
+    lexer_force_match(ctx->lexer, LEX_T_RPAREN);
+}
+
+static void
+ovnact_lookup_fdb_free(struct ovnact_lookup_fdb *get_fdb OVS_UNUSED)
+{
+}
+
 /* Parses an assignment or exchange or put_dhcp_opts action. */
 static void
 parse_set_action(struct action_context *ctx)
@@ -3758,6 +3969,14 @@ parse_set_action(struct action_context *ctx)
                    && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
             parse_chk_lb_hairpin_reply(
                 ctx, &lhs, ovnact_put_CHK_LB_HAIRPIN_REPLY(ctx->ovnacts));
+        } else if (!strcmp(ctx->lexer->token.s, "get_fdb")
+                   && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+            parse_get_fdb(
+                ctx, &lhs, ovnact_put_GET_FDB(ctx->ovnacts));
+        } else if (!strcmp(ctx->lexer->token.s, "lookup_fdb")
+                   && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+            parse_lookup_fdb(
+                ctx, &lhs, ovnact_put_LOOKUP_FDB(ctx->ovnacts));
         } else {
             parse_assignment_action(ctx, false, &lhs);
         }
@@ -3812,6 +4031,8 @@ parse_action(struct action_context *ctx)
         ovnact_put_IGMP(ctx->ovnacts);
     } else if (lexer_match_id(ctx->lexer, "tcp_reset")) {
         parse_TCP_RESET(ctx);
+    } else if (lexer_match_id(ctx->lexer, "sctp_abort")) {
+        parse_SCTP_ABORT(ctx);
     } else if (lexer_match_id(ctx->lexer, "nd_na")) {
         parse_ND_NA(ctx);
     } else if (lexer_match_id(ctx->lexer, "nd_na_router")) {
@@ -3842,10 +4063,14 @@ parse_action(struct action_context *ctx)
         parse_fwd_group_action(ctx);
     } else if (lexer_match_id(ctx->lexer, "handle_dhcpv6_reply")) {
         ovnact_put_DHCP6_REPLY(ctx->ovnacts);
+    } else if (lexer_match_id(ctx->lexer, "handle_bfd_msg")) {
+        parse_handle_bfd_msg(ctx);
     } else if (lexer_match_id(ctx->lexer, "reject")) {
         parse_REJECT(ctx);
     } else if (lexer_match_id(ctx->lexer, "ct_snat_to_vip")) {
         ovnact_put_CT_SNAT_TO_VIP(ctx->ovnacts);
+    } else if (lexer_match_id(ctx->lexer, "put_fdb")) {
+        parse_put_fdb(ctx, ovnact_put_PUT_FDB(ctx->ovnacts));
     } else {
         lexer_syntax_error(ctx->lexer, "expecting action");
     }
diff --git a/lib/expr.c b/lib/expr.c
index 4566d9110..796e88ac7 100644
--- a/lib/expr.c
+++ b/lib/expr.c
@@ -3125,6 +3125,25 @@ expr_to_matches(const struct expr *expr,
     return n_conjs;
 }
 
+/* Prepares the expr matches in the hmap 'matches' by updating the
+ * conj id offsets specified in 'conj_id_ofs'.
+ */
+void
+expr_matches_prepare(struct hmap *matches, uint32_t conj_id_ofs)
+{
+    struct expr_match *m;
+    HMAP_FOR_EACH (m, hmap_node, matches) {
+        if (m->match.wc.masks.conj_id) {
+            m->match.flow.conj_id += conj_id_ofs;
+        }
+
+        for (size_t i = 0; i < m->n; i++) {
+            struct cls_conjunction *src = &m->conjunctions[i];
+            src->id += conj_id_ofs;
+        }
+    }
+}
+
 /* Destroys all of the 'struct expr_match'es in 'matches', as well as the
  * 'matches' hmap itself. */
 void
diff --git a/lib/lb.c b/lib/lb.c
index a90042e58..f305e9a87 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -170,6 +170,24 @@ void ovn_northd_lb_vip_destroy(struct ovn_northd_lb_vip *vip)
     free(vip->backends_nb);
 }
 
+static void
+ovn_lb_get_hairpin_snat_ip(const struct uuid *lb_uuid,
+                           const struct smap *lb_options,
+                           struct lport_addresses *hairpin_addrs)
+{
+    const char *addresses = smap_get(lb_options, "hairpin_snat_ip");
+
+    if (!addresses) {
+        return;
+    }
+
+    if (!extract_ip_address(addresses, hairpin_addrs)) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "bad hairpin_snat_ip %s in load balancer "UUID_FMT,
+                     addresses, UUID_ARGS(lb_uuid));
+    }
+}
+
 struct ovn_northd_lb *
 ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb,
                      struct hmap *ports,
@@ -189,6 +207,8 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb,
         struct ovn_lb_vip *lb_vip = &lb->vips[n_vips];
         struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[n_vips];
 
+        lb_vip->empty_backend_rej = smap_get_bool(&nbrec_lb->options,
+                                                  "reject", false);
         if (!ovn_lb_vip_init(lb_vip, node->key, node->value)) {
             continue;
         }
@@ -222,6 +242,9 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb,
         ds_chomp(&sel_fields, ',');
         lb->selection_fields = ds_steal_cstr(&sel_fields);
     }
+
+    ovn_lb_get_hairpin_snat_ip(&nbrec_lb->header_.uuid, &nbrec_lb->options,
+                               &lb->hairpin_snat_ips);
     return lb;
 }
 
@@ -258,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
     free(lb->vips);
     free(lb->vips_nb);
     free(lb->selection_fields);
+    destroy_lport_addresses(&lb->hairpin_snat_ips);
     free(lb->dps);
     free(lb);
 }
@@ -287,6 +311,12 @@ ovn_controller_lb_create(const struct sbrec_load_balancer *sbrec_lb)
      * correct value.
      */
     lb->n_vips = n_vips;
+
+    lb->hairpin_orig_tuple = smap_get_bool(&sbrec_lb->options,
+                                           "hairpin_orig_tuple",
+                                           false);
+    ovn_lb_get_hairpin_snat_ip(&sbrec_lb->header_.uuid, &sbrec_lb->options,
+                               &lb->hairpin_snat_ips);
     return lb;
 }
 
@@ -297,5 +327,6 @@ ovn_controller_lb_destroy(struct ovn_controller_lb *lb)
         ovn_lb_vip_destroy(&lb->vips[i]);
     }
     free(lb->vips);
+    destroy_lport_addresses(&lb->hairpin_snat_ips);
     free(lb);
 }
diff --git a/lib/lb.h b/lib/lb.h
index 6644ad0d8..9a78c72f3 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 #include <netinet/in.h>
 #include "openvswitch/hmap.h"
+#include "ovn-util.h"
 
 struct nbrec_load_balancer;
 struct sbrec_load_balancer;
@@ -37,6 +38,11 @@ struct ovn_northd_lb {
     struct ovn_northd_lb_vip *vips_nb;
     size_t n_vips;
 
+    struct lport_addresses hairpin_snat_ips; /* IP (v4 and/or v6) to be used
+                                              * as source for hairpinned
+                                              * traffic.
+                                              */
+
     size_t n_dps;
     size_t n_allocated_dps;
     const struct sbrec_datapath_binding **dps;
@@ -49,6 +55,7 @@ struct ovn_lb_vip {
 
     struct ovn_lb_backend *backends;
     size_t n_backends;
+    bool empty_backend_rej;
 };
 
 struct ovn_lb_backend {
@@ -88,6 +95,14 @@ struct ovn_controller_lb {
 
     struct ovn_lb_vip *vips;
     size_t n_vips;
+    bool hairpin_orig_tuple; /* True if ovn-northd stores the original
+                              * destination tuple in registers.
+                              */
+
+    struct lport_addresses hairpin_snat_ips; /* IP (v4 and/or v6) to be used
+                                              * as source for hairpinned
+                                              * traffic.
+                                              */
 };
 
 struct ovn_controller_lb *ovn_controller_lb_create(
diff --git a/lib/ovn-l7.h b/lib/ovn-l7.h
index c84a0e7a9..d00982449 100644
--- a/lib/ovn-l7.h
+++ b/lib/ovn-l7.h
@@ -26,6 +26,25 @@
 #include "hash.h"
 #include "ovn/logical-fields.h"
 
+#define BFD_PACKET_LEN  24
+#define BFD_DEST_PORT   3784
+#define BFD_VERSION     1
+#define BFD_DEFAULT_SRC_IP 0xA9FE0101 /* 169.254.1.1 */
+#define BFD_DEFAULT_DST_IP 0xA9FE0100 /* 169.254.1.0 */
+
+struct bfd_msg {
+    uint8_t vers_diag;
+    uint8_t flags;
+    uint8_t mult;
+    uint8_t length;
+    ovs_be32 my_disc;
+    ovs_be32 your_disc;
+    ovs_be32 min_tx;
+    ovs_be32 min_rx;
+    ovs_be32 min_rx_echo;
+};
+BUILD_ASSERT_DECL(BFD_PACKET_LEN == sizeof(struct bfd_msg));
+
 /* Generic options map which is used to store dhcpv4 opts and dhcpv6 opts. */
 struct gen_opts_map {
     struct hmap_node hmap_node;
diff --git a/lib/ovn-util.c b/lib/ovn-util.c
index 2136f90fe..8f6719471 100644
--- a/lib/ovn-util.c
+++ b/lib/ovn-util.c
@@ -232,6 +232,27 @@ extract_ip_addresses(const char *address, struct lport_addresses *laddrs)
     return false;
 }
 
+/* Extracts at most one IPv4 and at most one IPv6 address from 'address'
+ * which should be of the format 'IP1 [IP2]'.
+ *
+ * Return true if at most one IPv4 address and at most one IPv6 address
+ * is found in 'address'.  IPs must be host IPs, i.e., no unmasked bits.
+ *
+ * The caller must call destroy_lport_addresses().
+ */
+bool extract_ip_address(const char *address, struct lport_addresses *laddrs)
+{
+    if (!extract_ip_addresses(address, laddrs) ||
+            laddrs->n_ipv4_addrs > 1 ||
+            laddrs->n_ipv6_addrs > 1 ||
+            (laddrs->n_ipv4_addrs && laddrs->ipv4_addrs[0].plen != 32) ||
+            (laddrs->n_ipv6_addrs && laddrs->ipv6_addrs[0].plen != 128)) {
+        destroy_lport_addresses(laddrs);
+        return false;
+    }
+    return true;
+}
+
 /* Extracts the mac, IPv4 and IPv6 addresses from the
  * "nbrec_logical_router_port" parameter 'lrp'.  Stores the IPv4 and
  * IPv6 addresses in the 'ipv4_addrs' and 'ipv6_addrs' fields of
@@ -559,18 +580,30 @@ ovn_destroy_tnlids(struct hmap *tnlids)
     hmap_destroy(tnlids);
 }
 
+/* Returns true if 'tnlid' is present in the hmap 'tnlids'. */
 bool
-ovn_add_tnlid(struct hmap *set, uint32_t tnlid)
+ovn_tnlid_present(struct hmap *tnlids, uint32_t tnlid)
 {
     uint32_t hash = hash_int(tnlid, 0);
     struct tnlid_node *node;
-    HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash, set) {
+    HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash, tnlids) {
         if (node->tnlid == tnlid) {
-            return false;
+            return true;
         }
     }
 
-    node = xmalloc(sizeof *node);
+    return false;
+}
+
+bool
+ovn_add_tnlid(struct hmap *set, uint32_t tnlid)
+{
+    if (ovn_tnlid_present(set, tnlid)) {
+        return false;
+    }
+
+    uint32_t hash = hash_int(tnlid, 0);
+    struct tnlid_node *node = xmalloc(sizeof *node);
     hmap_insert(set, &node->hmap_node, hash);
     node->tnlid = tnlid;
     return true;
diff --git a/lib/ovn-util.h b/lib/ovn-util.h
index 679f47a97..40ecafe57 100644
--- a/lib/ovn-util.h
+++ b/lib/ovn-util.h
@@ -72,6 +72,7 @@ bool extract_addresses(const char *address, struct lport_addresses *,
                        int *ofs);
 bool extract_lsp_addresses(const char *address, struct lport_addresses *);
 bool extract_ip_addresses(const char *address, struct lport_addresses *);
+bool extract_ip_address(const char *address, struct lport_addresses *);
 bool extract_lrp_networks(const struct nbrec_logical_router_port *,
                           struct lport_addresses *);
 bool extract_sbrec_binding_first_mac(const struct sbrec_port_binding *binding,
@@ -125,6 +126,7 @@ void ovn_conn_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
 struct hmap;
 void ovn_destroy_tnlids(struct hmap *tnlids);
 bool ovn_add_tnlid(struct hmap *set, uint32_t tnlid);
+bool ovn_tnlid_present(struct hmap *tnlids, uint32_t tnlid);
 uint32_t ovn_allocate_tnlid(struct hmap *set, const char *name, uint32_t min,
                             uint32_t max, uint32_t *hint);
 
@@ -227,4 +229,40 @@ bool ip_address_and_port_from_lb_key(const char *key, char **ip_address,
  * value. */
 char *ovn_get_internal_version(void);
 
+
+/* OVN Packet definitions. These may eventually find a home in OVS's
+ * packets.h file. For the time being, they live here because OVN uses them
+ * and OVS does not.
+ */
+#define SCTP_CHUNK_HEADER_LEN 4
+struct sctp_chunk_header {
+    uint8_t sctp_chunk_type;
+    uint8_t sctp_chunk_flags;
+    ovs_be16 sctp_chunk_len;
+};
+BUILD_ASSERT_DECL(SCTP_CHUNK_HEADER_LEN == sizeof(struct sctp_chunk_header));
+
+#define SCTP_INIT_CHUNK_LEN 16
+struct sctp_init_chunk {
+    ovs_be32 initiate_tag;
+    ovs_be32 a_rwnd;
+    ovs_be16 num_outbound_streams;
+    ovs_be16 num_inbound_streams;
+    ovs_be32 initial_tsn;
+};
+BUILD_ASSERT_DECL(SCTP_INIT_CHUNK_LEN == sizeof(struct sctp_init_chunk));
+
+/* These are the only SCTP chunk types that OVN cares about.
+ * There is no need to define the other chunk types until they are
+ * needed.
+ */
+#define SCTP_CHUNK_TYPE_INIT  1
+#define SCTP_CHUNK_TYPE_ABORT 6
+
+/* See RFC 4960 Sections 3.3.7 and 8.5.1 for information on this flag. */
+#define SCTP_ABORT_CHUNK_FLAG_T (1 << 0)
+
+/* The number of tables for the ingress and egress pipelines. */
+#define LOG_PIPELINE_LEN 29
+
 #endif
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index a9a3a9f4f..55b1c9655 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -307,7 +307,73 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 3: <code>from-lport</code> Pre-ACLs</h3>
+    <h3>Ingress Table 3: Lookup MAC address learning table</h3>
+
+    <p>
+      This table looks up the MAC learning table of the logical switch
+      datapath to check if the <code>port-mac</code> pair is present
+      or not. MAC is learnt only for logical switch VIF ports whose
+      port security is disabled and 'unknown' address set.
+    </p>
+
+    <ul>
+      <li>
+        <p>
+          For each such logical port <var>p</var> whose port security
+          is disabled and 'unknown' address set following flow
+          is added.
+        </p>
+
+        <ul>
+          <li>
+            Priority 100 flow with the match
+            <code>inport == <var>p</var></code> and action
+            <code>reg0[11] = lookup_fdb(inport, eth.src); next;</code>
+          </li>
+        </ul>
+      </li>
+
+      <li>
+        One priority-0 fallback flow that matches all packets and advances to
+        the next table.
+      </li>
+    </ul>
+
+    <h3>Ingress Table 4: Learn MAC of 'unknown' ports.</h3>
+
+    <p>
+      This table learns the MAC addresses seen on the logical ports
+      whose port security is disabled and 'unknown' address set
+      if the <code>lookup_fdb</code> action returned false in the
+      previous table.
+    </p>
+
+    <ul>
+      <li>
+        <p>
+          For each such logical port <var>p</var> whose port security
+          is disabled and 'unknown' address set following flow
+          is added.
+        </p>
+
+        <ul>
+          <li>
+            Priority 100 flow with the match
+            <code>inport == <var>p</var> &amp;&amp; reg0[11] == 0</code> and
+            action <code>put_fdb(inport, eth.src); next;</code> which stores
+            the <code>port-mac</code> in the mac learning table of the
+            logical switch datapath and advances the packet to the next table.
+          </li>
+        </ul>
+      </li>
+
+      <li>
+        One priority-0 fallback flow that matches all packets and advances to
+        the next table.
+      </li>
+    </ul>
+
+    <h3>Ingress Table 5: <code>from-lport</code> Pre-ACLs</h3>
 
     <p>
       This table prepares flows for possible stateful ACL processing in
@@ -332,7 +398,7 @@
       db="OVN_Northbound"/> table.
     </p>
 
-    <h3>Ingress Table 4: Pre-LB</h3>
+    <h3>Ingress Table 6: Pre-LB</h3>
 
     <p>
       This table prepares flows for possible stateful load balancing processing
@@ -399,7 +465,7 @@
       logical router datapath to logical switch datapath.
     </p>
 
-    <h3>Ingress Table 5: Pre-stateful</h3>
+    <h3>Ingress Table 7: Pre-stateful</h3>
 
     <p>
       This table prepares flows for all possible stateful processing
@@ -410,12 +476,13 @@
       <code>ct_next;</code> action.
     </p>
 
-    <h3>Ingress Table 6: <code>from-lport</code> ACL hints</h3>
+    <h3>Ingress Table 8: <code>from-lport</code> ACL hints</h3>
 
     <p>
       This table consists of logical flows that set hints
       (<code>reg0</code> bits) to be used in the next stage, in the ACL
-      processing table. Multiple hints can be set for the same packet.
+      processing table, if stateful ACLs or load balancers are configured.
+      Multiple hints can be set for the same packet.
       The possible hints are:
     </p>
     <ul>
@@ -489,7 +556,7 @@
       </li>
     </ul>
 
-    <h3>Ingress table 7: <code>from-lport</code> ACLs</h3>
+    <h3>Ingress table 9: <code>from-lport</code> ACLs</h3>
 
     <p>
       Logical flows in this table closely reproduce those in the
@@ -518,8 +585,9 @@
         flows with the
         <code>tcp_reset { output &lt;-&gt; inport;
         next(pipeline=egress,table=5);}</code>
-        action for TCP connections and <code>icmp4/icmp6</code> action
-        for UDP connections.
+        action for TCP connections, <code>icmp4/icmp6</code> action
+        for UDP connections, and <code>sctp_abort {output &lt;-&gt; inport;
+        next(pipeline=egress,table=5);}</code> action for SCTP associations.
       </li>
       <li>
         Other ACLs translate to <code>drop;</code> for new or untracked
@@ -597,7 +665,7 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 8: <code>from-lport</code> QoS Marking</h3>
+    <h3>Ingress Table 10: <code>from-lport</code> QoS Marking</h3>
 
     <p>
       Logical flows in this table closely reproduce those in the
@@ -619,7 +687,7 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 9: <code>from-lport</code> QoS Meter</h3>
+    <h3>Ingress Table 11: <code>from-lport</code> QoS Meter</h3>
 
     <p>
       Logical flows in this table closely reproduce those in the
@@ -641,7 +709,7 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 10: LB</h3>
+    <h3>Ingress Table 12: LB</h3>
 
     <p>
       It contains a priority-0 flow that simply moves traffic to the next
@@ -667,7 +735,7 @@
       connection.)
     </p>
 
-    <h3>Ingress Table 11: Stateful</h3>
+    <h3>Ingress Table 13: Stateful</h3>
 
     <ul>
       <li>
@@ -687,7 +755,11 @@
         of <var>VIP</var>. If health check is enabled, then <var>args</var>
         will only contain those endpoints whose service monitor status entry
         in <code>OVN_Southbound</code> db is either <code>online</code> or
-        empty.
+        empty.  For IPv4 traffic the flow also loads the original destination
+        IP and transport port in registers <code>reg1</code> and
+        <code>reg2</code>.  For IPv6 traffic the flow also loads the original
+        destination IP and transport port in registers <code>xxreg1</code> and
+        <code>reg2</code>.
       </li>
       <li>
         For all the configured load balancing rules for a switch in
@@ -699,40 +771,54 @@
         VIP</var></code>. The action on this flow is <code>
         ct_lb(<var>args</var>)</code>, where <var>args</var> contains comma
         separated IP addresses of the same address family as <var>VIP</var>.
+        For IPv4 traffic the flow also loads the original destination
+        IP and transport port in registers <code>reg1</code> and
+        <code>reg2</code>.  For IPv6 traffic the flow also loads the original
+        destination IP and transport port in registers <code>xxreg1</code> and
+        <code>reg2</code>.
+      </li>
+
+      <li>
+        If the load balancer is created with <code>--reject</code> option and
+        it has no active backends, a TCP reset segment (for tcp) or an ICMP
+        port unreachable packet (for all other kind of traffic) will be sent
+        whenever an incoming packet is received for this load-balancer.
+        Please note using <code>--reject</code> option will disable
+        empty_lb SB controller event for this load balancer.
       </li>
+
       <li>
         A priority-100 flow commits packets to connection tracker using
         <code>ct_commit; next;</code> action based on a hint provided by
         the previous tables (with a match for <code>reg0[1] == 1</code>).
       </li>
       <li>
-        A priority-100 flow sends the packets to connection tracker using
+        Priority-100 flows that send the packets to connection tracker using
         <code>ct_lb;</code> as the action based on a hint provided by the
-        previous tables (with a match for <code>reg0[2] == 1</code>).
+        previous tables (with a match for <code>reg0[2] == 1</code> and
+        on supported load balancer protocols and address families).
+        For IPv4 traffic the flows also load the original destination
+        IP and transport port in registers <code>reg1</code> and
+        <code>reg2</code>.  For IPv6 traffic the flows also load the original
+        destination IP and transport port in registers <code>xxreg1</code> and
+        <code>reg2</code>.
       </li>
       <li>
         A priority-0 flow that simply moves traffic to the next table.
       </li>
     </ul>
 
-    <h3>Ingress Table 12: Pre-Hairpin</h3>
+    <h3>Ingress Table 14: Pre-Hairpin</h3>
     <ul>
       <li>
         If the logical switch has load balancer(s) configured, then a
-        priorirty-100 flow is added with the match
-        <code>ip &amp;&amp; ct.trk&amp;&amp; ct.dnat</code> to check if the
+        priority-100 flow is added with the match
+        <code>ip &amp;&amp; ct.trk</code> to check if the
         packet needs to be hairpinned (if after load balancing the destination
-        IP matches the source IP) or not by executing the action
-        <code>reg0[6] = chk_lb_hairpin();</code> and advances the packet to
-        the next table.
-      </li>
-
-      <li>
-        If the logical switch has load balancer(s) configured, then a
-        priorirty-90 flow is added with the match <code>ip</code> to check if
-        the packet is a reply for a hairpinned connection or not by executing
-        the action <code>reg0[6] = chk_lb_hairpin_reply();</code> and advances
-        the packet to the next table.
+        IP matches the source IP) or not by executing the actions
+        <code>reg0[6] = chk_lb_hairpin();</code> and
+        <code>reg0[12] = chk_lb_hairpin_reply();</code> and advances the packet
+        to the next table.
       </li>
 
       <li>
@@ -740,21 +826,30 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 13: Nat-Hairpin</h3>
+    <h3>Ingress Table 15: Nat-Hairpin</h3>
     <ul>
       <li>
          If the logical switch has load balancer(s) configured, then a
-         priorirty-100 flow is added with the match
-         <code>ip &amp;&amp; (ct.new || ct.est) &amp;&amp; ct.trk &amp;&amp;
-         ct.dnat &amp;&amp; reg0[6] == 1</code> which hairpins the traffic by
+         priority-100 flow is added with the match
+         <code>ip &amp;&amp; ct.new &amp;&amp; ct.trk &amp;&amp;
+         reg0[6] == 1</code> which hairpins the traffic by
          NATting source IP to the load balancer VIP by executing the action
          <code>ct_snat_to_vip</code> and advances the packet to the next table.
       </li>
 
       <li>
          If the logical switch has load balancer(s) configured, then a
-         priorirty-90 flow is added with the match
-         <code>ip &amp;&amp; reg0[6] == 1</code> which matches on the replies
+         priority-100 flow is added with the match
+         <code>ip &amp;&amp; ct.est &amp;&amp; ct.trk &amp;&amp;
+         reg0[6] == 1</code> which hairpins the traffic by
+         NATting source IP to the load balancer VIP by executing the action
+         <code>ct_snat</code> and advances the packet to the next table.
+      </li>
+
+      <li>
+         If the logical switch has load balancer(s) configured, then a
+         priority-90 flow is added with the match
+         <code>ip &amp;&amp; reg0[12] == 1</code> which matches on the replies
          of hairpinned traffic (i.e., destination IP is VIP,
          source IP is the backend IP and source L4 port is backend port for L4
          load balancers) and executes <code>ct_snat</code> and advances the
@@ -766,7 +861,7 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 14: Hairpin</h3>
+    <h3>Ingress Table 16: Hairpin</h3>
     <ul>
       <li>
         A priority-1 flow that hairpins traffic matched by non-default
@@ -779,7 +874,7 @@
       </li>
     </ul>
 
-    <h3>Ingress Table 15: ARP/ND responder</h3>
+    <h3>Ingress Table 17: ARP/ND responder</h3>
 
     <p>
       This table implements ARP/ND responder in a logical switch for known
@@ -1069,7 +1164,7 @@ output;
       </li>
     </ul>
 
-    <h3>Ingress Table 16: DHCP option processing</h3>
+    <h3>Ingress Table 18: DHCP option processing</h3>
 
     <p>
       This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -1130,7 +1225,7 @@ next;
       </li>
     </ul>
 
-    <h3>Ingress Table 17: DHCP responses</h3>
+    <h3>Ingress Table 19: DHCP responses</h3>
 
     <p>
       This table implements DHCP responder for the DHCP replies generated by
@@ -1211,7 +1306,7 @@ output;
       </li>
     </ul>
 
-    <h3>Ingress Table 18 DNS Lookup</h3>
+    <h3>Ingress Table 20 DNS Lookup</h3>
 
     <p>
       This table looks up and resolves the DNS names to the corresponding
@@ -1240,7 +1335,7 @@ reg0[4] = dns_lookup(); next;
       </li>
     </ul>
 
-    <h3>Ingress Table 19 DNS Responses</h3>
+    <h3>Ingress Table 21 DNS Responses</h3>
 
     <p>
       This table implements DNS responder for the DNS replies generated by
@@ -1275,7 +1370,7 @@ output;
       </li>
     </ul>
 
-    <h3>Ingress table 20 External ports</h3>
+    <h3>Ingress table 22 External ports</h3>
 
     <p>
       Traffic from the <code>external</code> logical ports enter the ingress
@@ -1318,7 +1413,7 @@ output;
       </li>
     </ul>
 
-    <h3>Ingress Table 21 Destination Lookup</h3>
+    <h3>Ingress Table 23 Destination Lookup</h3>
 
     <p>
       This table implements switching behavior.  It contains these logical
@@ -1481,12 +1576,58 @@ output;
       </li>
 
       <li>
-        One priority-0 fallback flow that matches all packets and outputs them
-        to the <code>MC_UNKNOWN</code> multicast group, which
-        <code>ovn-northd</code> populates with all enabled logical ports that
-        accept unknown destination packets.  As a small optimization, if no
-        logical ports accept unknown destination packets,
-        <code>ovn-northd</code> omits this multicast group and logical flow.
+        One priority-0 fallback flow that matches all packets with the
+        action <code>outport = get_fdb(eth.dst); next;</code>. The action
+        <code>get_fdb</code> gets the port for the <code>eth.dst</code>
+        in the MAC learning table of the logical switch datapath. If there
+        is no entry for <code>eth.dst</code> in the MAC learning table,
+        then it stores <code>none</code> in the <code>outport</code>.
+      </li>
+    </ul>
+
+    <h3>Ingress Table 24 Destination unknown</h3>
+
+    <p>
+      This table handles the packets whose destination was not found
+      or could not be looked up in the MAC learning table of the logical
+      switch datapath. It contains the following flows.
+    </p>
+
+    <ul>
+      <li>
+        <p>
+          If the logical switch has logical ports with 'unknown' addresses set,
+          then the below logical flow is added
+        </p>
+
+        <ul>
+          <li>
+            Priority 50 flow with the match <code>outport == none</code> then
+            outputs them to the <code>MC_UNKNOWN</code> multicast group, which
+            <code>ovn-northd</code> populates with all enabled logical ports
+            that accept unknown destination packets.  As a small optimization,
+            if no logical ports accept unknown destination packets,
+            <code>ovn-northd</code> omits this multicast group and logical
+            flow.
+          </li>
+        </ul>
+
+        <p>
+          If the logical switch has no logical ports with 'unknown' address
+          set, then the below logical flow is added
+        </p>
+
+        <ul>
+          <li>
+            Priority 50 flow with the match <code>outport == none</code>
+            and drops the packets.
+          </li>
+        </ul>
+      </li>
+
+      <li>
+        One priority-0 fallback flow that outputs the packet to the egress
+        stage with the outport learnt from <code>get_fdb</code> action.
       </li>
     </ul>
 
@@ -1926,6 +2067,27 @@ next;
         </p>
       </li>
 
+      <li>
+        <p>
+            For each BFD port the two following priority-110 flows are added
+            to manage BFD traffic:
+
+            <ul>
+              <li>
+               if <code>ip4.src</code> or <code>ip6.src</code> is any IP
+               address owned by the router port and <code>udp.dst == 3784
+               </code>, the packet is advanced to the next pipeline stage.
+              </li>
+
+              <li>
+               if <code>ip4.dst</code> or <code>ip6.dst</code> is any IP
+               address owned by the router port and <code>udp.dst == 3784
+               </code>, the <code>handle_bfd_msg</code> action is executed.
+              </li>
+            </ul>
+        </p>
+      </li>
+
       <li>
         <p>
           L3 admission control: A priority-100 flow drops packets that match
@@ -2449,6 +2611,16 @@ icmp6 {
           with an action <code>ct_snat; </code>.
         </p>
 
+        <p>
+          If the Gateway router is configured with
+          <code>lb_force_snat_ip=router_ip</code> then for every logical router
+          port <var>P</var> attached to the Gateway router with the router ip
+          <var>B</var>, a priority-110 flow is added with the match
+          <code>inport == <var>P</var> &amp;&amp; ip4.dst == <var>B</var></code> or
+          <code>inport == <var>P</var> &amp;&amp; ip6.dst == <var>B</var></code>
+          with an action <code>ct_snat; </code>.
+        </p>
+
         <p>
           If the Gateway router has been configured to force SNAT any
           previously load-balanced packets to <var>B</var>, a priority-100 flow
@@ -2592,6 +2764,15 @@ icmp6 {
         packets, the above action will be replaced by
         <code>flags.force_snat_for_lb = 1; ct_dnat;</code>.
       </li>
+
+      <li>
+        If the load balancer is created with <code>--reject</code> option and
+        it has no active backends, a TCP reset segment (for tcp) or an ICMP
+        port unreachable packet (for all other kinds of traffic) will be sent
+        whenever an incoming packet is received for this load-balancer.
+        Please note using <code>--reject</code> option will disable
+        empty_lb SB controller event for this load balancer.
+      </li>
     </ul>
 
     <p>Ingress Table 6: DNAT on Gateway Routers</p>
@@ -3022,14 +3203,36 @@ outport = <var>P</var>;
 
       <li>
         <p>
-          If the policy action is <code>reroute</code>, then the logical
-          flow is added with the following actions:
+          If the policy action is <code>reroute</code> with 2 or more nexthops
+          defined, then the logical flow is added with the following actions:
+        </p>
+
+         <pre>
+reg8[0..15] = <var>GID</var>;
+reg8[16..31] = select(1,..n);
+        </pre>
+
+        <p>
+          where <var>GID</var> is the ECMP group id generated by
+          <code>ovn-northd</code> for this policy and <var>n</var>
+          is the number of nexthops. <code>select</code> action
+          selects one of the nexthop member id, stores it in the register
+          <code>reg8[16..31]</code> and advances the packet to the
+          next stage.
+        </p>
+      </li>
+
+      <li>
+        <p>
+          If the policy action is <code>reroute</code> with just one nexthop,
+          then the logical flow is added with the following actions:
         </p>
 
          <pre>
 [xx]reg0 = <var>H</var>;
 eth.src = <var>E</var>;
 outport = <var>P</var>;
+reg8[0..15] = 0;
 flags.loopback = 1;
 next;
         </pre>
@@ -3053,7 +3256,51 @@ next;
       </li>
     </ul>
 
-    <h3>Ingress Table 13: ARP/ND Resolution</h3>
+    <h3>Ingress Table 13: ECMP handling for router policies</h3>
+    <p>
+      This table handles the ECMP for the router policies configured
+      with multiple nexthops.
+    </p>
+
+    <ul>
+      <li>
+        <p>
+          A priority-150 flow is added to advance the packet to the next stage
+          if the ECMP group id register <code>reg8[0..15]</code> is 0.
+        </p>
+      </li>
+
+      <li>
+        <p>
+          For each ECMP reroute router policy with multiple nexthops,
+          a priority-100 flow is added for each nexthop <var>H</var>
+          with the match <code>reg8[0..15] == <var>GID</var> &amp;&amp;
+          reg8[16..31] == <var>M</var></code> where <var>GID</var>
+          is the router policy group id generated by <code>ovn-northd</code>
+          and <var>M</var> is the member id of the nexthop <var>H</var>
+          generated by <code>ovn-northd</code>. The following actions are added
+          to the flow:
+        </p>
+
+        <pre>
+[xx]reg0 = <var>H</var>;
+eth.src = <var>E</var>;
+outport = <var>P</var>;
+flags.loopback = 1;
+next;
+        </pre>
+
+        <p>
+          where <var>H</var> is the <code>nexthop </code> defined in the
+          router policy, <var>E</var> is the ethernet address of the
+          logical router port from which the <code>nexthop</code> is
+          reachable and <var>P</var> is the logical router port from
+          which the <code>nexthop</code> is reachable.
+        </p>
+      </li>
+    </ul>
+
+    <h3>Ingress Table 14: ARP/ND Resolution</h3>
 
     <p>
       Any packet that reaches this table is an IP packet whose next-hop
@@ -3239,7 +3486,7 @@ next;
 
     </ul>
 
-    <h3>Ingress Table 14: Check packet length</h3>
+    <h3>Ingress Table 15: Check packet length</h3>
 
     <p>
       For distributed logical routers with distributed gateway port configured
@@ -3269,7 +3516,7 @@ REGBIT_PKT_LARGER = check_pkt_larger(<var>L</var>); next;
       and advances to the next table.
     </p>
 
-    <h3>Ingress Table 15: Handle larger packets</h3>
+    <h3>Ingress Table 16: Handle larger packets</h3>
 
     <p>
       For distributed logical routers with distributed gateway port configured
@@ -3330,7 +3577,7 @@ icmp6 {
       and advances to the next table.
     </p>
 
-    <h3>Ingress Table 16: Gateway Redirect</h3>
+    <h3>Ingress Table 17: Gateway Redirect</h3>
 
     <p>
       For distributed logical routers where one of the logical router
@@ -3370,7 +3617,7 @@ icmp6 {
       </li>
     </ul>
 
-    <h3>Ingress Table 17: ARP Request</h3>
+    <h3>Ingress Table 18: ARP Request</h3>
 
     <p>
       In the common case where the Ethernet destination has been resolved, this
@@ -3546,6 +3793,32 @@ nd_ns {
           <code>flags.force_snat_for_dnat == 1 &amp;&amp; ip</code> with an
           action <code>ct_snat(<var>B</var>);</code>.
         </p>
+      </li>
+
+      <li>
+        <p>
+          If the Gateway router in the OVN Northbound database has been
+          configured to force SNAT a packet (that has been previously
+          load-balanced) using router IP (i.e <ref column="options"
+          table="Logical_Router"/>:lb_force_snat_ip=router_ip), then for
+          each logical router port <var>P</var> attached to the Gateway
+          router, a priority-110 flow matches
+          <code>flags.force_snat_for_lb == 1 &amp;&amp; outport == <var>P</var>
+          </code> with an action <code>ct_snat(<var>R</var>);</code>
+          where <var>R</var> is the IP configured on the router port.
+          If <code>R</code> is an IPv4 address then the match will also
+          include <code>ip4</code> and if it is an IPv6 address, then the
+          match will also include <code>ip6</code>.
+        </p>
+
+        <p>
+          If the logical router port <var>P</var> is configured with multiple
+          IPv4 and multiple IPv6 addresses, only the first IPv4 and first IPv6
+          address is considered.
+        </p>
+      </li>
+
+      <li>
         <p>
           If the Gateway router in the OVN Northbound database has been
           configured to force SNAT a packet (that has been previously
@@ -3553,6 +3826,9 @@ nd_ns {
           <code>flags.force_snat_for_lb == 1 &amp;&amp; ip</code> with an
           action <code>ct_snat(<var>B</var>);</code>.
         </p>
+      </li>
+
+      <li>
         <p>
           For each configuration in the OVN Northbound database, that asks
           to change the source IP address of a packet from an IP address of
@@ -3566,14 +3842,18 @@ nd_ns {
           options, then the action would be <code>ip4/6.src=
           (<var>B</var>)</code>.
         </p>
+      </li>
 
+      <li>
         <p>
           If the NAT rule has <code>allowed_ext_ips</code> configured, then
           there is an additional match <code>ip4.dst == <var>allowed_ext_ips
           </var></code>. Similarly, for IPV6, match would be <code>ip6.dst ==
           <var>allowed_ext_ips</var></code>.
         </p>
+      </li>
 
+      <li>
         <p>
           If the NAT rule has <code>exempted_ext_ips</code> set, then
           there is an additional flow configured at the priority + 1 of
@@ -3582,7 +3862,9 @@ nd_ns {
           </code>. This flow is used to bypass the ct_snat action for a packet
           which is destinted to <code>exempted_ext_ips</code>.
         </p>
+      </li>
 
+      <li>
         <p>
           A priority-0 logical flow with match <code>1</code> has actions
           <code>next;</code>.
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 5a3227568..c81e3220c 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -38,6 +38,7 @@
 #include "lib/ovn-util.h"
 #include "lib/lb.h"
 #include "ovn/actions.h"
+#include "ovn/features.h"
 #include "ovn/logical-fields.h"
 #include "packets.h"
 #include "openvswitch/poll-loop.h"
@@ -141,25 +142,28 @@ enum ovn_stage {
     PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_L2,    0, "ls_in_port_sec_l2")   \
     PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_IP,    1, "ls_in_port_sec_ip")   \
     PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_ND,    2, "ls_in_port_sec_nd")   \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_ACL,        3, "ls_in_pre_acl")       \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_LB,         4, "ls_in_pre_lb")        \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   5, "ls_in_pre_stateful")  \
-    PIPELINE_STAGE(SWITCH, IN,  ACL_HINT,       6, "ls_in_acl_hint")      \
-    PIPELINE_STAGE(SWITCH, IN,  ACL,            7, "ls_in_acl")           \
-    PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,       8, "ls_in_qos_mark")      \
-    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,      9, "ls_in_qos_meter")     \
-    PIPELINE_STAGE(SWITCH, IN,  LB,            10, "ls_in_lb")            \
-    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      11, "ls_in_stateful")      \
-    PIPELINE_STAGE(SWITCH, IN,  PRE_HAIRPIN,   12, "ls_in_pre_hairpin")   \
-    PIPELINE_STAGE(SWITCH, IN,  NAT_HAIRPIN,   13, "ls_in_nat_hairpin")       \
-    PIPELINE_STAGE(SWITCH, IN,  HAIRPIN,       14, "ls_in_hairpin")       \
-    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    15, "ls_in_arp_rsp")       \
-    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  16, "ls_in_dhcp_options")  \
-    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 17, "ls_in_dhcp_response") \
-    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    18, "ls_in_dns_lookup")    \
-    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  19, "ls_in_dns_response")  \
-    PIPELINE_STAGE(SWITCH, IN,  EXTERNAL_PORT, 20, "ls_in_external_port") \
-    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       21, "ls_in_l2_lkup")       \
+    PIPELINE_STAGE(SWITCH, IN,  LOOKUP_FDB ,    3, "ls_in_lookup_fdb")    \
+    PIPELINE_STAGE(SWITCH, IN,  PUT_FDB,        4, "ls_in_put_fdb")       \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_ACL,        5, "ls_in_pre_acl")       \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_LB,         6, "ls_in_pre_lb")        \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   7, "ls_in_pre_stateful")  \
+    PIPELINE_STAGE(SWITCH, IN,  ACL_HINT,       8, "ls_in_acl_hint")      \
+    PIPELINE_STAGE(SWITCH, IN,  ACL,            9, "ls_in_acl")           \
+    PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,      10, "ls_in_qos_mark")      \
+    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,     11, "ls_in_qos_meter")     \
+    PIPELINE_STAGE(SWITCH, IN,  LB,            12, "ls_in_lb")            \
+    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      13, "ls_in_stateful")      \
+    PIPELINE_STAGE(SWITCH, IN,  PRE_HAIRPIN,   14, "ls_in_pre_hairpin")   \
+    PIPELINE_STAGE(SWITCH, IN,  NAT_HAIRPIN,   15, "ls_in_nat_hairpin")   \
+    PIPELINE_STAGE(SWITCH, IN,  HAIRPIN,       16, "ls_in_hairpin")       \
+    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    17, "ls_in_arp_rsp")       \
+    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  18, "ls_in_dhcp_options")  \
+    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 19, "ls_in_dhcp_response") \
+    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    20, "ls_in_dns_lookup")    \
+    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  21, "ls_in_dns_response")  \
+    PIPELINE_STAGE(SWITCH, IN,  EXTERNAL_PORT, 22, "ls_in_external_port") \
+    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       23, "ls_in_l2_lkup")       \
+    PIPELINE_STAGE(SWITCH, IN,  L2_UNKNOWN,    24, "ls_in_l2_unknown")    \
                                                                           \
     /* Logical switch egress stages. */                                   \
     PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")         \
@@ -188,11 +192,12 @@ enum ovn_stage {
     PIPELINE_STAGE(ROUTER, IN,  IP_ROUTING,      10, "lr_in_ip_routing")   \
     PIPELINE_STAGE(ROUTER, IN,  IP_ROUTING_ECMP, 11, "lr_in_ip_routing_ecmp") \
     PIPELINE_STAGE(ROUTER, IN,  POLICY,          12, "lr_in_policy")       \
-    PIPELINE_STAGE(ROUTER, IN,  ARP_RESOLVE,     13, "lr_in_arp_resolve")  \
-    PIPELINE_STAGE(ROUTER, IN,  CHK_PKT_LEN   ,  14, "lr_in_chk_pkt_len")   \
-    PIPELINE_STAGE(ROUTER, IN,  LARGER_PKTS,     15,"lr_in_larger_pkts")   \
-    PIPELINE_STAGE(ROUTER, IN,  GW_REDIRECT,     16, "lr_in_gw_redirect")  \
-    PIPELINE_STAGE(ROUTER, IN,  ARP_REQUEST,     17, "lr_in_arp_request")  \
+    PIPELINE_STAGE(ROUTER, IN,  POLICY_ECMP,     13, "lr_in_policy_ecmp")  \
+    PIPELINE_STAGE(ROUTER, IN,  ARP_RESOLVE,     14, "lr_in_arp_resolve")  \
+    PIPELINE_STAGE(ROUTER, IN,  CHK_PKT_LEN   ,  15, "lr_in_chk_pkt_len")  \
+    PIPELINE_STAGE(ROUTER, IN,  LARGER_PKTS,     16, "lr_in_larger_pkts")  \
+    PIPELINE_STAGE(ROUTER, IN,  GW_REDIRECT,     17, "lr_in_gw_redirect")  \
+    PIPELINE_STAGE(ROUTER, IN,  ARP_REQUEST,     18, "lr_in_arp_request")  \
                                                                       \
     /* Logical router egress stages. */                               \
     PIPELINE_STAGE(ROUTER, OUT, UNDNAT,    0, "lr_out_undnat")        \
@@ -225,6 +230,12 @@ enum ovn_stage {
 #define REGBIT_ACL_HINT_ALLOW     "reg0[8]"
 #define REGBIT_ACL_HINT_DROP      "reg0[9]"
 #define REGBIT_ACL_HINT_BLOCK     "reg0[10]"
+#define REGBIT_LKUP_FDB           "reg0[11]"
+#define REGBIT_HAIRPIN_REPLY      "reg0[12]"
+
+#define REG_ORIG_DIP_IPV4         "reg1"
+#define REG_ORIG_DIP_IPV6         "xxreg1"
+#define REG_ORIG_TP_DPORT         "reg2[0..15]"
 
 /* Register definitions for switches and routers. */
 
@@ -259,12 +270,29 @@ enum ovn_stage {
  * OVS register usage:
  *
  * Logical Switch pipeline:
- * +---------+----------------------------------------------+
- * | R0      |     REGBIT_{CONNTRACK/DHCP/DNS/HAIRPIN}      |
- * |         | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} |
- * +---------+----------------------------------------------+
- * | R1 - R9 |                   UNUSED                     |
- * +---------+----------------------------------------------+
+ * +----+----------------------------------------------+---+------------------+
+ * | R0 |     REGBIT_{CONNTRACK/DHCP/DNS}              |   |                  |
+ * |    |     REGBIT_{HAIRPIN/HAIRPIN_REPLY}           | X |                  |
+ * |    | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} | X |                  |
+ * +----+----------------------------------------------+ X |                  |
+ * | R1 |         ORIG_DIP_IPV4 (>= IN_STATEFUL)       | R |                  |
+ * +----+----------------------------------------------+ E |                  |
+ * | R2 |         ORIG_TP_DPORT (>= IN_STATEFUL)       | G |                  |
+ * +----+----------------------------------------------+ 0 |                  |
+ * | R3 |                   UNUSED                     |   |                  |
+ * +----+----------------------------------------------+---+------------------+
+ * | R4 |                   UNUSED                     |   |                  |
+ * +----+----------------------------------------------+ X |   ORIG_DIP_IPV6  |
+ * | R5 |                   UNUSED                     | X | (>= IN_STATEFUL) |
+ * +----+----------------------------------------------+ R |                  |
+ * | R6 |                   UNUSED                     | E |                  |
+ * +----+----------------------------------------------+ G |                  |
+ * | R7 |                   UNUSED                     | 1 |                  |
+ * +----+----------------------------------------------+---+------------------+
+ * | R8 |                   UNUSED                     |
+ * +----+----------------------------------------------+
+ * | R9 |                   UNUSED                     |
+ * +----+----------------------------------------------+
  *
  * Logical Router pipeline:
  * +-----+--------------------------+---+-----------------+---+---------------+
@@ -608,6 +636,8 @@ struct ovn_datapath {
     struct hmap port_tnlids;
     uint32_t port_key_hint;
 
+    bool has_stateful_acl;
+    bool has_lb_vip;
     bool has_unknown;
 
     /* IPAM data. */
@@ -633,6 +663,7 @@ struct ovn_datapath {
 
     struct lport_addresses dnat_force_snat_addrs;
     struct lport_addresses lb_force_snat_addrs;
+    bool lb_force_snat_router_ip;
 
     struct ovn_port **localnet_ports;
     size_t n_localnet_ports;
@@ -646,6 +677,9 @@ struct ovn_datapath {
     struct hmap nb_pgs;
 };
 
+static bool ls_has_stateful_acl(struct ovn_datapath *od);
+static bool ls_has_lb_vip(struct ovn_datapath *od);
+
 /* Contains a NAT entry with the external addresses pre-parsed. */
 struct ovn_nat {
     const struct nbrec_nat *nb;
@@ -723,14 +757,28 @@ init_nat_entries(struct ovn_datapath *od)
         }
     }
 
-    if (get_force_snat_ip(od, "lb", &od->lb_force_snat_addrs)) {
-        if (od->lb_force_snat_addrs.n_ipv4_addrs) {
-            snat_ip_add(od, od->lb_force_snat_addrs.ipv4_addrs[0].addr_s,
-                        NULL);
-        }
-        if (od->lb_force_snat_addrs.n_ipv6_addrs) {
-            snat_ip_add(od, od->lb_force_snat_addrs.ipv6_addrs[0].addr_s,
-                        NULL);
+    /* Check if 'lb_force_snat_ip' is configured with 'router_ip'. */
+    const char *lb_force_snat =
+        smap_get(&od->nbr->options, "lb_force_snat_ip");
+    if (lb_force_snat && !strcmp(lb_force_snat, "router_ip")
+            && smap_get(&od->nbr->options, "chassis")) {
+        /* Set it to true only if its gateway router and
+         * options:lb_force_snat_ip=router_ip. */
+        od->lb_force_snat_router_ip = true;
+    } else {
+        od->lb_force_snat_router_ip = false;
+
+        /* Check if 'lb_force_snat_ip' is configured with a set of
+         * IP address(es). */
+        if (get_force_snat_ip(od, "lb", &od->lb_force_snat_addrs)) {
+            if (od->lb_force_snat_addrs.n_ipv4_addrs) {
+                snat_ip_add(od, od->lb_force_snat_addrs.ipv4_addrs[0].addr_s,
+                            NULL);
+            }
+            if (od->lb_force_snat_addrs.n_ipv6_addrs) {
+                snat_ip_add(od, od->lb_force_snat_addrs.ipv6_addrs[0].addr_s,
+                            NULL);
+            }
         }
     }
 
@@ -872,6 +920,20 @@ ovn_datapath_find(struct hmap *datapaths, const struct uuid *uuid)
     return NULL;
 }
 
+static struct ovn_datapath *
+ovn_datapath_find_by_key(struct hmap *datapaths, uint32_t dp_key)
+{
+    struct ovn_datapath *od;
+
+    HMAP_FOR_EACH (od, key_node, datapaths) {
+        if (od->tunnel_key == dp_key) {
+            return od;
+        }
+    }
+
+    return NULL;
+}
+
 static bool
 ovn_datapath_is_stale(const struct ovn_datapath *od)
 {
@@ -1472,6 +1534,8 @@ struct ovn_port {
 
     bool has_unknown; /* If the addresses have 'unknown' defined. */
 
+    bool has_bfd;
+
     /* The port's peer:
      *
      *     - A switch port S of type "router" has a router port R as a peer,
@@ -1543,17 +1607,38 @@ ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
     }
 }
 
+/* Returns the ovn_port that matches 'name'.  If 'prefer_bound' is true and
+ * multiple ports share the same name, gives precedence to ports bound to
+ * an ovn_datapath.
+ */
 static struct ovn_port *
-ovn_port_find(const struct hmap *ports, const char *name)
+ovn_port_find__(const struct hmap *ports, const char *name,
+                bool prefer_bound)
 {
+    struct ovn_port *matched_op = NULL;
     struct ovn_port *op;
 
     HMAP_FOR_EACH_WITH_HASH (op, key_node, hash_string(name, 0), ports) {
         if (!strcmp(op->key, name)) {
-            return op;
+            matched_op = op;
+            if (!prefer_bound || op->od) {
+                return op;
+            }
         }
     }
-    return NULL;
+    return matched_op;
+}
+
+static struct ovn_port *
+ovn_port_find(const struct hmap *ports, const char *name)
+{
+    return ovn_port_find__(ports, name, false);
+}
+
+static struct ovn_port *
+ovn_port_find_bound(const struct hmap *ports, const char *name)
+{
+    return ovn_port_find__(ports, name, true);
 }
 
 /* Returns true if the logical switch port 'enabled' column is empty or
@@ -2336,15 +2421,13 @@ join_logical_ports(struct northd_context *ctx,
             for (size_t i = 0; i < od->nbs->n_ports; i++) {
                 const struct nbrec_logical_switch_port *nbsp
                     = od->nbs->ports[i];
-                struct ovn_port *op = ovn_port_find(ports, nbsp->name);
-                if (op && op->sb->datapath == od->sb) {
-                    if (op->nbsp || op->nbrp) {
-                        static struct vlog_rate_limit rl
-                            = VLOG_RATE_LIMIT_INIT(5, 1);
-                        VLOG_WARN_RL(&rl, "duplicate logical port %s",
-                                     nbsp->name);
-                        continue;
-                    }
+                struct ovn_port *op = ovn_port_find_bound(ports, nbsp->name);
+                if (op && (op->od || op->nbsp || op->nbrp)) {
+                    static struct vlog_rate_limit rl
+                        = VLOG_RATE_LIMIT_INIT(5, 1);
+                    VLOG_WARN_RL(&rl, "duplicate logical port %s", nbsp->name);
+                    continue;
+                } else if (op && (!op->sb || op->sb->datapath == od->sb)) {
                     ovn_port_set_nb(op, nbsp, NULL);
                     ovs_list_remove(&op->list);
 
@@ -2435,16 +2518,15 @@ join_logical_ports(struct northd_context *ctx,
                     continue;
                 }
 
-                struct ovn_port *op = ovn_port_find(ports, nbrp->name);
-                if (op && op->sb->datapath == od->sb) {
-                    if (op->nbsp || op->nbrp) {
-                        static struct vlog_rate_limit rl
-                            = VLOG_RATE_LIMIT_INIT(5, 1);
-                        VLOG_WARN_RL(&rl, "duplicate logical router port %s",
-                                     nbrp->name);
-                        destroy_lport_addresses(&lrp_networks);
-                        continue;
-                    }
+                struct ovn_port *op = ovn_port_find_bound(ports, nbrp->name);
+                if (op && (op->od || op->nbsp || op->nbrp)) {
+                    static struct vlog_rate_limit rl
+                        = VLOG_RATE_LIMIT_INIT(5, 1);
+                    VLOG_WARN_RL(&rl, "duplicate logical router port %s",
+                                 nbrp->name);
+                    destroy_lport_addresses(&lrp_networks);
+                    continue;
+                } else if (op && (!op->sb || op->sb->datapath == od->sb)) {
                     ovn_port_set_nb(op, NULL, nbrp);
                     ovs_list_remove(&op->list);
                     ovs_list_push_back(both, &op->list);
@@ -2487,7 +2569,7 @@ join_logical_ports(struct northd_context *ctx,
                     char *redirect_name =
                         ovn_chassis_redirect_name(nbrp->name);
                     struct ovn_port *crp = ovn_port_find(ports, redirect_name);
-                    if (crp && crp->sb->datapath == od->sb) {
+                    if (crp && crp->sb && crp->sb->datapath == od->sb) {
                         crp->derived = true;
                         ovn_port_set_nb(crp, NULL, nbrp);
                         ovs_list_remove(&crp->list);
@@ -3179,6 +3261,12 @@ ovn_port_update_sbrec(struct northd_context *ctx,
                 } else {
                     sbrec_port_binding_set_ha_chassis_group(op->sb, NULL);
                 }
+            } else if (op->sb->ha_chassis_group) {
+                /* Clear the port bindings ha_chassis_group if the type is
+                 * not external and if this column is set.  This can happen
+                 * when an external port is reset to type normal and
+                 * ha_chassis_group cleared in the same transaction. */
+                sbrec_port_binding_set_ha_chassis_group(op->sb, NULL);
             }
         } else {
             const char *chassis = NULL;
@@ -3308,6 +3396,14 @@ ovn_port_update_sbrec(struct northd_context *ctx,
     if (op->tunnel_key != op->sb->tunnel_key) {
         sbrec_port_binding_set_tunnel_key(op->sb, op->tunnel_key);
     }
+
+    /* ovn-controller will update 'Port_Binding.up' only if it was explicitly
+     * set to 'false'.
+     */
+    if (!op->sb->n_up) {
+        bool up = false;
+        sbrec_port_binding_set_up(op->sb, &up, 1);
+    }
 }
 
 /* Remove mac_binding entries that refer to logical_ports which are
@@ -3340,6 +3436,26 @@ cleanup_sb_ha_chassis_groups(struct northd_context *ctx,
     }
 }
 
+static void
+cleanup_stale_fdp_entries(struct northd_context *ctx, struct hmap *datapaths)
+{
+    const struct sbrec_fdb *fdb_e, *next;
+    SBREC_FDB_FOR_EACH_SAFE (fdb_e, next, ctx->ovnsb_idl) {
+        bool delete = true;
+        struct ovn_datapath *od
+            = ovn_datapath_find_by_key(datapaths, fdb_e->dp_key);
+        if (od) {
+            if (ovn_tnlid_present(&od->port_tnlids, fdb_e->port_key)) {
+                delete = false;
+            }
+        }
+
+        if (delete) {
+            sbrec_fdb_delete(fdb_e);
+        }
+    }
+}
+
 struct service_monitor_info {
     struct hmap_node hmap_node;
     const struct sbrec_service_monitor *sbrec_mon;
@@ -3436,12 +3552,12 @@ ovn_lb_svc_create(struct northd_context *ctx, struct ovn_northd_lb *lb,
 }
 
 static
-void build_lb_vip_ct_lb_actions(struct ovn_lb_vip *lb_vip,
-                                struct ovn_northd_lb_vip *lb_vip_nb,
-                                struct ds *action,
-                                char *selection_fields)
+void build_lb_vip_actions(struct ovn_lb_vip *lb_vip,
+                          struct ovn_northd_lb_vip *lb_vip_nb,
+                          struct ds *action, char *selection_fields,
+                          bool ls_dp)
 {
-    bool skip_hash_fields = false;
+    bool skip_hash_fields = false, reject = false;
 
     if (lb_vip_nb->lb_health_check) {
         ds_put_cstr(action, "ct_lb(backends=");
@@ -3463,18 +3579,30 @@ void build_lb_vip_ct_lb_actions(struct ovn_lb_vip *lb_vip,
         }
 
         if (!n_active_backends) {
-            skip_hash_fields = true;
-            ds_clear(action);
-            ds_put_cstr(action, "drop;");
+            if (!lb_vip->empty_backend_rej) {
+                ds_clear(action);
+                ds_put_cstr(action, "drop;");
+                skip_hash_fields = true;
+            } else {
+                reject = true;
+            }
         } else {
             ds_chomp(action, ',');
             ds_put_cstr(action, ");");
         }
+    } else if (lb_vip->empty_backend_rej && !lb_vip->n_backends) {
+        reject = true;
     } else {
         ds_put_format(action, "ct_lb(backends=%s);", lb_vip_nb->backend_ips);
     }
 
-    if (!skip_hash_fields && selection_fields && selection_fields[0]) {
+    if (reject) {
+        int stage = ls_dp ? ovn_stage_get_table(S_SWITCH_OUT_QOS_MARK)
+                          : ovn_stage_get_table(S_ROUTER_OUT_SNAT);
+        ds_clear(action);
+        ds_put_format(action, "reg0 = 0; reject { outport <-> inport; "
+                      "next(pipeline=egress,table=%d);};", stage);
+    } else if (!skip_hash_fields && selection_fields && selection_fields[0]) {
         ds_chomp(action, ';');
         ds_chomp(action, ')');
         ds_put_format(action, "; hash_fields=\"%s\");", selection_fields);
@@ -3547,10 +3675,18 @@ build_ovn_lbs(struct northd_context *ctx, struct hmap *datapaths,
     /* Create SB Load balancer records if not present and sync
      * the SB load balancer columns. */
     HMAP_FOR_EACH (lb, hmap_node, lbs) {
+
         if (!lb->n_dps) {
             continue;
         }
 
+        /* Store the fact that northd provides the original (destination IP +
+         * transport port) tuple.
+         */
+        struct smap options;
+        smap_clone(&options, &lb->nlb->options);
+        smap_replace(&options, "hairpin_orig_tuple", "true");
+
         if (!lb->slb) {
             sbrec_lb = sbrec_load_balancer_insert(ctx->ovnsb_txn);
             lb->slb = sbrec_lb;
@@ -3564,9 +3700,11 @@ build_ovn_lbs(struct northd_context *ctx, struct hmap *datapaths,
         sbrec_load_balancer_set_name(lb->slb, lb->nlb->name);
         sbrec_load_balancer_set_vips(lb->slb, &lb->nlb->vips);
         sbrec_load_balancer_set_protocol(lb->slb, lb->nlb->protocol);
+        sbrec_load_balancer_set_options(lb->slb, &options);
         sbrec_load_balancer_set_datapaths(
             lb->slb, (struct sbrec_datapath_binding **)lb->dps,
             lb->n_dps);
+        smap_destroy(&options);
     }
 
     /* Set the list of associated load balanacers to a logical switch
@@ -4822,7 +4960,7 @@ ovn_ls_port_group_destroy(struct hmap *nb_pgs)
 }
 
 static bool
-has_stateful_acl(struct ovn_datapath *od)
+ls_has_stateful_acl(struct ovn_datapath *od)
 {
     for (size_t i = 0; i < od->nbs->n_acls; i++) {
         struct nbrec_acl *acl = od->nbs->acls[i];
@@ -4905,50 +5043,82 @@ build_lswitch_input_port_sec_od(
 }
 
 static void
-build_lswitch_output_port_sec(struct hmap *ports, struct hmap *datapaths,
-                              struct hmap *lflows)
+build_lswitch_learn_fdb_op(
+        struct ovn_port *op, struct hmap *lflows,
+        struct ds *actions, struct ds *match)
 {
-    struct ds actions = DS_EMPTY_INITIALIZER;
-    struct ds match = DS_EMPTY_INITIALIZER;
-    struct ovn_port *op;
+    if (op->nbsp && !op->n_ps_addrs && !strcmp(op->nbsp->type, "") &&
+        op->has_unknown) {
+        ds_clear(match);
+        ds_clear(actions);
+        ds_put_format(match, "inport == %s", op->json_key);
+        ds_put_format(actions, REGBIT_LKUP_FDB
+                      " = lookup_fdb(inport, eth.src); next;");
+        ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_LOOKUP_FDB, 100,
+                                ds_cstr(match), ds_cstr(actions),
+                                &op->nbsp->header_);
 
-    /* Egress table 8: Egress port security - IP (priorities 90 and 80)
-     * if port security enabled.
-     *
-     * Egress table 9: Egress port security - L2 (priorities 50 and 150).
-     *
-     * Priority 50 rules implement port security for enabled logical port.
-     *
-     * Priority 150 rules drop packets to disabled logical ports, so that
-     * they don't even receive multicast or broadcast packets.
-     */
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbsp || lsp_is_external(op->nbsp)) {
-            continue;
-        }
+        ds_put_cstr(match, " && "REGBIT_LKUP_FDB" == 0");
+        ds_clear(actions);
+        ds_put_cstr(actions, "put_fdb(inport, eth.src); next;");
+        ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_PUT_FDB, 100,
+                                ds_cstr(match), ds_cstr(actions),
+                                &op->nbsp->header_);
+    }
+}
 
-        ds_clear(&actions);
-        ds_clear(&match);
+static void
+build_lswitch_learn_fdb_od(
+        struct ovn_datapath *od, struct hmap *lflows)
+{
+
+    if (od->nbs) {
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_LOOKUP_FDB, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_PUT_FDB, 0, "1", "next;");
+    }
+}
+
+/* Egress table 8: Egress port security - IP (priorities 90 and 80)
+ * if port security enabled.
+ *
+ * Egress table 9: Egress port security - L2 (priorities 50 and 150).
+ *
+ * Priority 50 rules implement port security for enabled logical port.
+ *
+ * Priority 150 rules drop packets to disabled logical ports, so that
+ * they don't even receive multicast or broadcast packets.
+ */
+static void
+build_lswitch_output_port_sec_op(struct ovn_port *op,
+                                 struct hmap *lflows,
+                                 struct ds *match,
+                                 struct ds *actions)
+{
+
+    if (op->nbsp && (!lsp_is_external(op->nbsp))) {
+
+        ds_clear(actions);
+        ds_clear(match);
 
-        ds_put_format(&match, "outport == %s", op->json_key);
+        ds_put_format(match, "outport == %s", op->json_key);
         if (lsp_is_enabled(op->nbsp)) {
             build_port_security_l2("eth.dst", op->ps_addrs, op->n_ps_addrs,
-                                   &match);
+                                   match);
 
             if (!strcmp(op->nbsp->type, "localnet")) {
                 const char *queue_id = smap_get(&op->sb->options,
                                                 "qdisc_queue_id");
                 if (queue_id) {
-                    ds_put_format(&actions, "set_queue(%s); ", queue_id);
+                    ds_put_format(actions, "set_queue(%s); ", queue_id);
                 }
             }
-            ds_put_cstr(&actions, "output;");
+            ds_put_cstr(actions, "output;");
             ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_OUT_PORT_SEC_L2,
-                                    50, ds_cstr(&match), ds_cstr(&actions),
+                                    50, ds_cstr(match), ds_cstr(actions),
                                     &op->nbsp->header_);
         } else {
             ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_OUT_PORT_SEC_L2,
-                                    150, ds_cstr(&match), "drop;",
+                                    150, ds_cstr(match), "drop;",
                                     &op->nbsp->header_);
         }
 
@@ -4956,23 +5126,20 @@ build_lswitch_output_port_sec(struct hmap *ports, struct hmap *datapaths,
             build_port_security_ip(P_OUT, op, lflows, &op->nbsp->header_);
         }
     }
+}
 
-    /* Egress tables 8: Egress port security - IP (priority 0)
-     * Egress table 9: Egress port security L2 - multicast/broadcast
-     *                 (priority 100). */
-    struct ovn_datapath *od;
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs) {
-            continue;
-        }
-
+/* Egress tables 8: Egress port security - IP (priority 0)
+ * Egress table 9: Egress port security L2 - multicast/broadcast
+ *                 (priority 100). */
+static void
+build_lswitch_output_port_sec_od(struct ovn_datapath *od,
+                              struct hmap *lflows)
+{
+    if (od->nbs) {
         ovn_lflow_add(lflows, od, S_SWITCH_OUT_PORT_SEC_IP, 0, "1", "next;");
         ovn_lflow_add(lflows, od, S_SWITCH_OUT_PORT_SEC_L2, 100, "eth.mcast",
                       "output;");
     }
-
-    ds_destroy(&match);
-    ds_destroy(&actions);
 }
 
 static void
@@ -5008,8 +5175,6 @@ skip_port_from_conntrack(struct ovn_datapath *od, struct ovn_port *op,
 static void
 build_pre_acls(struct ovn_datapath *od, struct hmap *lflows)
 {
-    bool has_stateful = has_stateful_acl(od);
-
     /* Ingress and Egress Pre-ACL Table (Priority 0): Packets are
      * allowed by default. */
     ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 0, "1", "next;");
@@ -5024,7 +5189,7 @@ build_pre_acls(struct ovn_datapath *od, struct hmap *lflows)
     /* If there are any stateful ACL rules in this datapath, we must
      * send all IP packets through the conntrack action, which handles
      * defragmentation, in order to match L4 headers. */
-    if (has_stateful) {
+    if (od->has_stateful_acl) {
         for (size_t i = 0; i < od->n_router_ports; i++) {
             skip_port_from_conntrack(od, od->router_ports[i],
                                      S_SWITCH_IN_PRE_ACL, S_SWITCH_OUT_PRE_ACL,
@@ -5084,7 +5249,10 @@ build_empty_lb_event_flow(struct ovn_datapath *od, struct hmap *lflows,
                           struct nbrec_load_balancer *lb,
                           int pl, struct shash *meter_groups)
 {
-    if (!controller_event_en || lb_vip->n_backends) {
+    bool controller_event = smap_get_bool(&lb->options, "event", false) ||
+                            controller_event_en; /* deprecated */
+    if (!controller_event || lb_vip->n_backends ||
+        lb_vip->empty_backend_rej) {
         return;
     }
 
@@ -5124,7 +5292,7 @@ build_empty_lb_event_flow(struct ovn_datapath *od, struct hmap *lflows,
 }
 
 static bool
-has_lb_vip(struct ovn_datapath *od)
+ls_has_lb_vip(struct ovn_datapath *od)
 {
     for (int i = 0; i < od->nbs->n_load_balancer; i++) {
         struct nbrec_load_balancer *nb_lb = od->nbs->load_balancer[i];
@@ -5267,6 +5435,13 @@ build_acl_hints(struct ovn_datapath *od, struct hmap *lflows)
     for (size_t i = 0; i < ARRAY_SIZE(stages); i++) {
         enum ovn_stage stage = stages[i];
 
+        /* In any case, advance to the next stage. */
+        ovn_lflow_add(lflows, od, stage, 0, "1", "next;");
+
+        if (!od->has_stateful_acl && !od->has_lb_vip) {
+            continue;
+        }
+
         /* New, not already established connections, may hit either allow
          * or drop ACLs. For allow ACLs, the connection must also be committed
          * to conntrack so we set REGBIT_ACL_HINT_ALLOW_NEW.
@@ -5327,9 +5502,6 @@ build_acl_hints(struct ovn_datapath *od, struct hmap *lflows)
         ovn_lflow_add(lflows, od, stage, 1, "ct.est && ct_label.blocked == 0",
                       REGBIT_ACL_HINT_BLOCK " = 1; "
                       "next;");
-
-        /* In any case, advance to the next stage. */
-        ovn_lflow_add(lflows, od, stage, 0, "1", "next;");
     }
 }
 
@@ -5661,7 +5833,7 @@ static void
 build_acls(struct ovn_datapath *od, struct hmap *lflows,
            struct hmap *port_groups, const struct shash *meter_groups)
 {
-    bool has_stateful = (has_stateful_acl(od) || has_lb_vip(od));
+    bool has_stateful = od->has_stateful_acl || od->has_lb_vip;
 
     /* Ingress and Egress ACL Table (Priority 0): Packets are allowed by
      * default.  A related rule at priority 1 is added below if there
@@ -5930,7 +6102,7 @@ build_lb(struct ovn_datapath *od, struct hmap *lflows)
         }
     }
 
-    if (has_lb_vip(od)) {
+    if (od->has_lb_vip) {
         /* Ingress and Egress LB Table (Priority 65534).
          *
          * Send established traffic through conntrack for just NAT. */
@@ -5953,11 +6125,20 @@ build_lb_rules(struct ovn_datapath *od, struct hmap *lflows,
         struct ovn_lb_vip *lb_vip = &lb->vips[i];
         struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[i];
 
+        struct ds action = DS_EMPTY_INITIALIZER;
         const char *ip_match = NULL;
+
+        /* Store the original destination IP to be used when generating
+         * hairpin flows.
+         */
         if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
             ip_match = "ip4";
+            ds_put_format(&action, REG_ORIG_DIP_IPV4 " = %s; ",
+                          lb_vip->vip_str);
         } else {
             ip_match = "ip6";
+            ds_put_format(&action, REG_ORIG_DIP_IPV6 " = %s; ",
+                          lb_vip->vip_str);
         }
 
         const char *proto = NULL;
@@ -5970,12 +6151,17 @@ build_lb_rules(struct ovn_datapath *od, struct hmap *lflows,
                     proto = "sctp";
                 }
             }
+
+            /* Store the original destination port to be used when generating
+             * hairpin flows.
+             */
+            ds_put_format(&action, REG_ORIG_TP_DPORT " = %"PRIu16"; ",
+                          lb_vip->vip_port);
         }
 
         /* New connections in Ingress table. */
-        struct ds action = DS_EMPTY_INITIALIZER;
-        build_lb_vip_ct_lb_actions(lb_vip, lb_vip_nb, &action,
-                                   lb->selection_fields);
+        build_lb_vip_actions(lb_vip, lb_vip_nb, &action,
+                             lb->selection_fields, true);
 
         struct ds match = DS_EMPTY_INITIALIZER;
         ds_put_format(&match, "ct.new && %s.dst == %s", ip_match,
@@ -6021,9 +6207,39 @@ build_stateful(struct ovn_datapath *od, struct hmap *lflows, struct hmap *lbs)
      * REGBIT_CONNTRACK_COMMIT is set for new connections and
      * REGBIT_CONNTRACK_NAT is set for established connections. So they
      * don't overlap.
+     *
+     * In the ingress pipeline, also store the original destination IP and
+     * transport port to be used when detecting hairpin packets.
      */
-    ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100,
-                  REGBIT_CONNTRACK_NAT" == 1", "ct_lb;");
+    const char *lb_protocols[] = {"tcp", "udp", "sctp"};
+    struct ds actions = DS_EMPTY_INITIALIZER;
+    struct ds match = DS_EMPTY_INITIALIZER;
+
+    for (size_t i = 0; i < ARRAY_SIZE(lb_protocols); i++) {
+        ds_clear(&match);
+        ds_clear(&actions);
+        ds_put_format(&match, REGBIT_CONNTRACK_NAT" == 1 && ip4 && %s",
+                      lb_protocols[i]);
+        ds_put_format(&actions, REG_ORIG_DIP_IPV4 " = ip4.dst; "
+                                REG_ORIG_TP_DPORT " = %s.dst; ct_lb;",
+                      lb_protocols[i]);
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100,
+                      ds_cstr(&match), ds_cstr(&actions));
+
+        ds_clear(&match);
+        ds_clear(&actions);
+        ds_put_format(&match, REGBIT_CONNTRACK_NAT" == 1 && ip6 && %s",
+                      lb_protocols[i]);
+        ds_put_format(&actions, REG_ORIG_DIP_IPV6 " = ip6.dst; "
+                                REG_ORIG_TP_DPORT " = %s.dst; ct_lb;",
+                      lb_protocols[i]);
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_STATEFUL, 100,
+                      ds_cstr(&match), ds_cstr(&actions));
+    }
+
+    ds_destroy(&actions);
+    ds_destroy(&match);
+
     ovn_lflow_add(lflows, od, S_SWITCH_OUT_STATEFUL, 100,
                   REGBIT_CONNTRACK_NAT" == 1", "ct_lb;");
 
@@ -6051,40 +6267,50 @@ build_lb_hairpin(struct ovn_datapath *od, struct hmap *lflows)
     ovn_lflow_add(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 0, "1", "next;");
     ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 0, "1", "next;");
 
-    if (has_lb_vip(od)) {
-        /* Check if the packet needs to be hairpinned. */
-        ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 100,
-                                "ip && ct.trk && ct.dnat",
-                                REGBIT_HAIRPIN " = chk_lb_hairpin(); next;",
+    if (od->has_lb_vip) {
+        /* Check if the packet needs to be hairpinned.
+         * Set REGBIT_HAIRPIN in the original direction and
+         * REGBIT_HAIRPIN_REPLY in the reply direction.
+         */
+        ovn_lflow_add_with_hint(
+            lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 100, "ip && ct.trk",
+            REGBIT_HAIRPIN " = chk_lb_hairpin(); "
+            REGBIT_HAIRPIN_REPLY " = chk_lb_hairpin_reply(); "
+            "next;",
+            &od->nbs->header_);
+
+        /* If packet needs to be hairpinned, snat the src ip with the VIP
+         * for new sessions. */
+        ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 100,
+                                "ip && ct.new && ct.trk"
+                                " && "REGBIT_HAIRPIN " == 1",
+                                "ct_snat_to_vip; next;",
                                 &od->nbs->header_);
 
-        /* Check if the packet is a reply of hairpinned traffic. */
-        ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_PRE_HAIRPIN, 90, "ip",
-                                REGBIT_HAIRPIN " = chk_lb_hairpin_reply(); "
-                                "next;", &od->nbs->header_);
-
-        /* If packet needs to be hairpinned, snat the src ip with the VIP. */
+        /* If packet needs to be hairpinned, for established sessions there
+         * should already be an SNAT conntrack entry.
+         */
         ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 100,
-                                "ip && (ct.new || ct.est) && ct.trk && ct.dnat"
+                                "ip && ct.est && ct.trk"
                                 " && "REGBIT_HAIRPIN " == 1",
-                                "ct_snat_to_vip; next;",
+                                "ct_snat;",
                                 &od->nbs->header_);
 
         /* For the reply of hairpinned traffic, snat the src ip to the VIP. */
         ovn_lflow_add_with_hint(lflows, od, S_SWITCH_IN_NAT_HAIRPIN, 90,
-                                "ip && "REGBIT_HAIRPIN " == 1", "ct_snat;",
+                                "ip && "REGBIT_HAIRPIN_REPLY " == 1",
+                                "ct_snat;",
                                 &od->nbs->header_);
 
         /* Ingress Hairpin table.
         * - Priority 1: Packets that were SNAT-ed for hairpinning should be
         *   looped back (i.e., swap ETH addresses and send back on inport).
         */
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_HAIRPIN, 1,
-                      REGBIT_HAIRPIN " == 1",
-                      "eth.dst <-> eth.src;"
-                      "outport = inport;"
-                      "flags.loopback = 1;"
-                      "output;");
+        ovn_lflow_add(
+            lflows, od, S_SWITCH_IN_HAIRPIN, 1,
+            "("REGBIT_HAIRPIN " == 1 || " REGBIT_HAIRPIN_REPLY " == 1)",
+            "eth.dst <-> eth.src; outport = inport; flags.loopback = 1; "
+            "output;");
     }
 }
 
@@ -6754,9 +6980,7 @@ is_vlan_transparent(const struct ovn_datapath *od)
 }
 
 static void
-build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
-                    struct hmap *lflows, struct hmap *mcgroups,
-                    struct hmap *igmp_groups, struct hmap *lbs)
+build_lswitch_flows(struct hmap *datapaths, struct hmap *lflows)
 {
     /* This flow table structure is documented in ovn-northd(8), so please
      * update ovn-northd.8.xml if you change anything. */
@@ -6765,32 +6989,111 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
     struct ds actions = DS_EMPTY_INITIALIZER;
     struct ovn_datapath *od;
 
-    /* Ingress table 13: ARP/ND responder, skip requests coming from localnet
-     * and vtep ports. (priority 100); see ovn-northd.8.xml for the
-     * rationale. */
-    struct ovn_port *op;
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbsp) {
+    /* Ingress table 24: Destination lookup for unknown MACs (priority 0). */
+    HMAP_FOR_EACH (od, key_node, datapaths) {
+        if (!od->nbs) {
             continue;
         }
 
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 0, "1",
+                      "outport = get_fdb(eth.dst); next;");
+
+        if (od->has_unknown) {
+            ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50,
+                          "outport == \"none\"",
+                          "outport = \""MC_UNKNOWN"\"; output;");
+        } else {
+            ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 50,
+                          "outport == \"none\"", "drop;");
+        }
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_UNKNOWN, 0, "1",
+                      "output;");
+    }
+
+    ds_destroy(&match);
+    ds_destroy(&actions);
+}
+
+/* Build pre-ACL and ACL tables for both ingress and egress.
+ * Ingress tables 3 through 10.  Egress tables 0 through 7. */
+static void
+build_lswitch_lflows_pre_acl_and_acl(struct ovn_datapath *od,
+                                     struct hmap *port_groups,
+                                     struct hmap *lflows,
+                                     struct shash *meter_groups,
+                                     struct hmap *lbs)
+{
+    if (od->nbs) {
+        od->has_stateful_acl = ls_has_stateful_acl(od);
+        od->has_lb_vip = ls_has_lb_vip(od);
+
+        build_pre_acls(od, lflows);
+        build_pre_lb(od, lflows, meter_groups, lbs);
+        build_pre_stateful(od, lflows);
+        build_acl_hints(od, lflows);
+        build_acls(od, lflows, port_groups, meter_groups);
+        build_qos(od, lflows);
+        build_lb(od, lflows);
+        build_stateful(od, lflows, lbs);
+        build_lb_hairpin(od, lflows);
+    }
+}
+
+/* Logical switch ingress table 0: Admission control framework (priority
+ * 100). */
+static void
+build_lswitch_lflows_admission_control(struct ovn_datapath *od,
+                                       struct hmap *lflows)
+{
+    if (od->nbs) {
+        /* Logical VLANs not supported. */
+        if (!is_vlan_transparent(od)) {
+            /* Block logical VLANs. */
+            ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100,
+                          "vlan.present", "drop;");
+        }
+
+        /* Broadcast/multicast source address is invalid. */
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "eth.src[40]",
+                      "drop;");
+
+        /* Port security flows have priority 50
+         * (see build_lswitch_input_port_sec()) and will continue
+         * to the next table if packet source is acceptable. */
+    }
+}
+
+/* Ingress table 13: ARP/ND responder, skip requests coming from localnet
+ * and vtep ports. (priority 100); see ovn-northd.8.xml for the
+ * rationale. */
+
+static void
+build_lswitch_arp_nd_responder_skip_local(struct ovn_port *op,
+                                          struct hmap *lflows,
+                                          struct ds *match)
+{
+    if (op->nbsp) {
         if ((!strcmp(op->nbsp->type, "localnet")) ||
             (!strcmp(op->nbsp->type, "vtep"))) {
-            ds_clear(&match);
-            ds_put_format(&match, "inport == %s", op->json_key);
+            ds_clear(match);
+            ds_put_format(match, "inport == %s", op->json_key);
             ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_ARP_ND_RSP,
-                                    100, ds_cstr(&match), "next;",
+                                    100, ds_cstr(match), "next;",
                                     &op->nbsp->header_);
         }
     }
+}
 
-    /* Ingress table 13: ARP/ND responder, reply for known IPs.
-     * (priority 50). */
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbsp) {
-            continue;
-        }
-
+/* Ingress table 13: ARP/ND responder, reply for known IPs.
+ * (priority 50). */
+static void
+build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op,
+                                         struct hmap *lflows,
+                                         struct hmap *ports,
+                                         struct ds *actions,
+                                         struct ds *match)
+{
+    if (op->nbsp) {
         if (!strcmp(op->nbsp->type, "virtual")) {
             /* Handle
              *  - GARPs for virtual ip which belongs to a logical port
@@ -6806,7 +7109,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                                                    "virtual-parents");
             if (!virtual_ip || !virtual_parents ||
                 !ip_parse(virtual_ip, &ip)) {
-                continue;
+                return;
             }
 
             char *tokstr = xstrdup(virtual_parents);
@@ -6821,21 +7124,21 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                     continue;
                 }
 
-                ds_clear(&match);
-                ds_put_format(&match, "inport == \"%s\" && "
+                ds_clear(match);
+                ds_put_format(match, "inport == \"%s\" && "
                               "((arp.op == 1 && arp.spa == %s && "
                               "arp.tpa == %s) || (arp.op == 2 && "
                               "arp.spa == %s))",
                               vparent, virtual_ip, virtual_ip,
                               virtual_ip);
-                ds_clear(&actions);
-                ds_put_format(&actions,
+                ds_clear(actions);
+                ds_put_format(actions,
                     "bind_vport(%s, inport); "
                     "next;",
                     op->json_key);
                 ovn_lflow_add_with_hint(lflows, op->od,
                                         S_SWITCH_IN_ARP_ND_RSP, 100,
-                                        ds_cstr(&match), ds_cstr(&actions),
+                                        ds_cstr(match), ds_cstr(actions),
                                         &vp->nbsp->header_);
             }
 
@@ -6850,20 +7153,20 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             if (check_lsp_is_up &&
                 !lsp_is_up(op->nbsp) && !lsp_is_router(op->nbsp) &&
                 strcmp(op->nbsp->type, "localport")) {
-                continue;
+                return;
             }
 
             if (lsp_is_external(op->nbsp) || op->has_unknown) {
-                continue;
+                return;
             }
 
             for (size_t i = 0; i < op->n_lsp_addrs; i++) {
                 for (size_t j = 0; j < op->lsp_addrs[i].n_ipv4_addrs; j++) {
-                    ds_clear(&match);
-                    ds_put_format(&match, "arp.tpa == %s && arp.op == 1",
+                    ds_clear(match);
+                    ds_put_format(match, "arp.tpa == %s && arp.op == 1",
                                 op->lsp_addrs[i].ipv4_addrs[j].addr_s);
-                    ds_clear(&actions);
-                    ds_put_format(&actions,
+                    ds_clear(actions);
+                    ds_put_format(actions,
                         "eth.dst = eth.src; "
                         "eth.src = %s; "
                         "arp.op = 2; /* ARP reply */ "
@@ -6878,8 +7181,8 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                         op->lsp_addrs[i].ipv4_addrs[j].addr_s);
                     ovn_lflow_add_with_hint(lflows, op->od,
                                             S_SWITCH_IN_ARP_ND_RSP, 50,
-                                            ds_cstr(&match),
-                                            ds_cstr(&actions),
+                                            ds_cstr(match),
+                                            ds_cstr(actions),
                                             &op->nbsp->header_);
 
                     /* Do not reply to an ARP request from the port that owns
@@ -6894,10 +7197,10 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                      * address is intended to detect situations where the
                      * network is not working as configured, so dropping the
                      * request would frustrate that intent.) */
-                    ds_put_format(&match, " && inport == %s", op->json_key);
+                    ds_put_format(match, " && inport == %s", op->json_key);
                     ovn_lflow_add_with_hint(lflows, op->od,
                                             S_SWITCH_IN_ARP_ND_RSP, 100,
-                                            ds_cstr(&match), "next;",
+                                            ds_cstr(match), "next;",
                                             &op->nbsp->header_);
                 }
 
@@ -6905,15 +7208,15 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                  * unicast IPv6 address and its all-nodes multicast address,
                  * but always respond with the unicast IPv6 address. */
                 for (size_t j = 0; j < op->lsp_addrs[i].n_ipv6_addrs; j++) {
-                    ds_clear(&match);
-                    ds_put_format(&match,
+                    ds_clear(match);
+                    ds_put_format(match,
                             "nd_ns && ip6.dst == {%s, %s} && nd.target == %s",
                             op->lsp_addrs[i].ipv6_addrs[j].addr_s,
                             op->lsp_addrs[i].ipv6_addrs[j].sn_addr_s,
                             op->lsp_addrs[i].ipv6_addrs[j].addr_s);
 
-                    ds_clear(&actions);
-                    ds_put_format(&actions,
+                    ds_clear(actions);
+                    ds_put_format(actions,
                             "%s { "
                             "eth.src = %s; "
                             "ip6.src = %s; "
@@ -6930,93 +7233,99 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                             op->lsp_addrs[i].ea_s);
                     ovn_lflow_add_with_hint(lflows, op->od,
                                             S_SWITCH_IN_ARP_ND_RSP, 50,
-                                            ds_cstr(&match),
-                                            ds_cstr(&actions),
+                                            ds_cstr(match),
+                                            ds_cstr(actions),
                                             &op->nbsp->header_);
 
                     /* Do not reply to a solicitation from the port that owns
                      * the address (otherwise DAD detection will fail). */
-                    ds_put_format(&match, " && inport == %s", op->json_key);
+                    ds_put_format(match, " && inport == %s", op->json_key);
                     ovn_lflow_add_with_hint(lflows, op->od,
                                             S_SWITCH_IN_ARP_ND_RSP, 100,
-                                            ds_cstr(&match), "next;",
+                                            ds_cstr(match), "next;",
                                             &op->nbsp->header_);
                 }
             }
         }
     }
+}
 
-    /* Ingress table 13: ARP/ND responder, by default goto next.
-     * (priority 0)*/
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs) {
-            continue;
-        }
-
+/* Ingress table 13: ARP/ND responder, by default goto next.
+ * (priority 0)*/
+static void
+build_lswitch_arp_nd_responder_default(struct ovn_datapath *od,
+                                       struct hmap *lflows)
+{
+    if (od->nbs) {
         ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_ND_RSP, 0, "1", "next;");
     }
+}
 
-    /* Ingress table 13: ARP/ND responder for service monitor source ip.
-     * (priority 110)*/
-    struct ovn_northd_lb *lb;
-    HMAP_FOR_EACH (lb, hmap_node, lbs) {
-        for (size_t i = 0; i < lb->n_vips; i++) {
-            struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[i];
-            if (!lb_vip_nb->lb_health_check) {
+/* Ingress table 13: ARP/ND responder for service monitor source ip.
+ * (priority 110)*/
+static void
+build_lswitch_arp_nd_service_monitor(struct ovn_northd_lb *lb,
+                                     struct hmap *lflows,
+                                     struct ds *actions,
+                                     struct ds *match)
+{
+    for (size_t i = 0; i < lb->n_vips; i++) {
+        struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[i];
+        if (!lb_vip_nb->lb_health_check) {
+            continue;
+        }
+
+        for (size_t j = 0; j < lb_vip_nb->n_backends; j++) {
+            struct ovn_northd_lb_backend *backend_nb =
+                &lb_vip_nb->backends_nb[j];
+            if (!backend_nb->op || !backend_nb->svc_mon_src_ip) {
                 continue;
             }
 
-            for (size_t j = 0; j < lb_vip_nb->n_backends; j++) {
-                struct ovn_northd_lb_backend *backend_nb =
-                    &lb_vip_nb->backends_nb[j];
-                if (!backend_nb->op || !backend_nb->svc_mon_src_ip) {
-                    continue;
-                }
-
-                ds_clear(&match);
-                ds_put_format(&match, "arp.tpa == %s && arp.op == 1",
-                              backend_nb->svc_mon_src_ip);
-                ds_clear(&actions);
-                ds_put_format(&actions,
-                    "eth.dst = eth.src; "
-                    "eth.src = %s; "
-                    "arp.op = 2; /* ARP reply */ "
-                    "arp.tha = arp.sha; "
-                    "arp.sha = %s; "
-                    "arp.tpa = arp.spa; "
-                    "arp.spa = %s; "
-                    "outport = inport; "
-                    "flags.loopback = 1; "
-                    "output;",
-                    svc_monitor_mac, svc_monitor_mac,
-                    backend_nb->svc_mon_src_ip);
-                ovn_lflow_add_with_hint(lflows,
-                                        backend_nb->op->od,
-                                        S_SWITCH_IN_ARP_ND_RSP, 110,
-                                        ds_cstr(&match), ds_cstr(&actions),
-                                        &lb->nlb->header_);
-            }
+            ds_clear(match);
+            ds_put_format(match, "arp.tpa == %s && arp.op == 1",
+                          backend_nb->svc_mon_src_ip);
+            ds_clear(actions);
+            ds_put_format(actions,
+                "eth.dst = eth.src; "
+                "eth.src = %s; "
+                "arp.op = 2; /* ARP reply */ "
+                "arp.tha = arp.sha; "
+                "arp.sha = %s; "
+                "arp.tpa = arp.spa; "
+                "arp.spa = %s; "
+                "outport = inport; "
+                "flags.loopback = 1; "
+                "output;",
+                svc_monitor_mac, svc_monitor_mac,
+                backend_nb->svc_mon_src_ip);
+            ovn_lflow_add_with_hint(lflows,
+                                    backend_nb->op->od,
+                                    S_SWITCH_IN_ARP_ND_RSP, 110,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    &lb->nlb->header_);
         }
     }
+}
 
 
-    /* Logical switch ingress table 14 and 15: DHCP options and response
-     * priority 100 flows. */
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbsp) {
-           continue;
-        }
-
+/* Logical switch ingress table 14 and 15: DHCP options and response
+ * priority 100 flows. */
+static void
+build_lswitch_dhcp_options_and_response(struct ovn_port *op,
+                                        struct hmap *lflows)
+{
+    if (op->nbsp) {
         if (!lsp_is_enabled(op->nbsp) || lsp_is_router(op->nbsp)) {
             /* Don't add the DHCP flows if the port is not enabled or if the
              * port is a router port. */
-            continue;
+            return;
         }
 
         if (!op->nbsp->dhcpv4_options && !op->nbsp->dhcpv6_options) {
             /* CMS has disabled both native DHCPv4 and DHCPv6 for this lport.
              */
-            continue;
+            return;
         }
 
         bool is_external = lsp_is_external(op->nbsp);
@@ -7024,7 +7333,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                             !op->nbsp->ha_chassis_group)) {
             /* If it's an external port and there are no localnet ports
              * and if it doesn't belong to an HA chassis group ignore it. */
-            continue;
+            return;
         }
 
         for (size_t i = 0; i < op->n_lsp_addrs; i++) {
@@ -7047,14 +7356,35 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             }
         }
     }
+}
 
-    /* Logical switch ingress table 17 and 18: DNS lookup and response
-     * priority 100 flows.
-     */
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs || !ls_has_dns_records(od->nbs)) {
-           continue;
-        }
+/* Ingress table 14 and 15: DHCP options and response, by default goto
+ * next. (priority 0).
+ * Ingress table 16 and 17: DNS lookup and response, by default goto next.
+ * (priority 0).
+ * Ingress table 18 - External port handling, by default goto next.
+ * (priority 0). */
+static void
+build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od,
+                                        struct hmap *lflows)
+{
+    if (od->nbs) {
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_OPTIONS, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_RESPONSE, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_LOOKUP, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_EXTERNAL_PORT, 0, "1", "next;");
+    }
+}
+
+/* Logical switch ingress table 17 and 18: DNS lookup and response
+ * priority 100 flows.
+ */
+static void
+build_lswitch_dns_lookup_and_response(struct ovn_datapath *od,
+                                      struct hmap *lflows)
+{
+    if (od->nbs && ls_has_dns_records(od->nbs)) {
 
         ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_LOOKUP, 100,
                       "udp.dst == 53",
@@ -7071,47 +7401,33 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 100,
                       dns_match, dns_action);
     }
+}
 
-    /* Ingress table 14 and 15: DHCP options and response, by default goto
-     * next. (priority 0).
-     * Ingress table 16 and 17: DNS lookup and response, by default goto next.
-     * (priority 0).
-     * Ingress table 18 - External port handling, by default goto next.
-     * (priority 0). */
-
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs) {
-            continue;
-        }
-
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_OPTIONS, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_DHCP_RESPONSE, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_LOOKUP, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_DNS_RESPONSE, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_EXTERNAL_PORT, 0, "1", "next;");
-    }
-
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbsp || !lsp_is_external(op->nbsp)) {
-           continue;
-        }
+/* Table 18: External port. Drop ARP requests for router IPs from
+ * external ports on chassis not binding those ports.
+ * This makes the router pipeline run only on the chassis
+ * binding the external ports. */
+static void
+build_lswitch_external_port(struct ovn_port *op,
+                            struct hmap *lflows)
+{
+    if (op->nbsp && lsp_is_external(op->nbsp)) {
 
-        /* Table 18: External port. Drop ARP request for router ips from
-         * external ports  on chassis not binding those ports.
-         * This makes the router pipeline to be run only on the chassis
-         * binding the external ports. */
         for (size_t i = 0; i < op->od->n_localnet_ports; i++) {
             build_drop_arp_nd_flows_for_unbound_router_ports(
                 op, op->od->localnet_ports[i], lflows);
         }
     }
+}
 
-    /* Ingress table 19: Destination lookup, broadcast and multicast handling
-     * (priority 70 - 100). */
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs) {
-            continue;
-        }
+/* Ingress table 19: Destination lookup, broadcast and multicast handling
+ * (priority 70 - 100). */
+static void
+build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od,
+                                        struct hmap *lflows,
+                                        struct ds *actions)
+{
+    if (od->nbs) {
 
         ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 110,
                       "eth.dst == $svc_monitor_mac",
@@ -7120,22 +7436,22 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         struct mcast_switch_info *mcast_sw_info = &od->mcast_info.sw;
 
         if (mcast_sw_info->enabled) {
-            ds_clear(&actions);
+            ds_clear(actions);
             if (mcast_sw_info->flood_reports) {
-                ds_put_cstr(&actions,
+                ds_put_cstr(actions,
                             "clone { "
                                 "outport = \""MC_MROUTER_STATIC"\"; "
                                 "output; "
                             "};");
             }
-            ds_put_cstr(&actions, "igmp;");
+            ds_put_cstr(actions, "igmp;");
             /* Punt IGMP traffic to controller. */
             ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
-                                 "ip4 && ip.proto == 2", ds_cstr(&actions));
+                                 "ip4 && ip.proto == 2", ds_cstr(actions));
 
             /* Punt MLD traffic to controller. */
             ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 100,
-                                 "mldv1 || mldv2", ds_cstr(&actions));
+                                 "mldv1 || mldv2", ds_cstr(actions));
 
             /* Flood all IP multicast traffic destined to 224.0.0.X to all
              * ports - RFC 4541, section 2.1.2, item 2.
@@ -7157,10 +7473,10 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
              * handled by the L2 multicast flow.
              */
             if (!mcast_sw_info->flood_unregistered) {
-                ds_clear(&actions);
+                ds_clear(actions);
 
                 if (mcast_sw_info->flood_relay) {
-                    ds_put_cstr(&actions,
+                    ds_put_cstr(actions,
                                 "clone { "
                                     "outport = \""MC_MROUTER_FLOOD"\"; "
                                     "output; "
@@ -7168,7 +7484,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                 }
 
                 if (mcast_sw_info->flood_static) {
-                    ds_put_cstr(&actions, "outport =\""MC_STATIC"\"; output;");
+                    ds_put_cstr(actions, "outport =\""MC_STATIC"\"; output;");
                 }
 
                 /* Explicitly drop the traffic if relay or static flooding
@@ -7176,30 +7492,33 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                  */
                 if (!mcast_sw_info->flood_relay &&
                         !mcast_sw_info->flood_static) {
-                    ds_put_cstr(&actions, "drop;");
+                    ds_put_cstr(actions, "drop;");
                 }
 
                 ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 80,
                                      "ip4.mcast || ip6.mcast",
-                                     ds_cstr(&actions));
+                                     ds_cstr(actions));
             }
         }
 
         ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 70, "eth.mcast",
                              "outport = \""MC_FLOOD"\"; output;");
     }
+}
 
-    /* Ingress table 19: Add IP multicast flows learnt from IGMP/MLD
-     * (priority 90). */
-    struct ovn_igmp_group *igmp_group;
 
-    HMAP_FOR_EACH (igmp_group, hmap_node, igmp_groups) {
-        if (!igmp_group->datapath) {
-            continue;
-        }
+/* Ingress table 19: Add IP multicast flows learnt from IGMP/MLD
+ * (priority 90). */
+static void
+build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
+                                struct hmap *lflows,
+                                struct ds *actions,
+                                struct ds *match)
+{
+    if (igmp_group->datapath) {
 
-        ds_clear(&match);
-        ds_clear(&actions);
+        ds_clear(match);
+        ds_clear(actions);
 
         struct mcast_switch_info *mcast_sw_info =
             &igmp_group->datapath->mcast_info.sw;
@@ -7211,57 +7530,62 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             ovs_be32 group_address =
                 in6_addr_get_mapped_ipv4(&igmp_group->address);
             if (ip_is_local_multicast(group_address)) {
-                continue;
+                return;
             }
 
             if (mcast_sw_info->active_v4_flows >= mcast_sw_info->table_size) {
-                continue;
+                return;
             }
             mcast_sw_info->active_v4_flows++;
-            ds_put_format(&match, "eth.mcast && ip4 && ip4.dst == %s ",
+            ds_put_format(match, "eth.mcast && ip4 && ip4.dst == %s ",
                           igmp_group->mcgroup.name);
         } else {
             /* RFC 4291, section 2.7.1: Skip groups that correspond to all
              * hosts.
              */
             if (ipv6_is_all_hosts(&igmp_group->address)) {
-                continue;
+                return;
             }
             if (mcast_sw_info->active_v6_flows >= mcast_sw_info->table_size) {
-                continue;
+                return;
             }
             mcast_sw_info->active_v6_flows++;
-            ds_put_format(&match, "eth.mcast && ip6 && ip6.dst == %s ",
+            ds_put_format(match, "eth.mcast && ip6 && ip6.dst == %s ",
                           igmp_group->mcgroup.name);
         }
 
         /* Also flood traffic to all multicast routers with relay enabled. */
         if (mcast_sw_info->flood_relay) {
-            ds_put_cstr(&actions,
+            ds_put_cstr(actions,
                         "clone { "
                             "outport = \""MC_MROUTER_FLOOD "\"; "
                             "output; "
                         "};");
         }
         if (mcast_sw_info->flood_static) {
-            ds_put_cstr(&actions,
+            ds_put_cstr(actions,
                         "clone { "
                             "outport =\""MC_STATIC"\"; "
                             "output; "
                         "};");
         }
-        ds_put_format(&actions, "outport = \"%s\"; output; ",
+        ds_put_format(actions, "outport = \"%s\"; output; ",
                       igmp_group->mcgroup.name);
 
         ovn_lflow_add_unique(lflows, igmp_group->datapath, S_SWITCH_IN_L2_LKUP,
-                             90, ds_cstr(&match), ds_cstr(&actions));
+                             90, ds_cstr(match), ds_cstr(actions));
     }
+}
 
-    /* Ingress table 19: Destination lookup, unicast handling (priority 50), */
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbsp || lsp_is_external(op->nbsp)) {
-            continue;
-        }
+/* Ingress table 19: Destination lookup, unicast handling (priority 50), */
+static void
+build_lswitch_ip_unicast_lookup(struct ovn_port *op,
+                                struct hmap *lflows,
+                                struct hmap *mcgroups,
+                                struct ds *actions,
+                                struct ds *match)
+{
+    if (op->nbsp && (!lsp_is_external(op->nbsp))) {
 
         /* For ports connected to logical routers add flows to bypass the
          * broadcast flooding of ARP/ND requests in table 19. We direct the
@@ -7279,15 +7603,15 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             struct eth_addr mac;
             if (ovs_scan(op->nbsp->addresses[i],
                         ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))) {
-                ds_clear(&match);
-                ds_put_format(&match, "eth.dst == "ETH_ADDR_FMT,
+                ds_clear(match);
+                ds_put_format(match, "eth.dst == "ETH_ADDR_FMT,
                               ETH_ADDR_ARGS(mac));
 
-                ds_clear(&actions);
-                ds_put_format(&actions, "outport = %s; output;", op->json_key);
+                ds_clear(actions);
+                ds_put_format(actions, "outport = %s; output;", op->json_key);
                 ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_L2_LKUP,
-                                        50, ds_cstr(&match),
-                                        ds_cstr(&actions),
+                                        50, ds_cstr(match),
+                                        ds_cstr(actions),
                                         &op->nbsp->header_);
             } else if (!strcmp(op->nbsp->addresses[i], "unknown")) {
                 if (lsp_is_enabled(op->nbsp)) {
@@ -7300,15 +7624,15 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                             ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))) {
                     continue;
                 }
-                ds_clear(&match);
-                ds_put_format(&match, "eth.dst == "ETH_ADDR_FMT,
+                ds_clear(match);
+                ds_put_format(match, "eth.dst == "ETH_ADDR_FMT,
                               ETH_ADDR_ARGS(mac));
 
-                ds_clear(&actions);
-                ds_put_format(&actions, "outport = %s; output;", op->json_key);
+                ds_clear(actions);
+                ds_put_format(actions, "outport = %s; output;", op->json_key);
                 ovn_lflow_add_with_hint(lflows, op->od, S_SWITCH_IN_L2_LKUP,
-                                        50, ds_cstr(&match),
-                                        ds_cstr(&actions),
+                                        50, ds_cstr(match),
+                                        ds_cstr(actions),
                                         &op->nbsp->header_);
             } else if (!strcmp(op->nbsp->addresses[i], "router")) {
                 if (!op->peer || !op->peer->nbrp
@@ -7316,8 +7640,8 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                             ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))) {
                     continue;
                 }
-                ds_clear(&match);
-                ds_put_format(&match, "eth.dst == "ETH_ADDR_FMT,
+                ds_clear(match);
+                ds_put_format(match, "eth.dst == "ETH_ADDR_FMT,
                               ETH_ADDR_ARGS(mac));
                 if (op->peer->od->l3dgw_port
                     && op->peer->od->l3redirect_port
@@ -7343,16 +7667,16 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                     }
 
                     if (add_chassis_resident_check) {
-                        ds_put_format(&match, " && is_chassis_resident(%s)",
+                        ds_put_format(match, " && is_chassis_resident(%s)",
                                       op->peer->od->l3redirect_port->json_key);
                     }
                 }
 
-                ds_clear(&actions);
-                ds_put_format(&actions, "outport = %s; output;", op->json_key);
+                ds_clear(actions);
+                ds_put_format(actions, "outport = %s; output;", op->json_key);
                 ovn_lflow_add_with_hint(lflows, op->od,
                                         S_SWITCH_IN_L2_LKUP, 50,
-                                        ds_cstr(&match), ds_cstr(&actions),
+                                        ds_cstr(match), ds_cstr(actions),
                                         &op->nbsp->header_);
 
                 /* Add ethernet addresses specified in NAT rules on
@@ -7366,19 +7690,19 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                             && nat->logical_port && nat->external_mac
                             && eth_addr_from_string(nat->external_mac, &mac)) {
 
-                            ds_clear(&match);
-                            ds_put_format(&match, "eth.dst == "ETH_ADDR_FMT
+                            ds_clear(match);
+                            ds_put_format(match, "eth.dst == "ETH_ADDR_FMT
                                           " && is_chassis_resident(\"%s\")",
                                           ETH_ADDR_ARGS(mac),
                                           nat->logical_port);
 
-                            ds_clear(&actions);
-                            ds_put_format(&actions, "outport = %s; output;",
+                            ds_clear(actions);
+                            ds_put_format(actions, "outport = %s; output;",
                                           op->json_key);
                             ovn_lflow_add_with_hint(lflows, op->od,
                                                     S_SWITCH_IN_L2_LKUP, 50,
-                                                    ds_cstr(&match),
-                                                    ds_cstr(&actions),
+                                                    ds_cstr(match),
+                                                    ds_cstr(actions),
                                                     &op->nbsp->header_);
                         }
                     }
@@ -7392,71 +7716,202 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
             }
         }
     }
+}
 
-    /* Ingress table 19: Destination lookup for unknown MACs (priority 0). */
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbs) {
+struct bfd_entry {
+    struct hmap_node hmap_node;
+
+    const struct sbrec_bfd *sb_bt;
+
+    bool ref;
+};
+
+static struct bfd_entry *
+bfd_port_lookup(struct hmap *bfd_map, const char *logical_port,
+                const char *dst_ip)
+{
+    struct bfd_entry *bfd_e;
+    uint32_t hash;
+
+    hash = hash_string(dst_ip, 0);
+    hash = hash_string(logical_port, hash);
+    HMAP_FOR_EACH_WITH_HASH (bfd_e, hmap_node, hash, bfd_map) {
+        if (!strcmp(bfd_e->sb_bt->logical_port, logical_port) &&
+            !strcmp(bfd_e->sb_bt->dst_ip, dst_ip)) {
+            return bfd_e;
+        }
+    }
+    return NULL;
+}
+
+static void
+bfd_cleanup_connections(struct northd_context *ctx, struct hmap *bfd_map)
+{
+    const struct nbrec_bfd *nb_bt;
+    struct bfd_entry *bfd_e;
+
+    NBREC_BFD_FOR_EACH (nb_bt, ctx->ovnnb_idl) {
+        bfd_e = bfd_port_lookup(bfd_map, nb_bt->logical_port, nb_bt->dst_ip);
+        if (!bfd_e) {
             continue;
         }
 
-        if (od->has_unknown) {
-            ovn_lflow_add_unique(lflows, od, S_SWITCH_IN_L2_LKUP, 0, "1",
-                                 "outport = \""MC_UNKNOWN"\"; output;");
+        if (!bfd_e->ref && strcmp(nb_bt->status, "admin_down")) {
+            /* No user for this BFD connection. */
+            nbrec_bfd_set_status(nb_bt, "admin_down");
         }
     }
 
-    build_lswitch_output_port_sec(ports, datapaths, lflows);
-
-    ds_destroy(&match);
-    ds_destroy(&actions);
+    HMAP_FOR_EACH_POP (bfd_e, hmap_node, bfd_map) {
+        free(bfd_e);
+    }
 }
 
-/* Build pre-ACL and ACL tables for both ingress and egress.
- * Ingress tables 3 through 10.  Egress tables 0 through 7. */
+#define BFD_DEF_MINTX       1000 /* 1s */
+#define BFD_DEF_MINRX       1000 /* 1s */
+#define BFD_DEF_DETECT_MULT 5
+
 static void
-build_lswitch_lflows_pre_acl_and_acl(struct ovn_datapath *od,
-                                     struct hmap *port_groups,
-                                     struct hmap *lflows,
-                                     struct shash *meter_groups,
-                                     struct hmap *lbs)
+build_bfd_update_sb_conf(const struct nbrec_bfd *nb_bt,
+                         const struct sbrec_bfd *sb_bt)
 {
-   if (od->nbs) {
-        build_pre_acls(od, lflows);
-        build_pre_lb(od, lflows, meter_groups, lbs);
-        build_pre_stateful(od, lflows);
-        build_acl_hints(od, lflows);
-        build_acls(od, lflows, port_groups, meter_groups);
-        build_qos(od, lflows);
-        build_lb(od, lflows);
-        build_stateful(od, lflows, lbs);
-        build_lb_hairpin(od, lflows);
+    if (strcmp(nb_bt->dst_ip, sb_bt->dst_ip)) {
+        sbrec_bfd_set_dst_ip(sb_bt, nb_bt->dst_ip);
+    }
+
+    if (strcmp(nb_bt->logical_port, sb_bt->logical_port)) {
+        sbrec_bfd_set_logical_port(sb_bt, nb_bt->logical_port);
+    }
+
+    if (strcmp(nb_bt->status, sb_bt->status)) {
+        sbrec_bfd_set_status(sb_bt, nb_bt->status);
+    }
+
+    int detect_mult = nb_bt->n_detect_mult ? nb_bt->detect_mult[0]
+                                           : BFD_DEF_DETECT_MULT;
+    if (detect_mult != sb_bt->detect_mult) {
+        sbrec_bfd_set_detect_mult(sb_bt, detect_mult);
+    }
+
+    int min_tx = nb_bt->n_min_tx ? nb_bt->min_tx[0] : BFD_DEF_MINTX;
+    if (min_tx != sb_bt->min_tx) {
+        sbrec_bfd_set_min_tx(sb_bt, min_tx);
+    }
+
+    int min_rx = nb_bt->n_min_rx ? nb_bt->min_rx[0] : BFD_DEF_MINRX;
+    if (min_rx != sb_bt->min_rx) {
+        sbrec_bfd_set_min_rx(sb_bt, min_rx);
     }
 }
 
-/* Logical switch ingress table 0: Admission control framework (priority
- * 100). */
+/* RFC 5881, section 4:
+ * The source port MUST be in the range 49152 through 65535.
+ * The same UDP source port number MUST be used for all BFD
+ * Control packets associated with a particular session.
+ * The source port number SHOULD be unique among all BFD
+ * sessions on the system.
+ */
+#define BFD_UDP_SRC_PORT_START  49152
+#define BFD_UDP_SRC_PORT_LEN    (65535 - BFD_UDP_SRC_PORT_START)
+
+static int bfd_get_unused_port(unsigned long *bfd_src_ports)
+{
+    int port;
+
+    port = bitmap_scan(bfd_src_ports, 0, 0, BFD_UDP_SRC_PORT_LEN);
+    if (port == BFD_UDP_SRC_PORT_LEN) {
+        return -ENOSPC;
+    }
+    bitmap_set1(bfd_src_ports, port);
+
+    return port + BFD_UDP_SRC_PORT_START;
+}
+
 static void
-build_lswitch_lflows_admission_control(struct ovn_datapath *od,
-                                       struct hmap *lflows)
+build_bfd_table(struct northd_context *ctx, struct hmap *bfd_connections,
+                struct hmap *ports)
 {
-    if (od->nbs) {
-        /* Logical VLANs not supported. */
-        if (!is_vlan_transparent(od)) {
-            /* Block logical VLANs. */
-            ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100,
-                          "vlan.present", "drop;");
+    struct hmap sb_only = HMAP_INITIALIZER(&sb_only);
+    const struct sbrec_bfd *sb_bt;
+    unsigned long *bfd_src_ports;
+    struct bfd_entry *bfd_e;
+    uint32_t hash;
+
+    bfd_src_ports = bitmap_allocate(BFD_UDP_SRC_PORT_LEN);
+
+    SBREC_BFD_FOR_EACH (sb_bt, ctx->ovnsb_idl) {
+        bfd_e = xmalloc(sizeof *bfd_e);
+        bfd_e->sb_bt = sb_bt;
+        hash = hash_string(sb_bt->dst_ip, 0);
+        hash = hash_string(sb_bt->logical_port, hash);
+        hmap_insert(&sb_only, &bfd_e->hmap_node, hash);
+        bitmap_set1(bfd_src_ports, sb_bt->src_port - BFD_UDP_SRC_PORT_START);
+    }
+
+    const struct nbrec_bfd *nb_bt;
+    NBREC_BFD_FOR_EACH (nb_bt, ctx->ovnnb_idl) {
+        if (!nb_bt->status) {
+            /* default state is admin_down */
+            nbrec_bfd_set_status(nb_bt, "admin_down");
         }
 
-        /* Broadcast/multicast source address is invalid. */
-        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "eth.src[40]",
-                      "drop;");
+        bfd_e = bfd_port_lookup(&sb_only, nb_bt->logical_port, nb_bt->dst_ip);
+        if (!bfd_e) {
+            int udp_src = bfd_get_unused_port(bfd_src_ports);
+            if (udp_src < 0) {
+                continue;
+            }
 
-        /* Port security flows have priority 50
-         * (see build_lswitch_input_port_sec()) and will continue
-         * to the next table if packet source is acceptable. */
+            sb_bt = sbrec_bfd_insert(ctx->ovnsb_txn);
+            sbrec_bfd_set_logical_port(sb_bt, nb_bt->logical_port);
+            sbrec_bfd_set_dst_ip(sb_bt, nb_bt->dst_ip);
+            sbrec_bfd_set_disc(sb_bt, 1 + random_uint32());
+            sbrec_bfd_set_src_port(sb_bt, udp_src);
+            sbrec_bfd_set_status(sb_bt, nb_bt->status);
+
+            int min_tx = nb_bt->n_min_tx ? nb_bt->min_tx[0] : BFD_DEF_MINTX;
+            sbrec_bfd_set_min_tx(sb_bt, min_tx);
+            int min_rx = nb_bt->n_min_rx ? nb_bt->min_rx[0] : BFD_DEF_MINRX;
+            sbrec_bfd_set_min_rx(sb_bt, min_rx);
+            int d_mult = nb_bt->n_detect_mult ? nb_bt->detect_mult[0]
+                                              : BFD_DEF_DETECT_MULT;
+            sbrec_bfd_set_detect_mult(sb_bt, d_mult);
+        } else if (strcmp(bfd_e->sb_bt->status, nb_bt->status)) {
+            if (!strcmp(nb_bt->status, "admin_down") ||
+                !strcmp(bfd_e->sb_bt->status, "admin_down")) {
+                sbrec_bfd_set_status(bfd_e->sb_bt, nb_bt->status);
+            } else {
+                nbrec_bfd_set_status(nb_bt, bfd_e->sb_bt->status);
+            }
+        }
+        if (bfd_e) {
+            build_bfd_update_sb_conf(nb_bt, bfd_e->sb_bt);
+
+            hmap_remove(&sb_only, &bfd_e->hmap_node);
+            bfd_e->ref = false;
+            hash = hash_string(bfd_e->sb_bt->dst_ip, 0);
+            hash = hash_string(bfd_e->sb_bt->logical_port, hash);
+            hmap_insert(bfd_connections, &bfd_e->hmap_node, hash);
+        }
+
+        struct ovn_port *op = ovn_port_find(ports, nb_bt->logical_port);
+        if (op) {
+            op->has_bfd = true;
+        }
     }
-}
 
+    HMAP_FOR_EACH_POP (bfd_e, hmap_node, &sb_only) {
+        struct ovn_port *op = ovn_port_find(ports, bfd_e->sb_bt->logical_port);
+        if (op) {
+            op->has_bfd = false;
+        }
+        sbrec_bfd_delete(bfd_e->sb_bt);
+        free(bfd_e);
+    }
+    hmap_destroy(&sb_only);
+
+    bitmap_free(bfd_src_ports);
+}
 
 /* Returns a string of the IP address of the router port 'op' that
  * overlaps with 'ip_s".  If one is not found, returns NULL.
@@ -7549,33 +8004,39 @@ build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
     struct ds actions = DS_EMPTY_INITIALIZER;
 
     if (!strcmp(rule->action, "reroute")) {
+        ovs_assert(rule->n_nexthops <= 1);
+
+        char *nexthop =
+            (rule->n_nexthops == 1 ? rule->nexthops[0] : rule->nexthop);
         struct ovn_port *out_port = get_outport_for_routing_policy_nexthop(
-             od, ports, rule->priority, rule->nexthop);
+             od, ports, rule->priority, nexthop);
         if (!out_port) {
             return;
         }
 
-        const char *lrp_addr_s = find_lrp_member_ip(out_port, rule->nexthop);
+        const char *lrp_addr_s = find_lrp_member_ip(out_port, nexthop);
         if (!lrp_addr_s) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
             VLOG_WARN_RL(&rl, "lrp_addr not found for routing policy "
                          " priority %"PRId64" nexthop %s",
-                         rule->priority, rule->nexthop);
+                         rule->priority, nexthop);
             return;
         }
         uint32_t pkt_mark = ovn_smap_get_uint(&rule->options, "pkt_mark", 0);
         if (pkt_mark) {
             ds_put_format(&actions, "pkt.mark = %u; ", pkt_mark);
         }
-        bool is_ipv4 = strchr(rule->nexthop, '.') ? true : false;
+
+        bool is_ipv4 = strchr(nexthop, '.') ? true : false;
         ds_put_format(&actions, "%s = %s; "
                       "%s = %s; "
                       "eth.src = %s; "
                       "outport = %s; "
                       "flags.loopback = 1; "
+                      REG_ECMP_GROUP_ID" = 0; "
                       "next;",
                       is_ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6,
-                      rule->nexthop,
+                      nexthop,
                       is_ipv4 ? REG_SRC_IPV4 : REG_SRC_IPV6,
                       lrp_addr_s,
                       out_port->lrp_networks.ea_s,
@@ -7588,7 +8049,7 @@ build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
         if (pkt_mark) {
             ds_put_format(&actions, "pkt.mark = %u; ", pkt_mark);
         }
-        ds_put_cstr(&actions, "next;");
+        ds_put_cstr(&actions, REG_ECMP_GROUP_ID" = 0; next;");
     }
     ds_put_format(&match, "%s", rule->match);
 
@@ -7598,15 +8059,116 @@ build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
     ds_destroy(&actions);
 }
 
-struct parsed_route {
-    struct ovs_list list_node;
-    struct in6_addr prefix;
-    unsigned int plen;
-    bool is_src_route;
-    uint32_t hash;
-    const struct nbrec_logical_router_static_route *route;
-    bool ecmp_symmetric_reply;
-};
+static void
+build_ecmp_routing_policy_flows(struct hmap *lflows, struct ovn_datapath *od,
+                                struct hmap *ports,
+                                const struct nbrec_logical_router_policy *rule,
+                                uint16_t ecmp_group_id)
+{
+    ovs_assert(rule->n_nexthops > 1);
+
+    bool nexthops_is_ipv4 = true;
+
+    /* Check that all the nexthops belong to the same addr family before
+     * adding logical flows. */
+    for (uint16_t i = 0; i < rule->n_nexthops; i++) {
+        bool is_ipv4 = strchr(rule->nexthops[i], '.') ? true : false;
+
+        if (i == 0) {
+            nexthops_is_ipv4 = is_ipv4;
+        }
+
+        if (is_ipv4 != nexthops_is_ipv4) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "nexthop [%s] of the router policy with "
+                         "the match [%s] do not belong to the same address "
+                         "family as other next hops",
+                         rule->nexthops[i], rule->match);
+            return;
+        }
+    }
+
+    struct ds match = DS_EMPTY_INITIALIZER;
+    struct ds actions = DS_EMPTY_INITIALIZER;
+
+    for (size_t i = 0; i < rule->n_nexthops; i++) {
+        struct ovn_port *out_port = get_outport_for_routing_policy_nexthop(
+             od, ports, rule->priority, rule->nexthops[i]);
+        if (!out_port) {
+            goto cleanup;
+        }
+
+        const char *lrp_addr_s =
+            find_lrp_member_ip(out_port, rule->nexthops[i]);
+        if (!lrp_addr_s) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "lrp_addr not found for routing policy "
+                            " priority %"PRId64" nexthop %s",
+                            rule->priority, rule->nexthops[i]);
+            goto cleanup;
+        }
+
+        ds_clear(&actions);
+        uint32_t pkt_mark = ovn_smap_get_uint(&rule->options, "pkt_mark", 0);
+        if (pkt_mark) {
+            ds_put_format(&actions, "pkt.mark = %u; ", pkt_mark);
+        }
+
+        bool is_ipv4 = strchr(rule->nexthops[i], '.') ? true : false;
+
+        ds_put_format(&actions, "%s = %s; "
+                      "%s = %s; "
+                      "eth.src = %s; "
+                      "outport = %s; "
+                      "flags.loopback = 1; "
+                      "next;",
+                      is_ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6,
+                      rule->nexthops[i],
+                      is_ipv4 ? REG_SRC_IPV4 : REG_SRC_IPV6,
+                      lrp_addr_s,
+                      out_port->lrp_networks.ea_s,
+                      out_port->json_key);
+
+        ds_clear(&match);
+        ds_put_format(&match, REG_ECMP_GROUP_ID" == %"PRIu16" && "
+                      REG_ECMP_MEMBER_ID" == %"PRIuSIZE,
+                      ecmp_group_id, i + 1);
+        ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_POLICY_ECMP,
+                                100, ds_cstr(&match),
+                                ds_cstr(&actions), &rule->header_);
+    }
+
+    ds_clear(&actions);
+    ds_put_format(&actions, "%s = %"PRIu16
+                  "; %s = select(", REG_ECMP_GROUP_ID, ecmp_group_id,
+                  REG_ECMP_MEMBER_ID);
+
+    for (size_t i = 0; i < rule->n_nexthops; i++) {
+        if (i > 0) {
+            ds_put_cstr(&actions, ", ");
+        }
+
+        ds_put_format(&actions, "%"PRIuSIZE, i + 1);
+    }
+    ds_put_cstr(&actions, ");");
+    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_POLICY,
+                            rule->priority, rule->match,
+                            ds_cstr(&actions), &rule->header_);
+
+cleanup:
+    ds_destroy(&match);
+    ds_destroy(&actions);
+}
+
+struct parsed_route {
+    struct ovs_list list_node;
+    struct in6_addr prefix;
+    unsigned int plen;
+    bool is_src_route;
+    uint32_t hash;
+    const struct nbrec_logical_router_static_route *route;
+    bool ecmp_symmetric_reply;
+};
 
 static uint32_t
 route_hash(struct parsed_route *route)
@@ -7619,7 +8181,8 @@ route_hash(struct parsed_route *route)
  * Otherwise return NULL. */
 static struct parsed_route *
 parsed_routes_add(struct ovs_list *routes,
-                  const struct nbrec_logical_router_static_route *route)
+                  const struct nbrec_logical_router_static_route *route,
+                  struct hmap *bfd_connections)
 {
     /* Verify that the next hop is an IP address with an all-ones mask. */
     struct in6_addr nexthop;
@@ -7660,6 +8223,25 @@ parsed_routes_add(struct ovs_list *routes,
         return NULL;
     }
 
+    const struct nbrec_bfd *nb_bt = route->bfd;
+    if (nb_bt && !strcmp(nb_bt->dst_ip, route->nexthop)) {
+        struct bfd_entry *bfd_e;
+
+        bfd_e = bfd_port_lookup(bfd_connections, nb_bt->logical_port,
+                                nb_bt->dst_ip);
+        if (bfd_e) {
+            bfd_e->ref = true;
+        }
+
+        if (!strcmp(nb_bt->status, "admin_down")) {
+            nbrec_bfd_set_status(nb_bt, "down");
+        }
+
+        if (!strcmp(nb_bt->status, "down")) {
+            return NULL;
+        }
+    }
+
     struct parsed_route *pr = xzalloc(sizeof *pr);
     pr->prefix = prefix;
     pr->plen = plen;
@@ -8102,16 +8684,15 @@ add_route(struct hmap *lflows, const struct ovn_port *op,
     build_route_match(op_inport, network_s, plen, is_src_route, is_ipv4,
                       &match, &priority);
 
-    struct ds actions = DS_EMPTY_INITIALIZER;
-    ds_put_format(&actions, "ip.ttl--; "REG_ECMP_GROUP_ID" = 0; %s = ",
+    struct ds common_actions = DS_EMPTY_INITIALIZER;
+    ds_put_format(&common_actions, REG_ECMP_GROUP_ID" = 0; %s = ",
                   is_ipv4 ? REG_NEXT_HOP_IPV4 : REG_NEXT_HOP_IPV6);
-
     if (gateway) {
-        ds_put_cstr(&actions, gateway);
+        ds_put_cstr(&common_actions, gateway);
     } else {
-        ds_put_format(&actions, "ip%s.dst", is_ipv4 ? "4" : "6");
+        ds_put_format(&common_actions, "ip%s.dst", is_ipv4 ? "4" : "6");
     }
-    ds_put_format(&actions, "; "
+    ds_put_format(&common_actions, "; "
                   "%s = %s; "
                   "eth.src = %s; "
                   "outport = %s; "
@@ -8121,11 +8702,20 @@ add_route(struct hmap *lflows, const struct ovn_port *op,
                   lrp_addr_s,
                   op->lrp_networks.ea_s,
                   op->json_key);
+    struct ds actions = DS_EMPTY_INITIALIZER;
+    ds_put_format(&actions, "ip.ttl--; %s", ds_cstr(&common_actions));
 
     ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_ROUTING, priority,
                             ds_cstr(&match), ds_cstr(&actions),
                             stage_hint);
+    if (op->has_bfd) {
+        ds_put_format(&match, " && udp.dst == 3784");
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_ROUTING,
+                                priority + 1, ds_cstr(&match),
+                                ds_cstr(&common_actions), stage_hint);
+    }
     ds_destroy(&match);
+    ds_destroy(&common_actions);
     ds_destroy(&actions);
 }
 
@@ -8203,15 +8793,10 @@ get_force_snat_ip(struct ovn_datapath *od, const char *key_type,
         return false;
     }
 
-    if (!extract_ip_addresses(addresses, laddrs) ||
-        laddrs->n_ipv4_addrs > 1 ||
-        laddrs->n_ipv6_addrs > 1 ||
-        (laddrs->n_ipv4_addrs && laddrs->ipv4_addrs[0].plen != 32) ||
-        (laddrs->n_ipv6_addrs && laddrs->ipv6_addrs[0].plen != 128)) {
+    if (!extract_ip_address(addresses, laddrs)) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
         VLOG_WARN_RL(&rl, "bad ip %s in options of router "UUID_FMT"",
                      addresses, UUID_ARGS(&od->key));
-        destroy_lport_addresses(laddrs);
         return false;
     }
 
@@ -8221,7 +8806,7 @@ get_force_snat_ip(struct ovn_datapath *od, const char *key_type,
 static void
 add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
                    struct ds *match, struct ds *actions, int priority,
-                   bool lb_force_snat_ip, struct ovn_lb_vip *lb_vip,
+                   bool force_snat_for_lb, struct ovn_lb_vip *lb_vip,
                    const char *proto, struct nbrec_load_balancer *lb,
                    struct shash *meter_groups, struct sset *nat_entries)
 {
@@ -8230,7 +8815,7 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
 
     /* A match and actions for new connections. */
     char *new_match = xasprintf("ct.new && %s", ds_cstr(match));
-    if (lb_force_snat_ip) {
+    if (force_snat_for_lb) {
         char *new_actions = xasprintf("flags.force_snat_for_lb = 1; %s",
                                       ds_cstr(actions));
         ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, priority,
@@ -8243,7 +8828,7 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
 
     /* A match and actions for established connections. */
     char *est_match = xasprintf("ct.est && %s", ds_cstr(match));
-    if (lb_force_snat_ip) {
+    if (force_snat_for_lb) {
         ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, priority,
                                 est_match,
                                 "flags.force_snat_for_lb = 1; ct_dnat;",
@@ -8320,7 +8905,7 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
     ds_put_format(&undnat_match, ") && outport == %s && "
                  "is_chassis_resident(%s)", od->l3dgw_port->json_key,
                  od->l3redirect_port->json_key);
-    if (lb_force_snat_ip) {
+    if (force_snat_for_lb) {
         ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_UNDNAT, 120,
                                 ds_cstr(&undnat_match),
                                 "flags.force_snat_for_lb = 1; ct_dnat;",
@@ -8788,2375 +9373,2531 @@ build_lrouter_force_snat_flows(struct hmap *lflows, struct ovn_datapath *od,
 }
 
 static void
-build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
-                    struct hmap *lflows, struct shash *meter_groups,
-                    struct hmap *lbs)
+build_lrouter_force_snat_flows_op(struct ovn_port *op,
+                                  struct hmap *lflows,
+                                  struct ds *match, struct ds *actions)
 {
-    /* This flow table structure is documented in ovn-northd(8), so please
-     * update ovn-northd.8.xml if you change anything. */
+    if (!op->nbrp || !op->peer || !op->od->lb_force_snat_router_ip) {
+        return;
+    }
 
-    struct ds match = DS_EMPTY_INITIALIZER;
-    struct ds actions = DS_EMPTY_INITIALIZER;
+    if (op->lrp_networks.n_ipv4_addrs) {
+        ds_clear(match);
+        ds_clear(actions);
 
-    struct ovn_datapath *od;
-    struct ovn_port *op;
+        ds_put_format(match, "inport == %s && ip4.dst == %s",
+                      op->json_key, op->lrp_networks.ipv4_addrs[0].addr_s);
+        ovn_lflow_add(lflows, op->od, S_ROUTER_IN_UNSNAT, 110,
+                      ds_cstr(match), "ct_snat;");
 
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbr) {
-            continue;
-        }
+        ds_clear(match);
 
-        /* Priority-90-92 flows handle ARP requests and ND packets. Most are
-         * per logical port but DNAT addresses can be handled per datapath
-         * for non gateway router ports.
-         *
-         * Priority 91 and 92 flows are added for each gateway router
-         * port to handle the special cases. In case we get the packet
-         * on a regular port, just reply with the port's ETH address.
-         */
-        for (int i = 0; i < od->nbr->n_nat; i++) {
-            struct ovn_nat *nat_entry = &od->nat_entries[i];
+        /* Higher priority rules to force SNAT with the router port ip.
+         * This only takes effect when the packet has already been
+         * load balanced once. */
+        ds_put_format(match, "flags.force_snat_for_lb == 1 && ip4 && "
+                      "outport == %s", op->json_key);
+        ds_put_format(actions, "ct_snat(%s);",
+                      op->lrp_networks.ipv4_addrs[0].addr_s);
+        ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_SNAT, 110,
+                      ds_cstr(match), ds_cstr(actions));
+        if (op->lrp_networks.n_ipv4_addrs > 1) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+            VLOG_WARN_RL(&rl, "Logical router port %s is configured with "
+                              "multiple IPv4 addresses.  Only the first "
+                              "IP [%s] is considered as SNAT for load "
+                              "balancer", op->json_key,
+                              op->lrp_networks.ipv4_addrs[0].addr_s);
+        }
+    }
+
+    /* op->lrp_networks.ipv6_addrs will always have LLA and that will be
+     * last in the list. So add the flows only if n_ipv6_addrs > 1. */
+    if (op->lrp_networks.n_ipv6_addrs > 1) {
+        ds_clear(match);
+        ds_clear(actions);
 
-            /* Skip entries we failed to parse. */
-            if (!nat_entry_is_valid(nat_entry)) {
-                continue;
-            }
+        ds_put_format(match, "inport == %s && ip6.dst == %s",
+                      op->json_key, op->lrp_networks.ipv6_addrs[0].addr_s);
+        ovn_lflow_add(lflows, op->od, S_ROUTER_IN_UNSNAT, 110,
+                      ds_cstr(match), "ct_snat;");
 
-            /* Skip SNAT entries for now, we handle unique SNAT IPs separately
-             * below.
-             */
-            if (!strcmp(nat_entry->nb->type, "snat")) {
-                continue;
-            }
-            build_lrouter_nat_arp_nd_flow(od, nat_entry, lflows);
+        ds_clear(match);
+
+        /* Higher priority rules to force SNAT with the router port ip.
+         * This only takes effect when the packet has already been
+         * load balanced once. */
+        ds_put_format(match, "flags.force_snat_for_lb == 1 && ip6 && "
+                      "outport == %s", op->json_key);
+        ds_put_format(actions, "ct_snat(%s);",
+                      op->lrp_networks.ipv6_addrs[0].addr_s);
+        ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_SNAT, 110,
+                      ds_cstr(match), ds_cstr(actions));
+        if (op->lrp_networks.n_ipv6_addrs > 2) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+            VLOG_WARN_RL(&rl, "Logical router port %s is configured with "
+                              "multiple IPv6 addresses.  Only the first "
+                              "IP [%s] is considered as SNAT for load "
+                              "balancer", op->json_key,
+                              op->lrp_networks.ipv6_addrs[0].addr_s);
         }
+    }
+}
 
-        /* Now handle SNAT entries too, one per unique SNAT IP. */
-        struct shash_node *snat_snode;
-        SHASH_FOR_EACH (snat_snode, &od->snat_ips) {
-            struct ovn_snat_ip *snat_ip = snat_snode->data;
+static void
+build_lrouter_bfd_flows(struct hmap *lflows, struct ovn_port *op)
+{
+    if (!op->has_bfd) {
+        return;
+    }
 
-            if (ovs_list_is_empty(&snat_ip->snat_entries)) {
-                continue;
-            }
+    struct ds ip_list = DS_EMPTY_INITIALIZER;
+    struct ds match = DS_EMPTY_INITIALIZER;
 
-            struct ovn_nat *nat_entry =
-                CONTAINER_OF(ovs_list_front(&snat_ip->snat_entries),
-                             struct ovn_nat, ext_addr_list_node);
-            build_lrouter_nat_arp_nd_flow(od, nat_entry, lflows);
-        }
+    if (op->lrp_networks.n_ipv4_addrs) {
+        op_put_v4_networks(&ip_list, op, false);
+        ds_put_format(&match, "ip4.src == %s && udp.dst == 3784",
+                      ds_cstr(&ip_list));
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 110,
+                                ds_cstr(&match), "next; ",
+                                &op->nbrp->header_);
+        ds_clear(&match);
+        ds_put_format(&match, "ip4.dst == %s && udp.dst == 3784",
+                      ds_cstr(&ip_list));
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 110,
+                                ds_cstr(&match), "handle_bfd_msg(); ",
+                                &op->nbrp->header_);
     }
+    if (op->lrp_networks.n_ipv6_addrs) {
+        ds_clear(&ip_list);
+        ds_clear(&match);
 
-    /* Logical router ingress table 3: IP Input for IPv4. */
-    HMAP_FOR_EACH (op, key_node, ports) {
-        if (!op->nbrp) {
-            continue;
+        op_put_v6_networks(&ip_list, op);
+        ds_put_format(&match, "ip6.src == %s && udp.dst == 3784",
+                      ds_cstr(&ip_list));
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 110,
+                                ds_cstr(&match), "next; ",
+                                &op->nbrp->header_);
+        ds_clear(&match);
+        ds_put_format(&match, "ip6.dst == %s && udp.dst == 3784",
+                      ds_cstr(&ip_list));
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 110,
+                                ds_cstr(&match), "handle_bfd_msg(); ",
+                                &op->nbrp->header_);
+    }
+
+    ds_destroy(&ip_list);
+    ds_destroy(&match);
+}
+
+/* Logical router ingress Table 0: L2 Admission Control
+ * Generic admission control flows (without inport check).
+ */
+static void
+build_adm_ctrl_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows)
+{
+    if (od->nbr) {
+        /* Logical VLANs not supported.
+         * Broadcast/multicast source address is invalid. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ADMISSION, 100,
+                      "vlan.present || eth.src[40]", "drop;");
+    }
+}
+
+/* Logical router ingress Table 0: L2 Admission Control
+ * This table drops packets that the router shouldn't see at all based
+ * on their Ethernet headers.
+ */
+static void
+build_adm_ctrl_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
+{
+    if (op->nbrp) {
+        if (!lrport_is_enabled(op->nbrp)) {
+            /* Drop packets from disabled logical ports (since logical flow
+             * tables are default-drop). */
+            return;
         }
 
         if (op->derived) {
-            /* No ingress packets are accepted on a chassisredirect
-             * port, so no need to program flows for that port. */
-            continue;
+            /* No ingress packets should be received on a chassisredirect
+             * port. */
+            return;
         }
 
-        if (op->lrp_networks.n_ipv4_addrs) {
-            /* L3 admission control: drop packets that originate from an
-             * IPv4 address owned by the router or a broadcast address
-             * known to the router (priority 100). */
-            ds_clear(&match);
-            ds_put_cstr(&match, "ip4.src == ");
-            op_put_v4_networks(&match, op, true);
-            ds_put_cstr(&match, " && "REGBIT_EGRESS_LOOPBACK" == 0");
-            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100,
-                                    ds_cstr(&match), "drop;",
-                                    &op->nbrp->header_);
+        /* Store the ethernet address of the port receiving the packet.
+         * This will save us from having to match on inport further down in
+         * the pipeline.
+         */
+        ds_clear(actions);
+        ds_put_format(actions, REG_INPORT_ETH_ADDR " = %s; next;",
+                      op->lrp_networks.ea_s);
 
-            /* ICMP echo reply.  These flows reply to ICMP echo requests
-             * received for the router's IP address. Since packets only
-             * get here as part of the logical router datapath, the inport
-             * (i.e. the incoming locally attached net) does not matter.
-             * The ip.ttl also does not matter (RFC1812 section 4.2.2.9) */
-            ds_clear(&match);
-            ds_put_cstr(&match, "ip4.dst == ");
-            op_put_v4_networks(&match, op, false);
-            ds_put_cstr(&match, " && icmp4.type == 8 && icmp4.code == 0");
+        ds_clear(match);
+        ds_put_format(match, "eth.mcast && inport == %s", op->json_key);
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ADMISSION, 50,
+                                ds_cstr(match), ds_cstr(actions),
+                                &op->nbrp->header_);
 
-            const char * icmp_actions = "ip4.dst <-> ip4.src; "
-                          "ip.ttl = 255; "
-                          "icmp4.type = 0; "
-                          "flags.loopback = 1; "
-                          "next; ";
-            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90,
-                                    ds_cstr(&match), icmp_actions,
-                                    &op->nbrp->header_);
+        ds_clear(match);
+        ds_put_format(match, "eth.dst == %s && inport == %s",
+                      op->lrp_networks.ea_s, op->json_key);
+        if (op->od->l3dgw_port && op == op->od->l3dgw_port
+            && op->od->l3redirect_port) {
+            /* Traffic with eth.dst = l3dgw_port->lrp_networks.ea_s
+             * should only be received on the gateway chassis. */
+            ds_put_format(match, " && is_chassis_resident(%s)",
+                          op->od->l3redirect_port->json_key);
         }
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ADMISSION, 50,
+                                ds_cstr(match),  ds_cstr(actions),
+                                &op->nbrp->header_);
+    }
+}
 
-        /* ICMP time exceeded */
-        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
-            ds_clear(&match);
-            ds_clear(&actions);
 
-            ds_put_format(&match,
-                          "inport == %s && ip4 && "
-                          "ip.ttl == {0, 1} && !ip.later_frag", op->json_key);
-            ds_put_format(&actions,
-                          "icmp4 {"
-                          "eth.dst <-> eth.src; "
-                          "icmp4.type = 11; /* Time exceeded */ "
-                          "icmp4.code = 0; /* TTL exceeded in transit */ "
-                          "ip4.dst = ip4.src; "
-                          "ip4.src = %s; "
-                          "ip.ttl = 255; "
-                          "next; };",
-                          op->lrp_networks.ipv4_addrs[i].addr_s);
-            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 40,
-                                    ds_cstr(&match), ds_cstr(&actions),
-                                    &op->nbrp->header_);
-        }
+/* Logical router ingress Table 1 and 2: Neighbor lookup and learning
+ * lflows for logical routers. */
+static void
+build_neigh_learning_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
+{
+    if (od->nbr) {
 
-        /* ARP reply.  These flows reply to ARP requests for the router's own
-         * IP address. */
-        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
-            ds_clear(&match);
-            ds_put_format(&match, "arp.spa == %s/%u",
-                          op->lrp_networks.ipv4_addrs[i].network_s,
-                          op->lrp_networks.ipv4_addrs[i].plen);
+        /* Learn MAC bindings from ARP/IPv6 ND.
+         *
+         * For ARP packets, table LOOKUP_NEIGHBOR does a lookup for the
+         * (arp.spa, arp.sha) in the mac binding table using the 'lookup_arp'
+         * action and stores the result in REGBIT_LOOKUP_NEIGHBOR_RESULT bit.
+         * If "always_learn_from_arp_request" is set to false, it will also
+         * lookup for the (arp.spa) in the mac binding table using the
+         * "lookup_arp_ip" action for ARP request packets, and stores the
+         * result in REGBIT_LOOKUP_NEIGHBOR_IP_RESULT bit; or set that bit
+         * to "1" directly for ARP response packets.
+         *
+         * For IPv6 ND NA packets, table LOOKUP_NEIGHBOR does a lookup
+         * for the (nd.target, nd.tll) in the mac binding table using the
+         * 'lookup_nd' action and stores the result in
+         * REGBIT_LOOKUP_NEIGHBOR_RESULT bit. If
+         * "always_learn_from_arp_request" is set to false,
+         * REGBIT_LOOKUP_NEIGHBOR_IP_RESULT bit is set.
+         *
+         * For IPv6 ND NS packets, table LOOKUP_NEIGHBOR does a lookup
+         * for the (ip6.src, nd.sll) in the mac binding table using the
+         * 'lookup_nd' action and stores the result in
+         * REGBIT_LOOKUP_NEIGHBOR_RESULT bit. If
+         * "always_learn_from_arp_request" is set to false, it will also lookup
+         * for the (ip6.src) in the mac binding table using the "lookup_nd_ip"
+         * action and stores the result in REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
+         * bit.
+         *
+         * Table LEARN_NEIGHBOR learns the mac-binding using the action
+         * - 'put_arp/put_nd'. Learning mac-binding is skipped if
+         *   REGBIT_LOOKUP_NEIGHBOR_RESULT bit is set or
+         *   REGBIT_LOOKUP_NEIGHBOR_IP_RESULT is not set.
+         *
+         * */
 
-            if (op->od->l3dgw_port && op->od->l3redirect_port && op->peer
-                && op->peer->od->n_localnet_ports) {
-                bool add_chassis_resident_check = false;
-                if (op == op->od->l3dgw_port) {
-                    /* Traffic with eth.src = l3dgw_port->lrp_networks.ea_s
-                     * should only be sent from the gateway chassis, so that
-                     * upstream MAC learning points to the gateway chassis.
-                     * Also need to avoid generation of multiple ARP responses
-                     * from different chassis. */
-                    add_chassis_resident_check = true;
-                } else {
-                    /* Check if the option 'reside-on-redirect-chassis'
-                     * is set to true on the router port. If set to true
-                     * and if peer's logical switch has a localnet port, it
-                     * means the router pipeline for the packets from
-                     * peer's logical switch is be run on the chassis
-                     * hosting the gateway port and it should reply to the
-                     * ARP requests for the router port IPs.
-                     */
-                    add_chassis_resident_check = smap_get_bool(
-                        &op->nbrp->options,
-                        "reside-on-redirect-chassis", false);
-                }
+        /* Flows for LOOKUP_NEIGHBOR. */
+        bool learn_from_arp_request = smap_get_bool(&od->nbr->options,
+            "always_learn_from_arp_request", true);
+        ds_clear(actions);
+        ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
+                      " = lookup_arp(inport, arp.spa, arp.sha); %snext;",
+                      learn_from_arp_request ? "" :
+                      REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1; ");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100,
+                      "arp.op == 2", ds_cstr(actions));
 
-                if (add_chassis_resident_check) {
-                    ds_put_format(&match, " && is_chassis_resident(%s)",
-                                  op->od->l3redirect_port->json_key);
-                }
-            }
+        ds_clear(actions);
+        ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
+                      " = lookup_nd(inport, nd.target, nd.tll); %snext;",
+                      learn_from_arp_request ? "" :
+                      REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1; ");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_na",
+                      ds_cstr(actions));
 
-            build_lrouter_arp_flow(op->od, op,
-                                   op->lrp_networks.ipv4_addrs[i].addr_s,
-                                   REG_INPORT_ETH_ADDR, &match, false, 90,
-                                   &op->nbrp->header_, lflows);
-        }
+        ds_clear(actions);
+        ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
+                      " = lookup_nd(inport, ip6.src, nd.sll); %snext;",
+                      learn_from_arp_request ? "" :
+                      REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
+                      " = lookup_nd_ip(inport, ip6.src); ");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_ns",
+                      ds_cstr(actions));
 
-        /* A set to hold all load-balancer vips that need ARP responses. */
-        struct sset all_ips_v4 = SSET_INITIALIZER(&all_ips_v4);
-        struct sset all_ips_v6 = SSET_INITIALIZER(&all_ips_v6);
-        get_router_load_balancer_ips(op->od, &all_ips_v4, &all_ips_v6);
+        /* For other packet types, we can skip neighbor learning.
+         * So set REGBIT_LOOKUP_NEIGHBOR_RESULT to 1. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 0, "1",
+                      REGBIT_LOOKUP_NEIGHBOR_RESULT" = 1; next;");
 
-        const char *ip_address;
-        SSET_FOR_EACH (ip_address, &all_ips_v4) {
-            ds_clear(&match);
-            if (op == op->od->l3dgw_port) {
-                ds_put_format(&match, "is_chassis_resident(%s)",
-                              op->od->l3redirect_port->json_key);
-            }
+        /* Flows for LEARN_NEIGHBOR. */
+        /* Skip Neighbor learning if not required. */
+        ds_clear(match);
+        ds_put_format(match, REGBIT_LOOKUP_NEIGHBOR_RESULT" == 1%s",
+                      learn_from_arp_request ? "" :
+                      " || "REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" == 0");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 100,
+                      ds_cstr(match), "next;");
 
-            build_lrouter_arp_flow(op->od, op,
-                                   ip_address, REG_INPORT_ETH_ADDR,
-                                   &match, false, 90, NULL, lflows);
-        }
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90,
+                      "arp", "put_arp(inport, arp.spa, arp.sha); next;");
 
-        SSET_FOR_EACH (ip_address, &all_ips_v6) {
-            ds_clear(&match);
-            if (op == op->od->l3dgw_port) {
-                ds_put_format(&match, "is_chassis_resident(%s)",
-                              op->od->l3redirect_port->json_key);
-            }
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90,
+                      "nd_na", "put_nd(inport, nd.target, nd.tll); next;");
 
-            build_lrouter_nd_flow(op->od, op, "nd_na",
-                                  ip_address, NULL, REG_INPORT_ETH_ADDR,
-                                  &match, false, 90, NULL, lflows);
-        }
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90,
+                      "nd_ns", "put_nd(inport, ip6.src, nd.sll); next;");
+    }
 
-        sset_destroy(&all_ips_v4);
-        sset_destroy(&all_ips_v6);
+}
 
-        if (!smap_get(&op->od->nbr->options, "chassis")
-            && !op->od->l3dgw_port) {
-            /* UDP/TCP port unreachable. */
-            for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
-                ds_clear(&match);
-                ds_put_format(&match,
-                              "ip4 && ip4.dst == %s && !ip.later_frag && udp",
-                              op->lrp_networks.ipv4_addrs[i].addr_s);
-                const char *action = "icmp4 {"
-                                     "eth.dst <-> eth.src; "
-                                     "ip4.dst <-> ip4.src; "
-                                     "ip.ttl = 255; "
-                                     "icmp4.type = 3; "
-                                     "icmp4.code = 3; "
-                                     "next; };";
-                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                                        80, ds_cstr(&match), action,
-                                        &op->nbrp->header_);
+/* Logical router ingress Table 1: Neighbor lookup lflows
+ * for logical router ports. */
+static void
+build_neigh_learning_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
+{
+    if (op->nbrp) {
 
-                ds_clear(&match);
-                ds_put_format(&match,
-                              "ip4 && ip4.dst == %s && !ip.later_frag && tcp",
-                              op->lrp_networks.ipv4_addrs[i].addr_s);
-                action = "tcp_reset {"
-                         "eth.dst <-> eth.src; "
-                         "ip4.dst <-> ip4.src; "
-                         "next; };";
-                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                                        80, ds_cstr(&match), action,
-                                        &op->nbrp->header_);
+        bool learn_from_arp_request = smap_get_bool(&op->od->nbr->options,
+            "always_learn_from_arp_request", true);
 
-                ds_clear(&match);
-                ds_put_format(&match,
-                              "ip4 && ip4.dst == %s && !ip.later_frag",
+        /* Check if we need to learn mac-binding from ARP requests. */
+        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
+            if (!learn_from_arp_request) {
+                /* ARP request to this address should always get learned,
+                 * so add a priority-110 flow to set
+                 * REGBIT_LOOKUP_NEIGHBOR_IP_RESULT to 1. */
+                ds_clear(match);
+                ds_put_format(match,
+                              "inport == %s && arp.spa == %s/%u && "
+                              "arp.tpa == %s && arp.op == 1",
+                              op->json_key,
+                              op->lrp_networks.ipv4_addrs[i].network_s,
+                              op->lrp_networks.ipv4_addrs[i].plen,
                               op->lrp_networks.ipv4_addrs[i].addr_s);
-                action = "icmp4 {"
-                         "eth.dst <-> eth.src; "
-                         "ip4.dst <-> ip4.src; "
-                         "ip.ttl = 255; "
-                         "icmp4.type = 3; "
-                         "icmp4.code = 2; "
-                         "next; };";
-                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                                        70, ds_cstr(&match), action,
+                if (op->od->l3dgw_port && op == op->od->l3dgw_port
+                    && op->od->l3redirect_port) {
+                    ds_put_format(match, " && is_chassis_resident(%s)",
+                                  op->od->l3redirect_port->json_key);
+                }
+                const char *actions_s = REGBIT_LOOKUP_NEIGHBOR_RESULT
+                                  " = lookup_arp(inport, arp.spa, arp.sha); "
+                                  REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1;"
+                                  " next;";
+                ovn_lflow_add_with_hint(lflows, op->od,
+                                        S_ROUTER_IN_LOOKUP_NEIGHBOR, 110,
+                                        ds_cstr(match), actions_s,
                                         &op->nbrp->header_);
             }
+            ds_clear(match);
+            ds_put_format(match,
+                          "inport == %s && arp.spa == %s/%u && arp.op == 1",
+                          op->json_key,
+                          op->lrp_networks.ipv4_addrs[i].network_s,
+                          op->lrp_networks.ipv4_addrs[i].plen);
+            if (op->od->l3dgw_port && op == op->od->l3dgw_port
+                && op->od->l3redirect_port) {
+                ds_put_format(match, " && is_chassis_resident(%s)",
+                              op->od->l3redirect_port->json_key);
+            }
+            ds_clear(actions);
+            ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
+                          " = lookup_arp(inport, arp.spa, arp.sha); %snext;",
+                          learn_from_arp_request ? "" :
+                          REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
+                          " = lookup_arp_ip(inport, arp.spa); ");
+            ovn_lflow_add_with_hint(lflows, op->od,
+                                    S_ROUTER_IN_LOOKUP_NEIGHBOR, 100,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    &op->nbrp->header_);
         }
+    }
+}
 
-        /* Drop IP traffic destined to router owned IPs except if the IP is
-         * also a SNAT IP. Those are dropped later, in stage
-         * "lr_in_arp_resolve", if unSNAT was unsuccessful.
-         *
-         * Priority 60.
-         */
-        build_lrouter_drop_own_dest(op, S_ROUTER_IN_IP_INPUT, 60, false,
-                                    lflows);
-
-        /* ARP / ND handling for external IP addresses.
-         *
-         * DNAT and SNAT IP addresses are external IP addresses that need ARP
-         * handling.
-         *
-         * These are already taken care globally, per router. The only
-         * exception is on the l3dgw_port where we might need to use a
-         * different ETH address.
-         */
-        if (op != op->od->l3dgw_port) {
-            continue;
-        }
+/* Logical router ingress table ND_RA_OPTIONS & ND_RA_RESPONSE: IPv6 Router
+ * Adv (RA) options and response. */
+static void
+build_ND_RA_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
+{
+    if (!op->nbrp || op->nbrp->peer || !op->peer) {
+        return;
+    }
 
-        for (size_t i = 0; i < op->od->nbr->n_nat; i++) {
-            struct ovn_nat *nat_entry = &op->od->nat_entries[i];
+    if (!op->lrp_networks.n_ipv6_addrs) {
+        return;
+    }
 
-            /* Skip entries we failed to parse. */
-            if (!nat_entry_is_valid(nat_entry)) {
-                continue;
-            }
+    struct smap options;
+    smap_clone(&options, &op->sb->options);
 
-            /* Skip SNAT entries for now, we handle unique SNAT IPs separately
-             * below.
-             */
-            if (!strcmp(nat_entry->nb->type, "snat")) {
-                continue;
-            }
-            build_lrouter_port_nat_arp_nd_flow(op, nat_entry, lflows);
-        }
+    /* enable IPv6 prefix delegation */
+    bool prefix_delegation = smap_get_bool(&op->nbrp->options,
+                                           "prefix_delegation", false);
+    if (!lrport_is_enabled(op->nbrp)) {
+        prefix_delegation = false;
+    }
+    smap_add(&options, "ipv6_prefix_delegation",
+             prefix_delegation ? "true" : "false");
 
-        /* Now handle SNAT entries too, one per unique SNAT IP. */
-        struct shash_node *snat_snode;
-        SHASH_FOR_EACH (snat_snode, &op->od->snat_ips) {
-            struct ovn_snat_ip *snat_ip = snat_snode->data;
+    bool ipv6_prefix = smap_get_bool(&op->nbrp->options,
+                                     "prefix", false);
+    if (!lrport_is_enabled(op->nbrp)) {
+        ipv6_prefix = false;
+    }
+    smap_add(&options, "ipv6_prefix",
+             ipv6_prefix ? "true" : "false");
+    sbrec_port_binding_set_options(op->sb, &options);
 
-            if (ovs_list_is_empty(&snat_ip->snat_entries)) {
-                continue;
-            }
+    smap_destroy(&options);
 
-            struct ovn_nat *nat_entry =
-                CONTAINER_OF(ovs_list_front(&snat_ip->snat_entries),
-                             struct ovn_nat, ext_addr_list_node);
-            build_lrouter_port_nat_arp_nd_flow(op, nat_entry, lflows);
-        }
+    const char *address_mode = smap_get(
+        &op->nbrp->ipv6_ra_configs, "address_mode");
+
+    if (!address_mode) {
+        return;
+    }
+    if (strcmp(address_mode, "slaac") &&
+        strcmp(address_mode, "dhcpv6_stateful") &&
+        strcmp(address_mode, "dhcpv6_stateless")) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+        VLOG_WARN_RL(&rl, "Invalid address mode [%s] defined",
+                     address_mode);
+        return;
     }
 
-    /* NAT, Defrag and load balancing. */
-    HMAP_FOR_EACH (od, key_node, datapaths) {
-        if (!od->nbr) {
-            continue;
-        }
+    if (smap_get_bool(&op->nbrp->ipv6_ra_configs, "send_periodic",
+                      false)) {
+        copy_ra_to_sb(op, address_mode);
+    }
 
-        /* Packets are allowed by default. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_DEFRAG, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_UNSNAT, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_OUT_EGR_LOOP, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 0, "1", "next;");
+    ds_clear(match);
+    ds_put_format(match, "inport == %s && ip6.dst == ff02::2 && nd_rs",
+                          op->json_key);
+    ds_clear(actions);
 
-        /* Send the IPv6 NS packets to next table. When ovn-controller
-         * generates IPv6 NS (for the action - nd_ns{}), the injected
-         * packet would go through conntrack - which is not required. */
-        ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120, "nd_ns", "next;");
+    const char *mtu_s = smap_get(
+        &op->nbrp->ipv6_ra_configs, "mtu");
 
-        /* NAT rules are only valid on Gateway routers and routers with
-         * l3dgw_port (router has a port with gateway chassis
-         * specified). */
-        if (!smap_get(&od->nbr->options, "chassis") && !od->l3dgw_port) {
-            continue;
-        }
+    /* As per RFC 2460, 1280 is minimum IPv6 MTU. */
+    uint32_t mtu = (mtu_s && atoi(mtu_s) >= 1280) ? atoi(mtu_s) : 0;
 
-        struct sset nat_entries = SSET_INITIALIZER(&nat_entries);
+    ds_put_format(actions, REGBIT_ND_RA_OPTS_RESULT" = put_nd_ra_opts("
+                  "addr_mode = \"%s\", slla = %s",
+                  address_mode, op->lrp_networks.ea_s);
+    if (mtu > 0) {
+        ds_put_format(actions, ", mtu = %u", mtu);
+    }
 
-        bool dnat_force_snat_ip =
-            !lport_addresses_is_empty(&od->dnat_force_snat_addrs);
-        bool lb_force_snat_ip =
-            !lport_addresses_is_empty(&od->lb_force_snat_addrs);
+    const char *prf = smap_get_def(
+        &op->nbrp->ipv6_ra_configs, "router_preference", "MEDIUM");
+    if (strcmp(prf, "MEDIUM")) {
+        ds_put_format(actions, ", router_preference = \"%s\"", prf);
+    }
 
-        for (int i = 0; i < od->nbr->n_nat; i++) {
-            const struct nbrec_nat *nat;
+    bool add_rs_response_flow = false;
 
-            nat = od->nbr->nat[i];
+    for (size_t i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
+        if (in6_is_lla(&op->lrp_networks.ipv6_addrs[i].network)) {
+            continue;
+        }
 
-            ovs_be32 ip, mask;
-            struct in6_addr ipv6, mask_v6, v6_exact = IN6ADDR_EXACT_INIT;
-            bool is_v6 = false;
-            bool stateless = lrouter_nat_is_stateless(nat);
-            struct nbrec_address_set *allowed_ext_ips =
-                                      nat->allowed_ext_ips;
-            struct nbrec_address_set *exempted_ext_ips =
-                                      nat->exempted_ext_ips;
+        ds_put_format(actions, ", prefix = %s/%u",
+                      op->lrp_networks.ipv6_addrs[i].network_s,
+                      op->lrp_networks.ipv6_addrs[i].plen);
 
-            if (allowed_ext_ips && exempted_ext_ips) {
-                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-                VLOG_WARN_RL(&rl, "NAT rule: "UUID_FMT" not applied, since "
-                             "both allowed and exempt external ips set",
-                             UUID_ARGS(&(nat->header_.uuid)));
-                continue;
-            }
+        add_rs_response_flow = true;
+    }
 
-            char *error = ip_parse_masked(nat->external_ip, &ip, &mask);
-            if (error || mask != OVS_BE32_MAX) {
-                free(error);
-                error = ipv6_parse_masked(nat->external_ip, &ipv6, &mask_v6);
-                if (error || memcmp(&mask_v6, &v6_exact, sizeof(mask_v6))) {
-                    /* Invalid for both IPv4 and IPv6 */
-                    static struct vlog_rate_limit rl =
-                        VLOG_RATE_LIMIT_INIT(5, 1);
-                    VLOG_WARN_RL(&rl, "bad external ip %s for nat",
-                                 nat->external_ip);
-                    free(error);
-                    continue;
-                }
-                /* It was an invalid IPv4 address, but valid IPv6.
-                 * Treat the rest of the handling of this NAT rule
-                 * as IPv6. */
-                is_v6 = true;
-            }
+    if (add_rs_response_flow) {
+        ds_put_cstr(actions, "); next;");
+        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ND_RA_OPTIONS,
+                                50, ds_cstr(match), ds_cstr(actions),
+                                &op->nbrp->header_);
+        ds_clear(actions);
+        ds_clear(match);
+        ds_put_format(match, "inport == %s && ip6.dst == ff02::2 && "
+                      "nd_ra && "REGBIT_ND_RA_OPTS_RESULT, op->json_key);
 
-            /* Check the validity of nat->logical_ip. 'logical_ip' can
-             * be a subnet when the type is "snat". */
-            int cidr_bits;
-            if (is_v6) {
-                error = ipv6_parse_masked(nat->logical_ip, &ipv6, &mask_v6);
-                cidr_bits = ipv6_count_cidr_bits(&mask_v6);
-            } else {
-                error = ip_parse_masked(nat->logical_ip, &ip, &mask);
-                cidr_bits = ip_count_cidr_bits(mask);
-            }
-            if (!strcmp(nat->type, "snat")) {
-                if (error) {
-                    /* Invalid for both IPv4 and IPv6 */
-                    static struct vlog_rate_limit rl =
-                        VLOG_RATE_LIMIT_INIT(5, 1);
-                    VLOG_WARN_RL(&rl, "bad ip network or ip %s for snat "
-                                 "in router "UUID_FMT"",
-                                 nat->logical_ip, UUID_ARGS(&od->key));
-                    free(error);
-                    continue;
-                }
-            } else {
-                if (error || (!is_v6 && mask != OVS_BE32_MAX)
-                    || (is_v6 && memcmp(&mask_v6, &v6_exact,
-                                        sizeof mask_v6))) {
-                    /* Invalid for both IPv4 and IPv6 */
-                    static struct vlog_rate_limit rl =
-                        VLOG_RATE_LIMIT_INIT(5, 1);
-                    VLOG_WARN_RL(&rl, "bad ip %s for dnat in router "
-                        ""UUID_FMT"", nat->logical_ip, UUID_ARGS(&od->key));
-                    free(error);
-                    continue;
-                }
-            }
+        char ip6_str[INET6_ADDRSTRLEN + 1];
+        struct in6_addr lla;
+        in6_generate_lla(op->lrp_networks.ea, &lla);
+        memset(ip6_str, 0, sizeof(ip6_str));
+        ipv6_string_mapped(ip6_str, &lla);
+        ds_put_format(actions, "eth.dst = eth.src; eth.src = %s; "
+                      "ip6.dst = ip6.src; ip6.src = %s; "
+                      "outport = inport; flags.loopback = 1; "
+                      "output;",
+                      op->lrp_networks.ea_s, ip6_str);
+        ovn_lflow_add_with_hint(lflows, op->od,
+                                S_ROUTER_IN_ND_RA_RESPONSE, 50,
+                                ds_cstr(match), ds_cstr(actions),
+                                &op->nbrp->header_);
+    }
+}
 
-            /* For distributed router NAT, determine whether this NAT rule
-             * satisfies the conditions for distributed NAT processing. */
-            bool distributed = false;
-            struct eth_addr mac;
-            if (od->l3dgw_port && !strcmp(nat->type, "dnat_and_snat") &&
-                nat->logical_port && nat->external_mac) {
-                if (eth_addr_from_string(nat->external_mac, &mac)) {
-                    distributed = true;
-                } else {
-                    static struct vlog_rate_limit rl =
-                        VLOG_RATE_LIMIT_INIT(5, 1);
-                    VLOG_WARN_RL(&rl, "bad mac %s for dnat in router "
-                        ""UUID_FMT"", nat->external_mac, UUID_ARGS(&od->key));
-                    continue;
-                }
-            }
+/* Logical router ingress table ND_RA_OPTIONS & ND_RA_RESPONSE: RS
+ * responder, by default goto next. (priority 0). */
+static void
+build_ND_RA_flows_for_lrouter(struct ovn_datapath *od, struct hmap *lflows)
+{
+    if (od->nbr) {
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_OPTIONS, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_RESPONSE, 0, "1", "next;");
+    }
+}
 
-            /* Ingress UNSNAT table: It is for already established connections'
-             * reverse traffic. i.e., SNAT has already been done in egress
-             * pipeline and now the packet has entered the ingress pipeline as
-             * part of a reply. We undo the SNAT here.
-             *
-             * Undoing SNAT has to happen before DNAT processing.  This is
-             * because when the packet was DNATed in ingress pipeline, it did
-             * not know about the possibility of eventual additional SNAT in
-             * egress pipeline. */
-            if (!strcmp(nat->type, "snat")
-                || !strcmp(nat->type, "dnat_and_snat")) {
-                if (!od->l3dgw_port) {
-                    /* Gateway router. */
-                    ds_clear(&match);
-                    ds_clear(&actions);
-                    ds_put_format(&match, "ip && ip%s.dst == %s",
-                                  is_v6 ? "6" : "4",
-                                  nat->external_ip);
-                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                       ds_put_format(&actions, "ip%s.dst=%s; next;",
-                                     is_v6 ? "6" : "4", nat->logical_ip);
-                    } else {
-                       ds_put_cstr(&actions, "ct_snat;");
-                    }
+/* Logical router ingress table IP_ROUTING : IP Routing.
+ *
+ * A packet that arrives at this table is an IP packet that should be
+ * routed to the address in 'ip[46].dst'.
+ *
+ * For regular routes without ECMP, table IP_ROUTING sets outport to the
+ * correct output port, eth.src to the output port's MAC address, and
+ * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 to the next-hop IP address
+ * (leaving 'ip[46].dst', the packet’s final destination, unchanged), and
+ * advances to the next table.
+ *
+ * For ECMP routes, i.e. multiple routes with same policy and prefix, table
+ * IP_ROUTING remembers ECMP group id and selects a member id, and advances
+ * to table IP_ROUTING_ECMP, which sets outport, eth.src and
+ * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 for the selected ECMP member.
+ */
+static void
+build_ip_routing_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows)
+{
+    if (op->nbrp) {
 
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT,
-                                            90, ds_cstr(&match),
-                                            ds_cstr(&actions),
-                                            &nat->header_);
-                } else {
-                    /* Distributed router. */
+        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
+            add_route(lflows, op, op->lrp_networks.ipv4_addrs[i].addr_s,
+                      op->lrp_networks.ipv4_addrs[i].network_s,
+                      op->lrp_networks.ipv4_addrs[i].plen, NULL, false,
+                      &op->nbrp->header_);
+        }
 
-                    /* Traffic received on l3dgw_port is subject to NAT. */
-                    ds_clear(&match);
-                    ds_clear(&actions);
-                    ds_put_format(&match, "ip && ip%s.dst == %s"
-                                          " && inport == %s",
-                                  is_v6 ? "6" : "4",
-                                  nat->external_ip,
-                                  od->l3dgw_port->json_key);
-                    if (!distributed && od->l3redirect_port) {
-                        /* Flows for NAT rules that are centralized are only
-                         * programmed on the gateway chassis. */
-                        ds_put_format(&match, " && is_chassis_resident(%s)",
-                                      od->l3redirect_port->json_key);
-                    }
+        for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
+            add_route(lflows, op, op->lrp_networks.ipv6_addrs[i].addr_s,
+                      op->lrp_networks.ipv6_addrs[i].network_s,
+                      op->lrp_networks.ipv6_addrs[i].plen, NULL, false,
+                      &op->nbrp->header_);
+        }
+    }
+}
 
-                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                        ds_put_format(&actions, "ip%s.dst=%s; next;",
-                                      is_v6 ? "6" : "4", nat->logical_ip);
-                    } else {
-                        ds_put_cstr(&actions, "ct_snat;");
-                    }
+static void
+build_static_route_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct hmap *ports, struct hmap *bfd_connections)
+{
+    if (od->nbr) {
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP, 150,
+                      REG_ECMP_GROUP_ID" == 0", "next;");
 
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT,
-                                            100,
-                                            ds_cstr(&match), ds_cstr(&actions),
-                                            &nat->header_);
+        struct hmap ecmp_groups = HMAP_INITIALIZER(&ecmp_groups);
+        struct hmap unique_routes = HMAP_INITIALIZER(&unique_routes);
+        struct ovs_list parsed_routes = OVS_LIST_INITIALIZER(&parsed_routes);
+        struct ecmp_groups_node *group;
+        for (int i = 0; i < od->nbr->n_static_routes; i++) {
+            struct parsed_route *route =
+                parsed_routes_add(&parsed_routes, od->nbr->static_routes[i],
+                                  bfd_connections);
+            if (!route) {
+                continue;
+            }
+            group = ecmp_groups_find(&ecmp_groups, route);
+            if (group) {
+                ecmp_groups_add_route(group, route);
+            } else {
+                const struct parsed_route *existed_route =
+                    unique_routes_remove(&unique_routes, route);
+                if (existed_route) {
+                    group = ecmp_groups_add(&ecmp_groups, existed_route);
+                    if (group) {
+                        ecmp_groups_add_route(group, route);
+                    }
+                } else {
+                    unique_routes_add(&unique_routes, route);
                 }
             }
+        }
+        HMAP_FOR_EACH (group, hmap_node, &ecmp_groups) {
+            /* add a flow in IP_ROUTING, and one flow for each member in
+             * IP_ROUTING_ECMP. */
+            build_ecmp_route_flow(lflows, od, ports, group);
+        }
+        const struct unique_routes_node *ur;
+        HMAP_FOR_EACH (ur, hmap_node, &unique_routes) {
+            build_static_route_flow(lflows, od, ports, ur->route);
+        }
+        ecmp_groups_destroy(&ecmp_groups);
+        unique_routes_destroy(&unique_routes);
+        parsed_routes_destroy(&parsed_routes);
+    }
+}
 
-            /* Ingress DNAT table: Packets enter the pipeline with destination
-             * IP address that needs to be DNATted from a external IP address
-             * to a logical IP address. */
-            if (!strcmp(nat->type, "dnat")
-                || !strcmp(nat->type, "dnat_and_snat")) {
-                if (!od->l3dgw_port) {
-                    /* Gateway router. */
-                    /* Packet when it goes from the initiator to destination.
-                     * We need to set flags.loopback because the router can
-                     * send the packet back through the same interface. */
-                    ds_clear(&match);
-                    ds_put_format(&match, "ip && ip%s.dst == %s",
-                                  is_v6 ? "6" : "4",
-                                  nat->external_ip);
-                    ds_clear(&actions);
-                    if (allowed_ext_ips || exempted_ext_ips) {
-                        lrouter_nat_add_ext_ip_match(od, lflows, &match, nat,
-                                                     is_v6, true, mask);
-                    }
+/* IP Multicast lookup. Here we set the output port, adjust TTL and
+ * advance to next table (priority 500).
+ */
+static void
+build_mcast_lookup_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
+{
+    if (od->nbr) {
 
-                    if (dnat_force_snat_ip) {
-                        /* Indicate to the future tables that a DNAT has taken
-                         * place and a force SNAT needs to be done in the
-                         * Egress SNAT table. */
-                        ds_put_format(&actions,
-                                      "flags.force_snat_for_dnat = 1; ");
-                    }
+        /* Drop IPv6 multicast traffic that shouldn't be forwarded,
+         * i.e., router solicitation and router advertisement.
+         */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 550,
+                      "nd_rs || nd_ra", "drop;");
+        if (!od->mcast_info.rtr.relay) {
+            return;
+        }
 
-                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                        ds_put_format(&actions, "flags.loopback = 1; "
-                                      "ip%s.dst=%s; next;",
-                                      is_v6 ? "6" : "4", nat->logical_ip);
-                    } else {
-                        ds_put_format(&actions, "flags.loopback = 1; "
-                                      "ct_dnat(%s", nat->logical_ip);
+        struct ovn_igmp_group *igmp_group;
 
-                        if (nat->external_port_range[0]) {
-                            ds_put_format(&actions, ",%s",
-                                          nat->external_port_range);
-                        }
-                        ds_put_format(&actions, ");");
-                    }
+        LIST_FOR_EACH (igmp_group, list_node, &od->mcast_info.groups) {
+            ds_clear(match);
+            ds_clear(actions);
+            if (IN6_IS_ADDR_V4MAPPED(&igmp_group->address)) {
+                ds_put_format(match, "ip4 && ip4.dst == %s ",
+                            igmp_group->mcgroup.name);
+            } else {
+                ds_put_format(match, "ip6 && ip6.dst == %s ",
+                            igmp_group->mcgroup.name);
+            }
+            if (od->mcast_info.rtr.flood_static) {
+                ds_put_cstr(actions,
+                            "clone { "
+                                "outport = \""MC_STATIC"\"; "
+                                "ip.ttl--; "
+                                "next; "
+                            "};");
+            }
+            ds_put_format(actions, "outport = \"%s\"; ip.ttl--; next;",
+                          igmp_group->mcgroup.name);
+            ovn_lflow_add_unique(lflows, od, S_ROUTER_IN_IP_ROUTING, 500,
+                                 ds_cstr(match), ds_cstr(actions));
+        }
 
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, 100,
-                                            ds_cstr(&match), ds_cstr(&actions),
-                                            &nat->header_);
-                } else {
-                    /* Distributed router. */
+        /* If needed, flood unregistered multicast on statically configured
+         * ports. Otherwise drop any multicast traffic.
+         */
+        if (od->mcast_info.rtr.flood_static) {
+            ovn_lflow_add_unique(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
+                          "ip4.mcast || ip6.mcast",
+                          "clone { "
+                                "outport = \""MC_STATIC"\"; "
+                                "ip.ttl--; "
+                                "next; "
+                          "};");
+        } else {
+            ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
+                          "ip4.mcast || ip6.mcast", "drop;");
+        }
+    }
+}
 
-                    /* Traffic received on l3dgw_port is subject to NAT. */
-                    ds_clear(&match);
-                    ds_put_format(&match, "ip && ip%s.dst == %s"
-                                          " && inport == %s",
-                                  is_v6 ? "6" : "4",
-                                  nat->external_ip,
-                                  od->l3dgw_port->json_key);
-                    if (!distributed && od->l3redirect_port) {
-                        /* Flows for NAT rules that are centralized are only
-                         * programmed on the gateway chassis. */
-                        ds_put_format(&match, " && is_chassis_resident(%s)",
-                                      od->l3redirect_port->json_key);
-                    }
-                    ds_clear(&actions);
-                    if (allowed_ext_ips || exempted_ext_ips) {
-                        lrouter_nat_add_ext_ip_match(od, lflows, &match, nat,
-                                                     is_v6, true, mask);
-                    }
+/* Logical router ingress table POLICY: Policy.
+ *
+ * A packet that arrives at this table is an IP packet that should be
+ * permitted/denied/rerouted to the address in the rule's nexthop.
+ * This table sets outport to the correct out_port,
+ * eth.src to the output port's MAC address,
+ * and REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 to the next-hop IP address
+ * (leaving 'ip[46].dst', the packet’s final destination, unchanged), and
+ * advances to the next table for ARP/ND resolution. */
+static void
+build_ingress_policy_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct hmap *ports)
+{
+    if (od->nbr) {
+        /* This is a catch-all rule. It has the lowest priority (0)
+         * does a match-all("1") and pass-through (next) */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, 0, "1",
+                      REG_ECMP_GROUP_ID" = 0; next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY_ECMP, 150,
+                      REG_ECMP_GROUP_ID" == 0", "next;");
 
-                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                        ds_put_format(&actions, "ip%s.dst=%s; next;",
-                                      is_v6 ? "6" : "4", nat->logical_ip);
-                    } else {
-                        ds_put_format(&actions, "ct_dnat(%s", nat->logical_ip);
-                        if (nat->external_port_range[0]) {
-                            ds_put_format(&actions, ",%s",
-                                          nat->external_port_range);
-                        }
-                        ds_put_format(&actions, ");");
-                    }
+        /* Convert routing policies to flows. */
+        uint16_t ecmp_group_id = 1;
+        for (int i = 0; i < od->nbr->n_policies; i++) {
+            const struct nbrec_logical_router_policy *rule
+                = od->nbr->policies[i];
+            bool is_ecmp_reroute =
+                (!strcmp(rule->action, "reroute") && rule->n_nexthops > 1);
 
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, 100,
-                                            ds_cstr(&match), ds_cstr(&actions),
-                                            &nat->header_);
-                }
+            if (is_ecmp_reroute) {
+                build_ecmp_routing_policy_flows(lflows, od, ports, rule,
+                                                ecmp_group_id);
+                ecmp_group_id++;
+            } else {
+                build_routing_policy_flow(lflows, od, ports, rule,
+                                          &rule->header_);
             }
+        }
+    }
+}
 
-            /* ARP resolve for NAT IPs. */
-            if (od->l3dgw_port) {
-                if (!strcmp(nat->type, "snat")) {
-                    ds_clear(&match);
-                    ds_put_format(
-                        &match, "inport == %s && %s == %s",
-                        od->l3dgw_port->json_key,
-                        is_v6 ? "ip6.src" : "ip4.src", nat->external_ip);
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_INPUT,
-                                            120, ds_cstr(&match), "next;",
-                                            &nat->header_);
-                }
+/* Logical router ingress table ARP_RESOLVE: ARP Resolution. */
+static void
+build_arp_resolve_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows)
+{
+    if (od->nbr) {
+        /* Multicast packets already have the outport set so just advance to
+         * next table (priority 500). */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 500,
+                      "ip4.mcast || ip6.mcast", "next;");
 
-                if (!sset_contains(&nat_entries, nat->external_ip)) {
-                    ds_clear(&match);
-                    ds_put_format(
-                        &match, "outport == %s && %s == %s",
-                        od->l3dgw_port->json_key,
-                        is_v6 ? REG_NEXT_HOP_IPV6 : REG_NEXT_HOP_IPV4,
-                        nat->external_ip);
-                    ds_clear(&actions);
-                    ds_put_format(
-                        &actions, "eth.dst = %s; next;",
-                        distributed ? nat->external_mac :
-                        od->l3dgw_port->lrp_networks.ea_s);
-                    ovn_lflow_add_with_hint(lflows, od,
-                                            S_ROUTER_IN_ARP_RESOLVE,
-                                            100, ds_cstr(&match),
-                                            ds_cstr(&actions),
-                                            &nat->header_);
-                    sset_add(&nat_entries, nat->external_ip);
-                }
-            } else {
-                /* Add the NAT external_ip to the nat_entries even for
-                 * gateway routers. This is required for adding load balancer
-                 * flows.*/
-                sset_add(&nat_entries, nat->external_ip);
-            }
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 0, "ip4",
+                      "get_arp(outport, " REG_NEXT_HOP_IPV4 "); next;");
 
-            /* Egress UNDNAT table: It is for already established connections'
-             * reverse traffic. i.e., DNAT has already been done in ingress
-             * pipeline and now the packet has entered the egress pipeline as
-             * part of a reply. We undo the DNAT here.
-             *
-             * Note that this only applies for NAT on a distributed router.
-             * Undo DNAT on a gateway router is done in the ingress DNAT
-             * pipeline stage. */
-            if (od->l3dgw_port && (!strcmp(nat->type, "dnat")
-                || !strcmp(nat->type, "dnat_and_snat"))) {
-                ds_clear(&match);
-                ds_put_format(&match, "ip && ip%s.src == %s"
-                                      " && outport == %s",
-                              is_v6 ? "6" : "4",
-                              nat->logical_ip,
-                              od->l3dgw_port->json_key);
-                if (!distributed && od->l3redirect_port) {
-                    /* Flows for NAT rules that are centralized are only
-                     * programmed on the gateway chassis. */
-                    ds_put_format(&match, " && is_chassis_resident(%s)",
-                                  od->l3redirect_port->json_key);
-                }
-                ds_clear(&actions);
-                if (distributed) {
-                    ds_put_format(&actions, "eth.src = "ETH_ADDR_FMT"; ",
-                                  ETH_ADDR_ARGS(mac));
-                }
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 0, "ip6",
+                      "get_nd(outport, " REG_NEXT_HOP_IPV6 "); next;");
+    }
+}
 
-                if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                    ds_put_format(&actions, "ip%s.src=%s; next;",
-                                  is_v6 ? "6" : "4", nat->external_ip);
-                } else {
-                    ds_put_format(&actions, "ct_dnat;");
-                }
+/* Local router ingress table ARP_RESOLVE: ARP Resolution.
+ *
+ * Any unicast packet that reaches this table is an IP packet whose
+ * next-hop IP address is in REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6
+ * (ip4.dst/ipv6.dst is the final destination).
+ * This table resolves the IP address in
+ * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 into an output port in outport and
+ * an Ethernet address in eth.dst.
+ */
+static void
+build_arp_resolve_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
+        struct hmap *ports,
+        struct ds *match, struct ds *actions)
+{
+    if (op->nbsp && !lsp_is_enabled(op->nbsp)) {
+        return;
+    }
 
-                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_UNDNAT, 100,
-                                        ds_cstr(&match), ds_cstr(&actions),
-                                        &nat->header_);
-            }
+    if (op->nbrp) {
+        /* This is a logical router port. If next-hop IP address in
+         * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 matches IP address of this
+         * router port, then the packet is intended to eventually be sent
+         * to this logical port. Set the destination mac address using
+         * this port's mac address.
+         *
+         * The packet is still in peer's logical pipeline. So the match
+         * should be on peer's outport. */
+        if (op->peer && op->nbrp->peer) {
+            if (op->lrp_networks.n_ipv4_addrs) {
+                ds_clear(match);
+                ds_put_format(match, "outport == %s && "
+                              REG_NEXT_HOP_IPV4 " == ",
+                              op->peer->json_key);
+                op_put_v4_networks(match, op, false);
 
-            /* Egress SNAT table: Packets enter the egress pipeline with
-             * source ip address that needs to be SNATted to a external ip
-             * address. */
-            if (!strcmp(nat->type, "snat")
-                || !strcmp(nat->type, "dnat_and_snat")) {
-                if (!od->l3dgw_port) {
-                    /* Gateway router. */
-                    ds_clear(&match);
-                    ds_put_format(&match, "ip && ip%s.src == %s",
-                                  is_v6 ? "6" : "4",
-                                  nat->logical_ip);
-                    ds_clear(&actions);
+                ds_clear(actions);
+                ds_put_format(actions, "eth.dst = %s; next;",
+                              op->lrp_networks.ea_s);
+                ovn_lflow_add_with_hint(lflows, op->peer->od,
+                                        S_ROUTER_IN_ARP_RESOLVE, 100,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &op->nbrp->header_);
+            }
 
-                    if (allowed_ext_ips || exempted_ext_ips) {
-                        lrouter_nat_add_ext_ip_match(od, lflows, &match, nat,
-                                                     is_v6, false, mask);
-                    }
+            if (op->lrp_networks.n_ipv6_addrs) {
+                ds_clear(match);
+                ds_put_format(match, "outport == %s && "
+                              REG_NEXT_HOP_IPV6 " == ",
+                              op->peer->json_key);
+                op_put_v6_networks(match, op);
 
-                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                        ds_put_format(&actions, "ip%s.src=%s; next;",
-                                      is_v6 ? "6" : "4", nat->external_ip);
-                    } else {
-                        ds_put_format(&actions, "ct_snat(%s",
-                                      nat->external_ip);
+                ds_clear(actions);
+                ds_put_format(actions, "eth.dst = %s; next;",
+                              op->lrp_networks.ea_s);
+                ovn_lflow_add_with_hint(lflows, op->peer->od,
+                                        S_ROUTER_IN_ARP_RESOLVE, 100,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &op->nbrp->header_);
+            }
+        }
 
-                        if (nat->external_port_range[0]) {
-                            ds_put_format(&actions, ",%s",
-                                          nat->external_port_range);
-                        }
-                        ds_put_format(&actions, ");");
-                    }
+        if (!op->derived && op->od->l3redirect_port) {
+            const char *redirect_type = smap_get(&op->nbrp->options,
+                                                 "redirect-type");
+            if (redirect_type && !strcasecmp(redirect_type, "bridged")) {
+                /* Packet is on a non-gateway chassis and has an
+                 * unresolved ARP on a network behind a gateway-chassis
+                 * attached router port. Since the redirect type is
+                 * "bridged", instead of calling "get_arp" on this node,
+                 * we redirect the packet to the gateway chassis by
+                 * setting the destination mac to the router port mac. */
+                ds_clear(match);
+                ds_put_format(match, "outport == %s && "
+                              "!is_chassis_resident(%s)", op->json_key,
+                              op->od->l3redirect_port->json_key);
+                ds_clear(actions);
+                ds_put_format(actions, "eth.dst = %s; next;",
+                              op->lrp_networks.ea_s);
 
-                    /* The priority here is calculated such that the
-                     * nat->logical_ip with the longest mask gets a higher
-                     * priority. */
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT,
-                                            cidr_bits + 1,
-                                            ds_cstr(&match), ds_cstr(&actions),
-                                            &nat->header_);
-                } else {
-                    uint16_t priority = cidr_bits + 1;
+                ovn_lflow_add_with_hint(lflows, op->od,
+                                        S_ROUTER_IN_ARP_RESOLVE, 50,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &op->nbrp->header_);
+            }
+        }
 
-                    /* Distributed router. */
-                    ds_clear(&match);
-                    ds_put_format(&match, "ip && ip%s.src == %s"
-                                          " && outport == %s",
-                                  is_v6 ? "6" : "4",
-                                  nat->logical_ip,
-                                  od->l3dgw_port->json_key);
-                    if (!distributed && od->l3redirect_port) {
-                        /* Flows for NAT rules that are centralized are only
-                         * programmed on the gateway chassis. */
-                        priority += 128;
-                        ds_put_format(&match, " && is_chassis_resident(%s)",
-                                      od->l3redirect_port->json_key);
-                    }
-                    ds_clear(&actions);
+        /* Drop IP traffic destined to router owned IPs. Part of it is dropped
+         * in stage "lr_in_ip_input" but traffic that could have been unSNATed
+         * but didn't match any existing session might still end up here.
+         *
+         * Priority 1.
+         */
+        build_lrouter_drop_own_dest(op, S_ROUTER_IN_ARP_RESOLVE, 1, true,
+                                    lflows);
+    } else if (op->od->n_router_ports && !lsp_is_router(op->nbsp)
+               && strcmp(op->nbsp->type, "virtual")) {
+        /* This is a logical switch port that backs a VM or a container.
+         * Extract its addresses. For each of the address, go through all
+         * the router ports attached to the switch (to which this port
+         * connects) and if the address in question is reachable from the
+         * router port, add an ARP/ND entry in that router's pipeline. */
 
-                    if (allowed_ext_ips || exempted_ext_ips) {
-                        lrouter_nat_add_ext_ip_match(od, lflows, &match, nat,
-                                                     is_v6, false, mask);
+        for (size_t i = 0; i < op->n_lsp_addrs; i++) {
+            const char *ea_s = op->lsp_addrs[i].ea_s;
+            for (size_t j = 0; j < op->lsp_addrs[i].n_ipv4_addrs; j++) {
+                const char *ip_s = op->lsp_addrs[i].ipv4_addrs[j].addr_s;
+                for (size_t k = 0; k < op->od->n_router_ports; k++) {
+                    /* Get the Logical_Router_Port that the
+                     * Logical_Switch_Port is connected to, as
+                     * 'peer'. */
+                    const char *peer_name = smap_get(
+                        &op->od->router_ports[k]->nbsp->options,
+                        "router-port");
+                    if (!peer_name) {
+                        continue;
                     }
 
-                    if (distributed) {
-                        ds_put_format(&actions, "eth.src = "ETH_ADDR_FMT"; ",
-                                      ETH_ADDR_ARGS(mac));
+                    struct ovn_port *peer = ovn_port_find(ports, peer_name);
+                    if (!peer || !peer->nbrp) {
+                        continue;
                     }
 
-                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
-                        ds_put_format(&actions, "ip%s.src=%s; next;",
-                                      is_v6 ? "6" : "4", nat->external_ip);
-                    } else {
-                        ds_put_format(&actions, "ct_snat(%s",
-                                      nat->external_ip);
-                        if (nat->external_port_range[0]) {
-                            ds_put_format(&actions, ",%s",
-                                          nat->external_port_range);
-                        }
-                        ds_put_format(&actions, ");");
+                    if (!find_lrp_member_ip(peer, ip_s)) {
+                        continue;
                     }
 
-                    /* The priority here is calculated such that the
-                     * nat->logical_ip with the longest mask gets a higher
-                     * priority. */
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT,
-                                            priority, ds_cstr(&match),
-                                            ds_cstr(&actions),
-                                            &nat->header_);
+                    ds_clear(match);
+                    ds_put_format(match, "outport == %s && "
+                                  REG_NEXT_HOP_IPV4 " == %s",
+                                  peer->json_key, ip_s);
+
+                    ds_clear(actions);
+                    ds_put_format(actions, "eth.dst = %s; next;", ea_s);
+                    ovn_lflow_add_with_hint(lflows, peer->od,
+                                            S_ROUTER_IN_ARP_RESOLVE, 100,
+                                            ds_cstr(match),
+                                            ds_cstr(actions),
+                                            &op->nbsp->header_);
                 }
             }
 
-            /* Logical router ingress table 0:
-             * For NAT on a distributed router, add rules allowing
-             * ingress traffic with eth.dst matching nat->external_mac
-             * on the l3dgw_port instance where nat->logical_port is
-             * resident. */
-            if (distributed) {
-                /* Store the ethernet address of the port receiving the packet.
-                 * This will save us from having to match on inport further
-                 * down in the pipeline.
-                 */
-                ds_clear(&actions);
-                ds_put_format(&actions, REG_INPORT_ETH_ADDR " = %s; next;",
-                              od->l3dgw_port->lrp_networks.ea_s);
+            for (size_t j = 0; j < op->lsp_addrs[i].n_ipv6_addrs; j++) {
+                const char *ip_s = op->lsp_addrs[i].ipv6_addrs[j].addr_s;
+                for (size_t k = 0; k < op->od->n_router_ports; k++) {
+                    /* Get the Logical_Router_Port that the
+                     * Logical_Switch_Port is connected to, as
+                     * 'peer'. */
+                    const char *peer_name = smap_get(
+                        &op->od->router_ports[k]->nbsp->options,
+                        "router-port");
+                    if (!peer_name) {
+                        continue;
+                    }
 
-                ds_clear(&match);
-                ds_put_format(&match,
-                              "eth.dst == "ETH_ADDR_FMT" && inport == %s"
-                              " && is_chassis_resident(\"%s\")",
-                              ETH_ADDR_ARGS(mac),
-                              od->l3dgw_port->json_key,
-                              nat->logical_port);
-                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ADMISSION, 50,
-                                        ds_cstr(&match), ds_cstr(&actions),
-                                        &nat->header_);
-            }
+                    struct ovn_port *peer = ovn_port_find(ports, peer_name);
+                    if (!peer || !peer->nbrp) {
+                        continue;
+                    }
 
-            /* Ingress Gateway Redirect Table: For NAT on a distributed
-             * router, add flows that are specific to a NAT rule.  These
-             * flows indicate the presence of an applicable NAT rule that
-             * can be applied in a distributed manner.
-             * In particulr REG_SRC_IPV4/REG_SRC_IPV6 and eth.src are set to
-             * NAT external IP and NAT external mac so the ARP request
-             * generated in the following stage is sent out with proper IP/MAC
-             * src addresses.
-             */
-            if (distributed) {
-                ds_clear(&match);
-                ds_clear(&actions);
-                ds_put_format(&match,
-                              "ip%s.src == %s && outport == %s && "
-                              "is_chassis_resident(\"%s\")",
-                              is_v6 ? "6" : "4", nat->logical_ip,
-                              od->l3dgw_port->json_key, nat->logical_port);
-                ds_put_format(&actions, "eth.src = %s; %s = %s; next;",
-                              nat->external_mac,
-                              is_v6 ? REG_SRC_IPV6 : REG_SRC_IPV4,
-                              nat->external_ip);
-                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT,
-                                        100, ds_cstr(&match),
-                                        ds_cstr(&actions), &nat->header_);
-            }
+                    if (!find_lrp_member_ip(peer, ip_s)) {
+                        continue;
+                    }
 
-            /* Egress Loopback table: For NAT on a distributed router.
-             * If packets in the egress pipeline on the distributed
-             * gateway port have ip.dst matching a NAT external IP, then
-             * loop a clone of the packet back to the beginning of the
-             * ingress pipeline with inport = outport. */
-            if (od->l3dgw_port) {
-                /* Distributed router. */
-                ds_clear(&match);
-                ds_put_format(&match, "ip%s.dst == %s && outport == %s",
-                              is_v6 ? "6" : "4",
-                              nat->external_ip,
-                              od->l3dgw_port->json_key);
-                if (!distributed) {
-                    ds_put_format(&match, " && is_chassis_resident(%s)",
-                                  od->l3redirect_port->json_key);
-                } else {
-                    ds_put_format(&match, " && is_chassis_resident(\"%s\")",
-                                  nat->logical_port);
-                }
+                    ds_clear(match);
+                    ds_put_format(match, "outport == %s && "
+                                  REG_NEXT_HOP_IPV6 " == %s",
+                                  peer->json_key, ip_s);
 
-                ds_clear(&actions);
-                ds_put_format(&actions,
-                              "clone { ct_clear; "
-                              "inport = outport; outport = \"\"; "
-                              "flags = 0; flags.loopback = 1; ");
-                for (int j = 0; j < MFF_N_LOG_REGS; j++) {
-                    ds_put_format(&actions, "reg%d = 0; ", j);
+                    ds_clear(actions);
+                    ds_put_format(actions, "eth.dst = %s; next;", ea_s);
+                    ovn_lflow_add_with_hint(lflows, peer->od,
+                                            S_ROUTER_IN_ARP_RESOLVE, 100,
+                                            ds_cstr(match),
+                                            ds_cstr(actions),
+                                            &op->nbsp->header_);
                 }
-                ds_put_format(&actions, REGBIT_EGRESS_LOOPBACK" = 1; "
-                              "next(pipeline=ingress, table=%d); };",
-                              ovn_stage_get_table(S_ROUTER_IN_ADMISSION));
-                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_EGR_LOOP, 100,
-                                        ds_cstr(&match), ds_cstr(&actions),
-                                        &nat->header_);
             }
         }
+    } else if (op->od->n_router_ports && !lsp_is_router(op->nbsp)
+               && !strcmp(op->nbsp->type, "virtual")) {
+        /* This is a virtual port. Add ARP replies for the virtual ip with
+         * the mac of the present active virtual parent.
+         * If the logical port doesn't have virtual parent set in
+         * Port_Binding table, then add the flow to set eth.dst to
+         * 00:00:00:00:00:00 and advance to next table so that ARP is
+         * resolved by router pipeline using the arp{} action.
+         * The MAC_Binding entry for the virtual ip might be invalid. */
+        ovs_be32 ip;
 
-        /* Handle force SNAT options set in the gateway router. */
-        if (!od->l3dgw_port) {
-            if (dnat_force_snat_ip) {
-                if (od->dnat_force_snat_addrs.n_ipv4_addrs) {
-                    build_lrouter_force_snat_flows(lflows, od, "4",
-                        od->dnat_force_snat_addrs.ipv4_addrs[0].addr_s,
-                        "dnat");
-                }
-                if (od->dnat_force_snat_addrs.n_ipv6_addrs) {
-                    build_lrouter_force_snat_flows(lflows, od, "6",
-                        od->dnat_force_snat_addrs.ipv6_addrs[0].addr_s,
-                        "dnat");
-                }
-            }
-            if (lb_force_snat_ip) {
-                if (od->lb_force_snat_addrs.n_ipv4_addrs) {
-                    build_lrouter_force_snat_flows(lflows, od, "4",
-                        od->lb_force_snat_addrs.ipv4_addrs[0].addr_s, "lb");
-                }
-                if (od->lb_force_snat_addrs.n_ipv6_addrs) {
-                    build_lrouter_force_snat_flows(lflows, od, "6",
-                        od->lb_force_snat_addrs.ipv6_addrs[0].addr_s, "lb");
-                }
-            }
-
-            /* For gateway router, re-circulate every packet through
-            * the DNAT zone.  This helps with the following.
-            *
-            * Any packet that needs to be unDNATed in the reverse
-            * direction gets unDNATed. Ideally this could be done in
-            * the egress pipeline. But since the gateway router
-            * does not have any feature that depends on the source
-            * ip address being external IP address for IP routing,
-            * we can do it here, saving a future re-circulation. */
-            ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50,
-                          "ip", "flags.loopback = 1; ct_dnat;");
+        const char *vip = smap_get(&op->nbsp->options,
+                                   "virtual-ip");
+        const char *virtual_parents = smap_get(&op->nbsp->options,
+                                               "virtual-parents");
+        if (!vip || !virtual_parents ||
+            !ip_parse(vip, &ip) || !op->sb) {
+            return;
         }
 
-        /* Load balancing and packet defrag are only valid on
-         * Gateway routers or router with gateway port. */
-        if (!smap_get(&od->nbr->options, "chassis") && !od->l3dgw_port) {
-            sset_destroy(&nat_entries);
-            continue;
-        }
+        if (!op->sb->virtual_parent || !op->sb->virtual_parent[0] ||
+            !op->sb->chassis) {
+            /* The virtual port is not claimed yet. */
+            for (size_t i = 0; i < op->od->n_router_ports; i++) {
+                const char *peer_name = smap_get(
+                    &op->od->router_ports[i]->nbsp->options,
+                    "router-port");
+                if (!peer_name) {
+                    continue;
+                }
 
-        /* A set to hold all ips that need defragmentation and tracking. */
-        struct sset all_ips = SSET_INITIALIZER(&all_ips);
+                struct ovn_port *peer = ovn_port_find(ports, peer_name);
+                if (!peer || !peer->nbrp) {
+                    continue;
+                }
 
-        for (int i = 0; i < od->nbr->n_load_balancer; i++) {
-            struct nbrec_load_balancer *nb_lb = od->nbr->load_balancer[i];
-            struct ovn_northd_lb *lb =
-                ovn_northd_lb_find(lbs, &nb_lb->header_.uuid);
-            ovs_assert(lb);
+                if (find_lrp_member_ip(peer, vip)) {
+                    ds_clear(match);
+                    ds_put_format(match, "outport == %s && "
+                                  REG_NEXT_HOP_IPV4 " == %s",
+                                  peer->json_key, vip);
 
-            for (size_t j = 0; j < lb->n_vips; j++) {
-                struct ovn_lb_vip *lb_vip = &lb->vips[j];
-                struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[j];
-                ds_clear(&actions);
-                build_lb_vip_ct_lb_actions(lb_vip, lb_vip_nb, &actions,
-                                           lb->selection_fields);
+                    const char *arp_actions =
+                                  "eth.dst = 00:00:00:00:00:00; next;";
+                    ovn_lflow_add_with_hint(lflows, peer->od,
+                                            S_ROUTER_IN_ARP_RESOLVE, 100,
+                                            ds_cstr(match),
+                                            arp_actions,
+                                            &op->nbsp->header_);
+                    break;
+                }
+            }
+        } else {
+            struct ovn_port *vp =
+                ovn_port_find(ports, op->sb->virtual_parent);
+            if (!vp || !vp->nbsp) {
+                return;
+            }
 
-                if (!sset_contains(&all_ips, lb_vip->vip_str)) {
-                    sset_add(&all_ips, lb_vip->vip_str);
-                    /* If there are any load balancing rules, we should send
-                     * the packet to conntrack for defragmentation and
-                     * tracking.  This helps with two things.
-                     *
-                     * 1. With tracking, we can send only new connections to
-                     *    pick a DNAT ip address from a group.
-                     * 2. If there are L4 ports in load balancing rules, we
-                     *    need the defragmentation to match on L4 ports. */
-                    ds_clear(&match);
-                    if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
-                        ds_put_format(&match, "ip && ip4.dst == %s",
-                                      lb_vip->vip_str);
-                    } else {
-                        ds_put_format(&match, "ip && ip6.dst == %s",
-                                      lb_vip->vip_str);
+            for (size_t i = 0; i < vp->n_lsp_addrs; i++) {
+                bool found_vip_network = false;
+                const char *ea_s = vp->lsp_addrs[i].ea_s;
+                for (size_t j = 0; j < vp->od->n_router_ports; j++) {
+                    /* Get the Logical_Router_Port that the
+                     * Logical_Switch_Port is connected to, as
+                     * 'peer'. */
+                    const char *peer_name = smap_get(
+                        &vp->od->router_ports[j]->nbsp->options,
+                        "router-port");
+                    if (!peer_name) {
+                        continue;
                     }
-                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DEFRAG,
-                                            100, ds_cstr(&match), "ct_next;",
-                                            &nb_lb->header_);
-                }
 
-                /* Higher priority rules are added for load-balancing in DNAT
-                 * table.  For every match (on a VIP[:port]), we add two flows
-                 * via add_router_lb_flow().  One flow is for specific matching
-                 * on ct.new with an action of "ct_lb($targets);".  The other
-                 * flow is for ct.est with an action of "ct_dnat;". */
-                ds_clear(&match);
-                if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
-                    ds_put_format(&match, "ip && ip4.dst == %s",
-                                  lb_vip->vip_str);
-                } else {
-                    ds_put_format(&match, "ip && ip6.dst == %s",
-                                  lb_vip->vip_str);
-                }
+                    struct ovn_port *peer =
+                        ovn_port_find(ports, peer_name);
+                    if (!peer || !peer->nbrp) {
+                        continue;
+                    }
 
-                int prio = 110;
-                bool is_udp = nullable_string_is_equal(nb_lb->protocol, "udp");
-                bool is_sctp = nullable_string_is_equal(nb_lb->protocol,
-                                                        "sctp");
-                const char *proto = is_udp ? "udp" : is_sctp ? "sctp" : "tcp";
+                    if (!find_lrp_member_ip(peer, vip)) {
+                        continue;
+                    }
 
-                if (lb_vip->vip_port) {
-                    ds_put_format(&match, " && %s && %s.dst == %d", proto,
-                                  proto, lb_vip->vip_port);
-                    prio = 120;
+                    ds_clear(match);
+                    ds_put_format(match, "outport == %s && "
+                                  REG_NEXT_HOP_IPV4 " == %s",
+                                  peer->json_key, vip);
+
+                    ds_clear(actions);
+                    ds_put_format(actions, "eth.dst = %s; next;", ea_s);
+                    ovn_lflow_add_with_hint(lflows, peer->od,
+                                            S_ROUTER_IN_ARP_RESOLVE, 100,
+                                            ds_cstr(match),
+                                            ds_cstr(actions),
+                                            &op->nbsp->header_);
+                    found_vip_network = true;
+                    break;
                 }
 
-                if (od->l3redirect_port) {
-                    ds_put_format(&match, " && is_chassis_resident(%s)",
-                                  od->l3redirect_port->json_key);
+                if (found_vip_network) {
+                    break;
                 }
-                add_router_lb_flow(lflows, od, &match, &actions, prio,
-                                   lb_force_snat_ip, lb_vip, proto,
-                                   nb_lb, meter_groups, &nat_entries);
             }
         }
-        sset_destroy(&all_ips);
-        sset_destroy(&nat_entries);
-    }
-
-    ds_destroy(&match);
-    ds_destroy(&actions);
-}
+    } else if (lsp_is_router(op->nbsp)) {
+        /* This is a logical switch port that connects to a router. */
 
-/* Logical router ingress Table 0: L2 Admission Control
- * Generic admission control flows (without inport check).
- */
-static void
-build_adm_ctrl_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows)
-{
-    if (od->nbr) {
-        /* Logical VLANs not supported.
-         * Broadcast/multicast source address is invalid. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ADMISSION, 100,
-                      "vlan.present || eth.src[40]", "drop;");
-    }
-}
+        /* The peer of this switch port is the router port for which
+         * we need to add logical flows such that it can resolve
+         * ARP entries for all the other router ports connected to
+         * the switch in question. */
 
-/* Logical router ingress Table 0: L2 Admission Control
- * This table drops packets that the router shouldn’t see at all based
- * on their Ethernet headers.
- */
-static void
-build_adm_ctrl_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (op->nbrp) {
-        if (!lrport_is_enabled(op->nbrp)) {
-            /* Drop packets from disabled logical ports (since logical flow
-             * tables are default-drop). */
+        const char *peer_name = smap_get(&op->nbsp->options,
+                                         "router-port");
+        if (!peer_name) {
             return;
         }
 
-        if (op->derived) {
-            /* No ingress packets should be received on a chassisredirect
-             * port. */
+        struct ovn_port *peer = ovn_port_find(ports, peer_name);
+        if (!peer || !peer->nbrp) {
             return;
         }
 
-        /* Store the ethernet address of the port receiving the packet.
-         * This will save us from having to match on inport further down in
-         * the pipeline.
-         */
-        ds_clear(actions);
-        ds_put_format(actions, REG_INPORT_ETH_ADDR " = %s; next;",
-                      op->lrp_networks.ea_s);
-
-        ds_clear(match);
-        ds_put_format(match, "eth.mcast && inport == %s", op->json_key);
-        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ADMISSION, 50,
-                                ds_cstr(match), ds_cstr(actions),
-                                &op->nbrp->header_);
-
-        ds_clear(match);
-        ds_put_format(match, "eth.dst == %s && inport == %s",
-                      op->lrp_networks.ea_s, op->json_key);
-        if (op->od->l3dgw_port && op == op->od->l3dgw_port
-            && op->od->l3redirect_port) {
-            /* Traffic with eth.dst = l3dgw_port->lrp_networks.ea_s
-             * should only be received on the gateway chassis. */
-            ds_put_format(match, " && is_chassis_resident(%s)",
-                          op->od->l3redirect_port->json_key);
+        if (peer->od->nbr &&
+            smap_get_bool(&peer->od->nbr->options,
+                          "dynamic_neigh_routers", false)) {
+            return;
         }
-        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ADMISSION, 50,
-                                ds_cstr(match),  ds_cstr(actions),
-                                &op->nbrp->header_);
-    }
-}
-
-
-/* Logical router ingress Table 1 and 2: Neighbor lookup and learning
- * lflows for logical routers. */
-static void
-build_neigh_learning_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (od->nbr) {
-
-        /* Learn MAC bindings from ARP/IPv6 ND.
-         *
-         * For ARP packets, table LOOKUP_NEIGHBOR does a lookup for the
-         * (arp.spa, arp.sha) in the mac binding table using the 'lookup_arp'
-         * action and stores the result in REGBIT_LOOKUP_NEIGHBOR_RESULT bit.
-         * If "always_learn_from_arp_request" is set to false, it will also
-         * lookup for the (arp.spa) in the mac binding table using the
-         * "lookup_arp_ip" action for ARP request packets, and stores the
-         * result in REGBIT_LOOKUP_NEIGHBOR_IP_RESULT bit; or set that bit
-         * to "1" directly for ARP response packets.
-         *
-         * For IPv6 ND NA packets, table LOOKUP_NEIGHBOR does a lookup
-         * for the (nd.target, nd.tll) in the mac binding table using the
-         * 'lookup_nd' action and stores the result in
-         * REGBIT_LOOKUP_NEIGHBOR_RESULT bit. If
-         * "always_learn_from_arp_request" is set to false,
-         * REGBIT_LOOKUP_NEIGHBOR_IP_RESULT bit is set.
-         *
-         * For IPv6 ND NS packets, table LOOKUP_NEIGHBOR does a lookup
-         * for the (ip6.src, nd.sll) in the mac binding table using the
-         * 'lookup_nd' action and stores the result in
-         * REGBIT_LOOKUP_NEIGHBOR_RESULT bit. If
-         * "always_learn_from_arp_request" is set to false, it will also lookup
-         * for the (ip6.src) in the mac binding table using the "lookup_nd_ip"
-         * action and stores the result in REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
-         * bit.
-         *
-         * Table LEARN_NEIGHBOR learns the mac-binding using the action
-         * - 'put_arp/put_nd'. Learning mac-binding is skipped if
-         *   REGBIT_LOOKUP_NEIGHBOR_RESULT bit is set or
-         *   REGBIT_LOOKUP_NEIGHBOR_IP_RESULT is not set.
-         *
-         * */
-
-        /* Flows for LOOKUP_NEIGHBOR. */
-        bool learn_from_arp_request = smap_get_bool(&od->nbr->options,
-            "always_learn_from_arp_request", true);
-        ds_clear(actions);
-        ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
-                      " = lookup_arp(inport, arp.spa, arp.sha); %snext;",
-                      learn_from_arp_request ? "" :
-                      REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1; ");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100,
-                      "arp.op == 2", ds_cstr(actions));
-
-        ds_clear(actions);
-        ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
-                      " = lookup_nd(inport, nd.target, nd.tll); %snext;",
-                      learn_from_arp_request ? "" :
-                      REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1; ");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_na",
-                      ds_cstr(actions));
-
-        ds_clear(actions);
-        ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
-                      " = lookup_nd(inport, ip6.src, nd.sll); %snext;",
-                      learn_from_arp_request ? "" :
-                      REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
-                      " = lookup_nd_ip(inport, ip6.src); ");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 100, "nd_ns",
-                      ds_cstr(actions));
-
-        /* For other packet types, we can skip neighbor learning.
-         * So set REGBIT_LOOKUP_NEIGHBOR_RESULT to 1. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LOOKUP_NEIGHBOR, 0, "1",
-                      REGBIT_LOOKUP_NEIGHBOR_RESULT" = 1; next;");
-
-        /* Flows for LEARN_NEIGHBOR. */
-        /* Skip Neighbor learning if not required. */
-        ds_clear(match);
-        ds_put_format(match, REGBIT_LOOKUP_NEIGHBOR_RESULT" == 1%s",
-                      learn_from_arp_request ? "" :
-                      " || "REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" == 0");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 100,
-                      ds_cstr(match), "next;");
-
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90,
-                      "arp", "put_arp(inport, arp.spa, arp.sha); next;");
 
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90,
-                      "nd_na", "put_nd(inport, nd.target, nd.tll); next;");
-
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LEARN_NEIGHBOR, 90,
-                      "nd_ns", "put_nd(inport, ip6.src, nd.sll); next;");
-    }
+        for (size_t i = 0; i < op->od->n_router_ports; i++) {
+            const char *router_port_name = smap_get(
+                                &op->od->router_ports[i]->nbsp->options,
+                                "router-port");
+            struct ovn_port *router_port = ovn_port_find(ports,
+                                                         router_port_name);
+            if (!router_port || !router_port->nbrp) {
+                continue;
+            }
 
-}
+            /* Skip the router port under consideration. */
+            if (router_port == peer) {
+               continue;
+            }
 
-/* Logical router ingress Table 1: Neighbor lookup lflows
- * for logical router ports. */
-static void
-build_neigh_learning_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (op->nbrp) {
+            if (router_port->lrp_networks.n_ipv4_addrs) {
+                ds_clear(match);
+                ds_put_format(match, "outport == %s && "
+                              REG_NEXT_HOP_IPV4 " == ",
+                              peer->json_key);
+                op_put_v4_networks(match, router_port, false);
 
-        bool learn_from_arp_request = smap_get_bool(&op->od->nbr->options,
-            "always_learn_from_arp_request", true);
+                ds_clear(actions);
+                ds_put_format(actions, "eth.dst = %s; next;",
+                                          router_port->lrp_networks.ea_s);
+                ovn_lflow_add_with_hint(lflows, peer->od,
+                                        S_ROUTER_IN_ARP_RESOLVE, 100,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &op->nbsp->header_);
+            }
 
-        /* Check if we need to learn mac-binding from ARP requests. */
-        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
-            if (!learn_from_arp_request) {
-                /* ARP request to this address should always get learned,
-                 * so add a priority-110 flow to set
-                 * REGBIT_LOOKUP_NEIGHBOR_IP_RESULT to 1. */
+            if (router_port->lrp_networks.n_ipv6_addrs) {
                 ds_clear(match);
-                ds_put_format(match,
-                              "inport == %s && arp.spa == %s/%u && "
-                              "arp.tpa == %s && arp.op == 1",
-                              op->json_key,
-                              op->lrp_networks.ipv4_addrs[i].network_s,
-                              op->lrp_networks.ipv4_addrs[i].plen,
-                              op->lrp_networks.ipv4_addrs[i].addr_s);
-                if (op->od->l3dgw_port && op == op->od->l3dgw_port
-                    && op->od->l3redirect_port) {
-                    ds_put_format(match, " && is_chassis_resident(%s)",
-                                  op->od->l3redirect_port->json_key);
-                }
-                const char *actions_s = REGBIT_LOOKUP_NEIGHBOR_RESULT
-                                  " = lookup_arp(inport, arp.spa, arp.sha); "
-                                  REGBIT_LOOKUP_NEIGHBOR_IP_RESULT" = 1;"
-                                  " next;";
-                ovn_lflow_add_with_hint(lflows, op->od,
-                                        S_ROUTER_IN_LOOKUP_NEIGHBOR, 110,
-                                        ds_cstr(match), actions_s,
-                                        &op->nbrp->header_);
-            }
-            ds_clear(match);
-            ds_put_format(match,
-                          "inport == %s && arp.spa == %s/%u && arp.op == 1",
-                          op->json_key,
-                          op->lrp_networks.ipv4_addrs[i].network_s,
-                          op->lrp_networks.ipv4_addrs[i].plen);
-            if (op->od->l3dgw_port && op == op->od->l3dgw_port
-                && op->od->l3redirect_port) {
-                ds_put_format(match, " && is_chassis_resident(%s)",
-                              op->od->l3redirect_port->json_key);
+                ds_put_format(match, "outport == %s && "
+                              REG_NEXT_HOP_IPV6 " == ",
+                              peer->json_key);
+                op_put_v6_networks(match, router_port);
+
+                ds_clear(actions);
+                ds_put_format(actions, "eth.dst = %s; next;",
+                              router_port->lrp_networks.ea_s);
+                ovn_lflow_add_with_hint(lflows, peer->od,
+                                        S_ROUTER_IN_ARP_RESOLVE, 100,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &op->nbsp->header_);
             }
-            ds_clear(actions);
-            ds_put_format(actions, REGBIT_LOOKUP_NEIGHBOR_RESULT
-                          " = lookup_arp(inport, arp.spa, arp.sha); %snext;",
-                          learn_from_arp_request ? "" :
-                          REGBIT_LOOKUP_NEIGHBOR_IP_RESULT
-                          " = lookup_arp_ip(inport, arp.spa); ");
-            ovn_lflow_add_with_hint(lflows, op->od,
-                                    S_ROUTER_IN_LOOKUP_NEIGHBOR, 100,
-                                    ds_cstr(match), ds_cstr(actions),
-                                    &op->nbrp->header_);
         }
     }
+
 }
 
-/* Logical router ingress table ND_RA_OPTIONS & ND_RA_RESPONSE: IPv6 Router
- * Adv (RA) options and response. */
+/* Logical router ingress table CHK_PKT_LEN: Check packet length.
+ *
+ * For any IPv4 packet with outport set to the distributed gateway
+ * router port, check the packet length and store the result in the
+ * 'REGBIT_PKT_LARGER' register bit.
+ *
+ * Logical router ingress table LARGER_PKTS: Handle larger packets.
+ *
+ * For any IPv4 packet with outport set to the distributed gateway
+ * router port and the 'REGBIT_PKT_LARGER' register bit set,
+ * generate an ICMPv4 packet with type 3 (Destination Unreachable) and
+ * code 4 (Fragmentation needed).
+ * */
 static void
-build_ND_RA_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
+build_check_pkt_len_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct hmap *ports,
         struct ds *match, struct ds *actions)
 {
-    if (!op->nbrp || op->nbrp->peer || !op->peer) {
-        return;
-    }
-
-    if (!op->lrp_networks.n_ipv6_addrs) {
-        return;
-    }
-
-    struct smap options;
-    smap_clone(&options, &op->sb->options);
-
-    /* enable IPv6 prefix delegation */
-    bool prefix_delegation = smap_get_bool(&op->nbrp->options,
-                                           "prefix_delegation", false);
-    if (!lrport_is_enabled(op->nbrp)) {
-        prefix_delegation = false;
-    }
-    smap_add(&options, "ipv6_prefix_delegation",
-             prefix_delegation ? "true" : "false");
+    if (od->nbr) {
 
-    bool ipv6_prefix = smap_get_bool(&op->nbrp->options,
-                                     "prefix", false);
-    if (!lrport_is_enabled(op->nbrp)) {
-        ipv6_prefix = false;
-    }
-    smap_add(&options, "ipv6_prefix",
-             ipv6_prefix ? "true" : "false");
-    sbrec_port_binding_set_options(op->sb, &options);
+        /* Packets are allowed by default. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_CHK_PKT_LEN, 0, "1",
+                      "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_LARGER_PKTS, 0, "1",
+                      "next;");
 
-    smap_destroy(&options);
+        if (od->l3dgw_port && od->l3redirect_port) {
+            int gw_mtu = 0;
+            if (od->l3dgw_port->nbrp) {
+                 gw_mtu = smap_get_int(&od->l3dgw_port->nbrp->options,
+                                       "gateway_mtu", 0);
+            }
+            /* Add the flows only if gateway_mtu is configured. */
+            if (gw_mtu <= 0) {
+                return;
+            }
 
-    const char *address_mode = smap_get(
-        &op->nbrp->ipv6_ra_configs, "address_mode");
+            ds_clear(match);
+            ds_put_format(match, "outport == %s", od->l3dgw_port->json_key);
 
-    if (!address_mode) {
-        return;
-    }
-    if (strcmp(address_mode, "slaac") &&
-        strcmp(address_mode, "dhcpv6_stateful") &&
-        strcmp(address_mode, "dhcpv6_stateless")) {
-        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-        VLOG_WARN_RL(&rl, "Invalid address mode [%s] defined",
-                     address_mode);
-        return;
-    }
+            ds_clear(actions);
+            ds_put_format(actions,
+                          REGBIT_PKT_LARGER" = check_pkt_larger(%d);"
+                          " next;", gw_mtu + VLAN_ETH_HEADER_LEN);
+            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_CHK_PKT_LEN, 50,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    &od->l3dgw_port->nbrp->header_);
 
-    if (smap_get_bool(&op->nbrp->ipv6_ra_configs, "send_periodic",
-                      false)) {
-        copy_ra_to_sb(op, address_mode);
-    }
+            for (size_t i = 0; i < od->nbr->n_ports; i++) {
+                struct ovn_port *rp = ovn_port_find(ports,
+                                                    od->nbr->ports[i]->name);
+                if (!rp || rp == od->l3dgw_port) {
+                    continue;
+                }
 
-    ds_clear(match);
-    ds_put_format(match, "inport == %s && ip6.dst == ff02::2 && nd_rs",
-                          op->json_key);
-    ds_clear(actions);
+                if (rp->lrp_networks.ipv4_addrs) {
+                    ds_clear(match);
+                    ds_put_format(match, "inport == %s && outport == %s"
+                                  " && ip4 && "REGBIT_PKT_LARGER,
+                                  rp->json_key, od->l3dgw_port->json_key);
 
-    const char *mtu_s = smap_get(
-        &op->nbrp->ipv6_ra_configs, "mtu");
+                    ds_clear(actions);
+                    /* Set icmp4.frag_mtu to gw_mtu */
+                    ds_put_format(actions,
+                        "icmp4_error {"
+                        REGBIT_EGRESS_LOOPBACK" = 1; "
+                        "eth.dst = %s; "
+                        "ip4.dst = ip4.src; "
+                        "ip4.src = %s; "
+                        "ip.ttl = 255; "
+                        "icmp4.type = 3; /* Destination Unreachable. */ "
+                        "icmp4.code = 4; /* Frag Needed and DF was Set. */ "
+                        "icmp4.frag_mtu = %d; "
+                        "next(pipeline=ingress, table=%d); };",
+                        rp->lrp_networks.ea_s,
+                        rp->lrp_networks.ipv4_addrs[0].addr_s,
+                        gw_mtu,
+                        ovn_stage_get_table(S_ROUTER_IN_ADMISSION));
+                    ovn_lflow_add_with_hint(lflows, od,
+                                            S_ROUTER_IN_LARGER_PKTS, 50,
+                                            ds_cstr(match), ds_cstr(actions),
+                                            &rp->nbrp->header_);
+                }
 
-    /* As per RFC 2460, 1280 is minimum IPv6 MTU. */
-    uint32_t mtu = (mtu_s && atoi(mtu_s) >= 1280) ? atoi(mtu_s) : 0;
+                if (rp->lrp_networks.ipv6_addrs) {
+                    ds_clear(match);
+                    ds_put_format(match, "inport == %s && outport == %s"
+                                  " && ip6 && "REGBIT_PKT_LARGER,
+                                  rp->json_key, od->l3dgw_port->json_key);
 
-    ds_put_format(actions, REGBIT_ND_RA_OPTS_RESULT" = put_nd_ra_opts("
-                  "addr_mode = \"%s\", slla = %s",
-                  address_mode, op->lrp_networks.ea_s);
-    if (mtu > 0) {
-        ds_put_format(actions, ", mtu = %u", mtu);
+                    ds_clear(actions);
+                    /* Set icmp6.frag_mtu to gw_mtu */
+                    ds_put_format(actions,
+                        "icmp6_error {"
+                        REGBIT_EGRESS_LOOPBACK" = 1; "
+                        "eth.dst = %s; "
+                        "ip6.dst = ip6.src; "
+                        "ip6.src = %s; "
+                        "ip.ttl = 255; "
+                        "icmp6.type = 2; /* Packet Too Big. */ "
+                        "icmp6.code = 0; "
+                        "icmp6.frag_mtu = %d; "
+                        "next(pipeline=ingress, table=%d); };",
+                        rp->lrp_networks.ea_s,
+                        rp->lrp_networks.ipv6_addrs[0].addr_s,
+                        gw_mtu,
+                        ovn_stage_get_table(S_ROUTER_IN_ADMISSION));
+                    ovn_lflow_add_with_hint(lflows, od,
+                                            S_ROUTER_IN_LARGER_PKTS, 50,
+                                            ds_cstr(match), ds_cstr(actions),
+                                            &rp->nbrp->header_);
+                }
+            }
+        }
     }
+}
 
-    const char *prf = smap_get_def(
-        &op->nbrp->ipv6_ra_configs, "router_preference", "MEDIUM");
-    if (strcmp(prf, "MEDIUM")) {
-        ds_put_format(actions, ", router_preference = \"%s\"", prf);
-    }
+/* Logical router ingress table GW_REDIRECT: Gateway redirect.
+ *
+ * For traffic with outport equal to the l3dgw_port
+ * on a distributed router, this table redirects a subset
+ * of the traffic to the l3redirect_port which represents
+ * the central instance of the l3dgw_port.
+ */
+static void
+build_gateway_redirect_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
+{
+    if (od->nbr) {
+        if (od->l3dgw_port && od->l3redirect_port) {
+            const struct ovsdb_idl_row *stage_hint = NULL;
 
-    bool add_rs_response_flow = false;
+            if (od->l3dgw_port->nbrp) {
+                stage_hint = &od->l3dgw_port->nbrp->header_;
+            }
 
-    for (size_t i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
-        if (in6_is_lla(&op->lrp_networks.ipv6_addrs[i].network)) {
-            continue;
+            /* For traffic with outport == l3dgw_port, if the
+             * packet did not match any higher priority redirect
+             * rule, then the traffic is redirected to the central
+             * instance of the l3dgw_port. */
+            ds_clear(match);
+            ds_put_format(match, "outport == %s",
+                          od->l3dgw_port->json_key);
+            ds_clear(actions);
+            ds_put_format(actions, "outport = %s; next;",
+                          od->l3redirect_port->json_key);
+            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 50,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    stage_hint);
         }
 
-        ds_put_format(actions, ", prefix = %s/%u",
-                      op->lrp_networks.ipv6_addrs[i].network_s,
-                      op->lrp_networks.ipv6_addrs[i].plen);
-
-        add_rs_response_flow = true;
-    }
-
-    if (add_rs_response_flow) {
-        ds_put_cstr(actions, "); next;");
-        ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_ND_RA_OPTIONS,
-                                50, ds_cstr(match), ds_cstr(actions),
-                                &op->nbrp->header_);
-        ds_clear(actions);
-        ds_clear(match);
-        ds_put_format(match, "inport == %s && ip6.dst == ff02::2 && "
-                      "nd_ra && "REGBIT_ND_RA_OPTS_RESULT, op->json_key);
-
-        char ip6_str[INET6_ADDRSTRLEN + 1];
-        struct in6_addr lla;
-        in6_generate_lla(op->lrp_networks.ea, &lla);
-        memset(ip6_str, 0, sizeof(ip6_str));
-        ipv6_string_mapped(ip6_str, &lla);
-        ds_put_format(actions, "eth.dst = eth.src; eth.src = %s; "
-                      "ip6.dst = ip6.src; ip6.src = %s; "
-                      "outport = inport; flags.loopback = 1; "
-                      "output;",
-                      op->lrp_networks.ea_s, ip6_str);
-        ovn_lflow_add_with_hint(lflows, op->od,
-                                S_ROUTER_IN_ND_RA_RESPONSE, 50,
-                                ds_cstr(match), ds_cstr(actions),
-                                &op->nbrp->header_);
+        /* Packets are allowed by default. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 0, "1", "next;");
     }
 }
 
-/* Logical router ingress table ND_RA_OPTIONS & ND_RA_RESPONSE: RS
- * responder, by default goto next. (priority 0). */
+/* Logical router ingress table ARP_REQUEST: ARP request.
+ *
+ * In the common case where the Ethernet destination has been resolved,
+ * this table outputs the packet (priority 0).  Otherwise, it composes
+ * and sends an ARP/IPv6 NS request (priority 100). */
 static void
-build_ND_RA_flows_for_lrouter(struct ovn_datapath *od, struct hmap *lflows)
+build_arp_request_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
 {
     if (od->nbr) {
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_OPTIONS, 0, "1", "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ND_RA_RESPONSE, 0, "1", "next;");
+        for (int i = 0; i < od->nbr->n_static_routes; i++) {
+            const struct nbrec_logical_router_static_route *route;
+
+            route = od->nbr->static_routes[i];
+            struct in6_addr gw_ip6;
+            unsigned int plen;
+            char *error = ipv6_parse_cidr(route->nexthop, &gw_ip6, &plen);
+            if (error || plen != 128) {
+                free(error);
+                continue;
+            }
+
+            ds_clear(match);
+            ds_put_format(match, "eth.dst == 00:00:00:00:00:00 && "
+                          "ip6 && " REG_NEXT_HOP_IPV6 " == %s",
+                          route->nexthop);
+            struct in6_addr sn_addr;
+            struct eth_addr eth_dst;
+            in6_addr_solicited_node(&sn_addr, &gw_ip6);
+            ipv6_multicast_to_ethernet(&eth_dst, &sn_addr);
+
+            char sn_addr_s[INET6_ADDRSTRLEN + 1];
+            ipv6_string_mapped(sn_addr_s, &sn_addr);
+
+            ds_clear(actions);
+            ds_put_format(actions,
+                          "nd_ns { "
+                          "eth.dst = "ETH_ADDR_FMT"; "
+                          "ip6.dst = %s; "
+                          "nd.target = %s; "
+                          "output; "
+                          "};", ETH_ADDR_ARGS(eth_dst), sn_addr_s,
+                          route->nexthop);
+
+            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ARP_REQUEST, 200,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    &route->header_);
+        }
+
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100,
+                      "eth.dst == 00:00:00:00:00:00 && ip4",
+                      "arp { "
+                      "eth.dst = ff:ff:ff:ff:ff:ff; "
+                      "arp.spa = " REG_SRC_IPV4 "; "
+                      "arp.tpa = " REG_NEXT_HOP_IPV4 "; "
+                      "arp.op = 1; " /* ARP request */
+                      "output; "
+                      "};");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100,
+                      "eth.dst == 00:00:00:00:00:00 && ip6",
+                      "nd_ns { "
+                      "nd.target = " REG_NEXT_HOP_IPV6 "; "
+                      "output; "
+                      "};");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 0, "1", "output;");
     }
 }
 
-/* Logical router ingress table IP_ROUTING : IP Routing.
- *
- * A packet that arrives at this table is an IP packet that should be
- * routed to the address in 'ip[46].dst'.
- *
- * For regular routes without ECMP, table IP_ROUTING sets outport to the
- * correct output port, eth.src to the output port's MAC address, and
- * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 to the next-hop IP address
- * (leaving 'ip[46].dst', the packet’s final destination, unchanged), and
- * advances to the next table.
+/* Logical router egress table DELIVERY: Delivery (priority 100-110).
  *
- * For ECMP routes, i.e. multiple routes with same policy and prefix, table
- * IP_ROUTING remembers ECMP group id and selects a member id, and advances
- * to table IP_ROUTING_ECMP, which sets outport, eth.src and
- * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 for the selected ECMP member.
+ * Priority 100 rules deliver packets to enabled logical ports.
+ * Priority 110 rules match multicast packets and update the source
+ * mac before delivering to enabled logical ports. IP multicast traffic
+ * bypasses S_ROUTER_IN_IP_ROUTING route lookups.
  */
 static void
-build_ip_routing_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows)
+build_egress_delivery_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
+        struct ds *match, struct ds *actions)
 {
     if (op->nbrp) {
+        if (!lrport_is_enabled(op->nbrp)) {
+            /* Drop packets to disabled logical ports (since logical flow
+             * tables are default-drop). */
+            return;
+        }
 
-        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
-            add_route(lflows, op, op->lrp_networks.ipv4_addrs[i].addr_s,
-                      op->lrp_networks.ipv4_addrs[i].network_s,
-                      op->lrp_networks.ipv4_addrs[i].plen, NULL, false,
-                      &op->nbrp->header_);
+        if (op->derived) {
+            /* No egress packets should be processed in the context of
+             * a chassisredirect port.  The chassisredirect port should
+             * be replaced by the l3dgw port in the local output
+             * pipeline stage before egress processing. */
+            return;
         }
 
-        for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
-            add_route(lflows, op, op->lrp_networks.ipv6_addrs[i].addr_s,
-                      op->lrp_networks.ipv6_addrs[i].network_s,
-                      op->lrp_networks.ipv6_addrs[i].plen, NULL, false,
-                      &op->nbrp->header_);
+        /* If multicast relay is enabled then also adjust source mac for IP
+         * multicast traffic.
+         */
+        if (op->od->mcast_info.rtr.relay) {
+            ds_clear(match);
+            ds_clear(actions);
+            ds_put_format(match, "(ip4.mcast || ip6.mcast) && outport == %s",
+                          op->json_key);
+            ds_put_format(actions, "eth.src = %s; output;",
+                          op->lrp_networks.ea_s);
+            ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 110,
+                          ds_cstr(match), ds_cstr(actions));
         }
+
+        ds_clear(match);
+        ds_put_format(match, "outport == %s", op->json_key);
+        ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 100,
+                      ds_cstr(match), "output;");
+    }
+
+}
+
+static void
+build_misc_local_traffic_drop_flows_for_lrouter(
+        struct ovn_datapath *od, struct hmap *lflows)
+{
+    if (od->nbr) {
+        /* L3 admission control: drop multicast and broadcast source, localhost
+         * source or destination, and zero network source or destination
+         * (priority 100). */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 100,
+                      "ip4.src_mcast ||"
+                      "ip4.src == 255.255.255.255 || "
+                      "ip4.src == 127.0.0.0/8 || "
+                      "ip4.dst == 127.0.0.0/8 || "
+                      "ip4.src == 0.0.0.0/8 || "
+                      "ip4.dst == 0.0.0.0/8",
+                      "drop;");
+
+        /* Drop ARP packets (priority 85). ARP request packets for router's own
+         * IPs are handled with priority-90 flows.
+         * Drop IPv6 ND packets (priority 85). ND NA packets for router's own
+         * IPs are handled with priority-90 flows.
+         */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 85,
+                      "arp || nd", "drop;");
+
+        /* Allow IPv6 multicast traffic that's supposed to reach the
+         * router pipeline (e.g., router solicitations).
+         */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 84, "nd_rs || nd_ra",
+                      "next;");
+
+        /* Drop other reserved multicast. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 83,
+                      "ip6.mcast_rsvd", "drop;");
+
+        /* Allow other multicast if relay enabled (priority 82). */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 82,
+                      "ip4.mcast || ip6.mcast",
+                      od->mcast_info.rtr.relay ? "next;" : "drop;");
+
+        /* Drop Ethernet local broadcast.  By definition this traffic should
+         * not be forwarded.*/
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50,
+                      "eth.bcast", "drop;");
+
+        /* TTL discard */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 30,
+                      "ip4 && ip.ttl == {0, 1}", "drop;");
+
+        /* Pass other traffic not already handled to the next table for
+         * routing. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 0, "1", "next;");
     }
 }
 
 static void
-build_static_route_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
-        struct hmap *ports)
+build_dhcpv6_reply_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
+        struct ds *match)
 {
-    if (od->nbr) {
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING_ECMP, 150,
-                      REG_ECMP_GROUP_ID" == 0", "next;");
-
-        struct hmap ecmp_groups = HMAP_INITIALIZER(&ecmp_groups);
-        struct hmap unique_routes = HMAP_INITIALIZER(&unique_routes);
-        struct ovs_list parsed_routes = OVS_LIST_INITIALIZER(&parsed_routes);
-        struct ecmp_groups_node *group;
-        for (int i = 0; i < od->nbr->n_static_routes; i++) {
-            struct parsed_route *route =
-                parsed_routes_add(&parsed_routes, od->nbr->static_routes[i]);
-            if (!route) {
-                continue;
-            }
-            group = ecmp_groups_find(&ecmp_groups, route);
-            if (group) {
-                ecmp_groups_add_route(group, route);
-            } else {
-                const struct parsed_route *existed_route =
-                    unique_routes_remove(&unique_routes, route);
-                if (existed_route) {
-                    group = ecmp_groups_add(&ecmp_groups, existed_route);
-                    if (group) {
-                        ecmp_groups_add_route(group, route);
-                    }
-                } else {
-                    unique_routes_add(&unique_routes, route);
-                }
-            }
-        }
-        HMAP_FOR_EACH (group, hmap_node, &ecmp_groups) {
-            /* add a flow in IP_ROUTING, and one flow for each member in
-             * IP_ROUTING_ECMP. */
-            build_ecmp_route_flow(lflows, od, ports, group);
-        }
-        const struct unique_routes_node *ur;
-        HMAP_FOR_EACH (ur, hmap_node, &unique_routes) {
-            build_static_route_flow(lflows, od, ports, ur->route);
+    if (op->nbrp && (!op->derived)) {
+        for (size_t i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
+            ds_clear(match);
+            ds_put_format(match, "ip6.dst == %s && udp.src == 547 &&"
+                          " udp.dst == 546",
+                          op->lrp_networks.ipv6_addrs[i].addr_s);
+            ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100,
+                          ds_cstr(match),
+                          "reg0 = 0; handle_dhcpv6_reply;");
         }
-        ecmp_groups_destroy(&ecmp_groups);
-        unique_routes_destroy(&unique_routes);
-        parsed_routes_destroy(&parsed_routes);
     }
+
 }
 
-/* IP Multicast lookup. Here we set the output port, adjust TTL and
- * advance to next table (priority 500).
- */
 static void
-build_mcast_lookup_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
+build_ipv6_input_flows_for_lrouter_port(
+        struct ovn_port *op, struct hmap *lflows,
         struct ds *match, struct ds *actions)
 {
-    if (od->nbr) {
+    if (op->nbrp && (!op->derived)) {
+        /* No ingress packets are accepted on a chassisredirect
+         * port, so no need to program flows for that port. */
+        if (op->lrp_networks.n_ipv6_addrs) {
+            /* ICMPv6 echo reply.  These flows reply to echo requests
+             * received for the router's IP address. */
+            ds_clear(match);
+            ds_put_cstr(match, "ip6.dst == ");
+            op_put_v6_networks(match, op);
+            ds_put_cstr(match, " && icmp6.type == 128 && icmp6.code == 0");
 
-        /* Drop IPv6 multicast traffic that shouldn't be forwarded,
-         * i.e., router solicitation and router advertisement.
-         */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 550,
-                      "nd_rs || nd_ra", "drop;");
-        if (!od->mcast_info.rtr.relay) {
-            return;
+            const char *lrp_actions =
+                        "ip6.dst <-> ip6.src; "
+                        "ip.ttl = 255; "
+                        "icmp6.type = 129; "
+                        "flags.loopback = 1; "
+                        "next; ";
+            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90,
+                                    ds_cstr(match), lrp_actions,
+                                    &op->nbrp->header_);
         }
 
-        struct ovn_igmp_group *igmp_group;
-
-        LIST_FOR_EACH (igmp_group, list_node, &od->mcast_info.groups) {
+        /* ND reply.  These flows reply to ND solicitations for the
+         * router's own IP address. */
+        for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
             ds_clear(match);
-            ds_clear(actions);
-            if (IN6_IS_ADDR_V4MAPPED(&igmp_group->address)) {
-                ds_put_format(match, "ip4 && ip4.dst == %s ",
-                            igmp_group->mcgroup.name);
-            } else {
-                ds_put_format(match, "ip6 && ip6.dst == %s ",
-                            igmp_group->mcgroup.name);
-            }
-            if (od->mcast_info.rtr.flood_static) {
-                ds_put_cstr(actions,
-                            "clone { "
-                                "outport = \""MC_STATIC"\"; "
-                                "ip.ttl--; "
-                                "next; "
-                            "};");
+            if (op->od->l3dgw_port && op == op->od->l3dgw_port
+                && op->od->l3redirect_port) {
+                /* Traffic with eth.src = l3dgw_port->lrp_networks.ea_s
+                 * should only be sent from the gateway chassi, so that
+                 * upstream MAC learning points to the gateway chassis.
+                 * Also need to avoid generation of multiple ND replies
+                 * from different chassis. */
+                ds_put_format(match, "is_chassis_resident(%s)",
+                              op->od->l3redirect_port->json_key);
             }
-            ds_put_format(actions, "outport = \"%s\"; ip.ttl--; next;",
-                          igmp_group->mcgroup.name);
-            ovn_lflow_add_unique(lflows, od, S_ROUTER_IN_IP_ROUTING, 500,
-                                 ds_cstr(match), ds_cstr(actions));
+
+            build_lrouter_nd_flow(op->od, op, "nd_na_router",
+                                  op->lrp_networks.ipv6_addrs[i].addr_s,
+                                  op->lrp_networks.ipv6_addrs[i].sn_addr_s,
+                                  REG_INPORT_ETH_ADDR, match, false, 90,
+                                  &op->nbrp->header_, lflows);
         }
 
-        /* If needed, flood unregistered multicast on statically configured
-         * ports. Otherwise drop any multicast traffic.
-         */
-        if (od->mcast_info.rtr.flood_static) {
-            ovn_lflow_add_unique(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
-                          "ip4.mcast || ip6.mcast",
-                          "clone { "
-                                "outport = \""MC_STATIC"\"; "
-                                "ip.ttl--; "
-                                "next; "
-                          "};");
-        } else {
-            ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_ROUTING, 450,
-                          "ip4.mcast || ip6.mcast", "drop;");
+        /* UDP/TCP/SCTP port unreachable */
+        if (!smap_get(&op->od->nbr->options, "chassis")
+            && !op->od->l3dgw_port) {
+            for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip6 && ip6.dst == %s && !ip.later_frag && tcp",
+                              op->lrp_networks.ipv6_addrs[i].addr_s);
+                const char *action = "tcp_reset {"
+                                     "eth.dst <-> eth.src; "
+                                     "ip6.dst <-> ip6.src; "
+                                     "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        80, ds_cstr(match), action,
+                                        &op->nbrp->header_);
+
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip6 && ip6.dst == %s && !ip.later_frag && sctp",
+                              op->lrp_networks.ipv6_addrs[i].addr_s);
+                action = "sctp_abort {"
+                         "eth.dst <-> eth.src; "
+                         "ip6.dst <-> ip6.src; "
+                         "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        80, ds_cstr(match), action,
+                                        &op->nbrp->header_);
+
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip6 && ip6.dst == %s && !ip.later_frag && udp",
+                              op->lrp_networks.ipv6_addrs[i].addr_s);
+                action = "icmp6 {"
+                         "eth.dst <-> eth.src; "
+                         "ip6.dst <-> ip6.src; "
+                         "ip.ttl = 255; "
+                         "icmp6.type = 1; "
+                         "icmp6.code = 4; "
+                         "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        80, ds_cstr(match), action,
+                                        &op->nbrp->header_);
+
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip6 && ip6.dst == %s && !ip.later_frag",
+                              op->lrp_networks.ipv6_addrs[i].addr_s);
+                action = "icmp6 {"
+                         "eth.dst <-> eth.src; "
+                         "ip6.dst <-> ip6.src; "
+                         "ip.ttl = 255; "
+                         "icmp6.type = 1; "
+                         "icmp6.code = 3; "
+                         "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        70, ds_cstr(match), action,
+                                        &op->nbrp->header_);
+            }
         }
-    }
-}
 
-/* Logical router ingress table POLICY: Policy.
- *
- * A packet that arrives at this table is an IP packet that should be
- * permitted/denied/rerouted to the address in the rule's nexthop.
- * This table sets outport to the correct out_port,
- * eth.src to the output port's MAC address,
- * and REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 to the next-hop IP address
- * (leaving 'ip[46].dst', the packet’s final destination, unchanged), and
- * advances to the next table for ARP/ND resolution. */
-static void
-build_ingress_policy_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
-        struct hmap *ports)
-{
-    if (od->nbr) {
-        /* This is a catch-all rule. It has the lowest priority (0)
-         * does a match-all("1") and pass-through (next) */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, 0, "1", "next;");
+        /* ICMPv6 time exceeded */
+        for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
+            /* skip link-local address */
+            if (in6_is_lla(&op->lrp_networks.ipv6_addrs[i].network)) {
+                continue;
+            }
 
-        /* Convert routing policies to flows. */
-        for (int i = 0; i < od->nbr->n_policies; i++) {
-            const struct nbrec_logical_router_policy *rule
-                = od->nbr->policies[i];
-            build_routing_policy_flow(lflows, od, ports, rule, &rule->header_);
+            ds_clear(match);
+            ds_clear(actions);
+
+            ds_put_format(match,
+                          "inport == %s && ip6 && "
+                          "ip6.src == %s/%d && "
+                          "ip.ttl == {0, 1} && !ip.later_frag",
+                          op->json_key,
+                          op->lrp_networks.ipv6_addrs[i].network_s,
+                          op->lrp_networks.ipv6_addrs[i].plen);
+            ds_put_format(actions,
+                          "icmp6 {"
+                          "eth.dst <-> eth.src; "
+                          "ip6.dst = ip6.src; "
+                          "ip6.src = %s; "
+                          "ip.ttl = 255; "
+                          "icmp6.type = 3; /* Time exceeded */ "
+                          "icmp6.code = 0; /* TTL exceeded in transit */ "
+                          "next; };",
+                          op->lrp_networks.ipv6_addrs[i].addr_s);
+            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 40,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    &op->nbrp->header_);
         }
     }
+
 }
 
-/* Local router ingress table ARP_RESOLVE: ARP Resolution. */
 static void
-build_arp_resolve_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows)
+build_lrouter_arp_nd_for_datapath(struct ovn_datapath *od,
+                                  struct hmap *lflows)
 {
     if (od->nbr) {
-        /* Multicast packets already have the outport set so just advance to
-         * next table (priority 500). */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 500,
-                      "ip4.mcast || ip6.mcast", "next;");
 
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 0, "ip4",
-                      "get_arp(outport, " REG_NEXT_HOP_IPV4 "); next;");
+        /* Priority-90-92 flows handle ARP requests and ND packets. Most are
+         * per logical port but DNAT addresses can be handled per datapath
+         * for non gateway router ports.
+         *
+         * Priority 91 and 92 flows are added for each gateway router
+         * port to handle the special cases. In case we get the packet
+         * on a regular port, just reply with the port's ETH address.
+         */
+        for (int i = 0; i < od->nbr->n_nat; i++) {
+            struct ovn_nat *nat_entry = &od->nat_entries[i];
 
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 0, "ip6",
-                      "get_nd(outport, " REG_NEXT_HOP_IPV6 "); next;");
+            /* Skip entries we failed to parse. */
+            if (!nat_entry_is_valid(nat_entry)) {
+                continue;
+            }
+
+            /* Skip SNAT entries for now, we handle unique SNAT IPs separately
+             * below.
+             */
+            if (!strcmp(nat_entry->nb->type, "snat")) {
+                continue;
+            }
+            build_lrouter_nat_arp_nd_flow(od, nat_entry, lflows);
+        }
+
+        /* Now handle SNAT entries too, one per unique SNAT IP. */
+        struct shash_node *snat_snode;
+        SHASH_FOR_EACH (snat_snode, &od->snat_ips) {
+            struct ovn_snat_ip *snat_ip = snat_snode->data;
+
+            if (ovs_list_is_empty(&snat_ip->snat_entries)) {
+                continue;
+            }
+
+            struct ovn_nat *nat_entry =
+                CONTAINER_OF(ovs_list_front(&snat_ip->snat_entries),
+                             struct ovn_nat, ext_addr_list_node);
+            build_lrouter_nat_arp_nd_flow(od, nat_entry, lflows);
+        }
     }
 }
 
-/* Local router ingress table ARP_RESOLVE: ARP Resolution.
- *
- * Any unicast packet that reaches this table is an IP packet whose
- * next-hop IP address is in REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6
- * (ip4.dst/ipv6.dst is the final destination).
- * This table resolves the IP address in
- * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 into an output port in outport and
- * an Ethernet address in eth.dst.
- */
+/* Logical router ingress table 3: IP Input for IPv4. */
 static void
-build_arp_resolve_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
-        struct hmap *ports,
-        struct ds *match, struct ds *actions)
+build_lrouter_ipv4_ip_input(struct ovn_port *op,
+                            struct hmap *lflows,
+                            struct ds *match, struct ds *actions)
 {
-    if (op->nbsp && !lsp_is_enabled(op->nbsp)) {
-        return;
-    }
+    /* No ingress packets are accepted on a chassisredirect
+     * port, so no need to program flows for that port. */
+    if (op->nbrp && (!op->derived)) {
+        if (op->lrp_networks.n_ipv4_addrs) {
+            /* L3 admission control: drop packets that originate from an
+             * IPv4 address owned by the router or a broadcast address
+             * known to the router (priority 100). */
+            ds_clear(match);
+            ds_put_cstr(match, "ip4.src == ");
+            op_put_v4_networks(match, op, true);
+            ds_put_cstr(match, " && "REGBIT_EGRESS_LOOPBACK" == 0");
+            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100,
+                                    ds_cstr(match), "drop;",
+                                    &op->nbrp->header_);
 
-    if (op->nbrp) {
-        /* This is a logical router port. If next-hop IP address in
-         * REG_NEXT_HOP_IPV4/REG_NEXT_HOP_IPV6 matches IP address of this
-         * router port, then the packet is intended to eventually be sent
-         * to this logical port. Set the destination mac address using
-         * this port's mac address.
-         *
-         * The packet is still in peer's logical pipeline. So the match
-         * should be on peer's outport. */
-        if (op->peer && op->nbrp->peer) {
-            if (op->lrp_networks.n_ipv4_addrs) {
-                ds_clear(match);
-                ds_put_format(match, "outport == %s && "
-                              REG_NEXT_HOP_IPV4 "== ",
-                              op->peer->json_key);
-                op_put_v4_networks(match, op, false);
+            /* ICMP echo reply.  These flows reply to ICMP echo requests
+             * received for the router's IP address. Since packets only
+             * get here as part of the logical router datapath, the inport
+             * (i.e. the incoming locally attached net) does not matter.
+             * The ip.ttl also does not matter (RFC1812 section 4.2.2.9) */
+            ds_clear(match);
+            ds_put_cstr(match, "ip4.dst == ");
+            op_put_v4_networks(match, op, false);
+            ds_put_cstr(match, " && icmp4.type == 8 && icmp4.code == 0");
 
-                ds_clear(actions);
-                ds_put_format(actions, "eth.dst = %s; next;",
-                              op->lrp_networks.ea_s);
-                ovn_lflow_add_with_hint(lflows, op->peer->od,
-                                        S_ROUTER_IN_ARP_RESOLVE, 100,
-                                        ds_cstr(match), ds_cstr(actions),
-                                        &op->nbrp->header_);
-            }
+            const char * icmp_actions = "ip4.dst <-> ip4.src; "
+                          "ip.ttl = 255; "
+                          "icmp4.type = 0; "
+                          "flags.loopback = 1; "
+                          "next; ";
+            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90,
+                                    ds_cstr(match), icmp_actions,
+                                    &op->nbrp->header_);
+        }
 
-            if (op->lrp_networks.n_ipv6_addrs) {
-                ds_clear(match);
-                ds_put_format(match, "outport == %s && "
-                              REG_NEXT_HOP_IPV6 " == ",
-                              op->peer->json_key);
-                op_put_v6_networks(match, op);
+        /* BFD msg handling */
+        build_lrouter_bfd_flows(lflows, op);
 
-                ds_clear(actions);
-                ds_put_format(actions, "eth.dst = %s; next;",
-                              op->lrp_networks.ea_s);
-                ovn_lflow_add_with_hint(lflows, op->peer->od,
-                                        S_ROUTER_IN_ARP_RESOLVE, 100,
-                                        ds_cstr(match), ds_cstr(actions),
-                                        &op->nbrp->header_);
-            }
+        /* ICMP time exceeded */
+        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
+            ds_clear(match);
+            ds_clear(actions);
+
+            ds_put_format(match,
+                          "inport == %s && ip4 && "
+                          "ip.ttl == {0, 1} && !ip.later_frag", op->json_key);
+            ds_put_format(actions,
+                          "icmp4 {"
+                          "eth.dst <-> eth.src; "
+                          "icmp4.type = 11; /* Time exceeded */ "
+                          "icmp4.code = 0; /* TTL exceeded in transit */ "
+                          "ip4.dst = ip4.src; "
+                          "ip4.src = %s; "
+                          "ip.ttl = 255; "
+                          "next; };",
+                          op->lrp_networks.ipv4_addrs[i].addr_s);
+            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 40,
+                                    ds_cstr(match), ds_cstr(actions),
+                                    &op->nbrp->header_);
         }
 
-        if (!op->derived && op->od->l3redirect_port) {
-            const char *redirect_type = smap_get(&op->nbrp->options,
-                                                 "redirect-type");
-            if (redirect_type && !strcasecmp(redirect_type, "bridged")) {
-                /* Packet is on a non gateway chassis and
-                 * has an unresolved ARP on a network behind gateway
-                 * chassis attached router port. Since, redirect type
-                 * is "bridged", instead of calling "get_arp"
-                 * on this node, we will redirect the packet to gateway
-                 * chassis, by setting destination mac router port mac.*/
-                ds_clear(match);
-                ds_put_format(match, "outport == %s && "
-                              "!is_chassis_resident(%s)", op->json_key,
-                              op->od->l3redirect_port->json_key);
-                ds_clear(actions);
-                ds_put_format(actions, "eth.dst = %s; next;",
-                              op->lrp_networks.ea_s);
+        /* ARP reply.  These flows reply to ARP requests for the router's own
+         * IP address. */
+        for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
+            ds_clear(match);
+            ds_put_format(match, "arp.spa == %s/%u",
+                          op->lrp_networks.ipv4_addrs[i].network_s,
+                          op->lrp_networks.ipv4_addrs[i].plen);
 
-                ovn_lflow_add_with_hint(lflows, op->od,
-                                        S_ROUTER_IN_ARP_RESOLVE, 50,
-                                        ds_cstr(match), ds_cstr(actions),
-                                        &op->nbrp->header_);
-            }
-        }
+            if (op->od->l3dgw_port && op->od->l3redirect_port && op->peer
+                && op->peer->od->n_localnet_ports) {
+                bool add_chassis_resident_check = false;
+                if (op == op->od->l3dgw_port) {
+                    /* Traffic with eth.src = l3dgw_port->lrp_networks.ea_s
+                     * should only be sent from the gateway chassis, so that
+                     * upstream MAC learning points to the gateway chassis.
+                     * Also need to avoid generation of multiple ARP responses
+                     * from different chassis. */
+                    add_chassis_resident_check = true;
+                } else {
+                    /* Check if the option 'reside-on-redirect-chassis'
+                     * is set to true on the router port. If set to true
+                     * and if peer's logical switch has a localnet port, it
+                     * means the router pipeline for the packets from
+                     * peer's logical switch is run on the chassis
+                     * hosting the gateway port and it should reply to the
+                     * ARP requests for the router port IPs.
+                     */
+                    add_chassis_resident_check = smap_get_bool(
+                        &op->nbrp->options,
+                        "reside-on-redirect-chassis", false);
+                }
 
-        /* Drop IP traffic destined to router owned IPs. Part of it is dropped
-         * in stage "lr_in_ip_input" but traffic that could have been unSNATed
-         * but didn't match any existing session might still end up here.
-         *
-         * Priority 1.
-         */
-        build_lrouter_drop_own_dest(op, S_ROUTER_IN_ARP_RESOLVE, 1, true,
-                                    lflows);
-    } else if (op->od->n_router_ports && !lsp_is_router(op->nbsp)
-               && strcmp(op->nbsp->type, "virtual")) {
-        /* This is a logical switch port that backs a VM or a container.
-         * Extract its addresses. For each of the address, go through all
-         * the router ports attached to the switch (to which this port
-         * connects) and if the address in question is reachable from the
-         * router port, add an ARP/ND entry in that router's pipeline. */
+                if (add_chassis_resident_check) {
+                    ds_put_format(match, " && is_chassis_resident(%s)",
+                                  op->od->l3redirect_port->json_key);
+                }
+            }
 
-        for (size_t i = 0; i < op->n_lsp_addrs; i++) {
-            const char *ea_s = op->lsp_addrs[i].ea_s;
-            for (size_t j = 0; j < op->lsp_addrs[i].n_ipv4_addrs; j++) {
-                const char *ip_s = op->lsp_addrs[i].ipv4_addrs[j].addr_s;
-                for (size_t k = 0; k < op->od->n_router_ports; k++) {
-                    /* Get the Logical_Router_Port that the
-                     * Logical_Switch_Port is connected to, as
-                     * 'peer'. */
-                    const char *peer_name = smap_get(
-                        &op->od->router_ports[k]->nbsp->options,
-                        "router-port");
-                    if (!peer_name) {
-                        continue;
-                    }
+            build_lrouter_arp_flow(op->od, op,
+                                   op->lrp_networks.ipv4_addrs[i].addr_s,
+                                   REG_INPORT_ETH_ADDR, match, false, 90,
+                                   &op->nbrp->header_, lflows);
+        }
 
-                    struct ovn_port *peer = ovn_port_find(ports, peer_name);
-                    if (!peer || !peer->nbrp) {
-                        continue;
-                    }
+        /* A set to hold all load-balancer vips that need ARP responses. */
+        struct sset all_ips_v4 = SSET_INITIALIZER(&all_ips_v4);
+        struct sset all_ips_v6 = SSET_INITIALIZER(&all_ips_v6);
+        get_router_load_balancer_ips(op->od, &all_ips_v4, &all_ips_v6);
 
-                    if (!find_lrp_member_ip(peer, ip_s)) {
-                        continue;
-                    }
+        const char *ip_address;
+        SSET_FOR_EACH (ip_address, &all_ips_v4) {
+            ds_clear(match);
+            if (op == op->od->l3dgw_port) {
+                ds_put_format(match, "is_chassis_resident(%s)",
+                              op->od->l3redirect_port->json_key);
+            }
 
-                    ds_clear(match);
-                    ds_put_format(match, "outport == %s && "
-                                  REG_NEXT_HOP_IPV4 " == %s",
-                                  peer->json_key, ip_s);
+            build_lrouter_arp_flow(op->od, op,
+                                   ip_address, REG_INPORT_ETH_ADDR,
+                                   match, false, 90, NULL, lflows);
+        }
 
-                    ds_clear(actions);
-                    ds_put_format(actions, "eth.dst = %s; next;", ea_s);
-                    ovn_lflow_add_with_hint(lflows, peer->od,
-                                            S_ROUTER_IN_ARP_RESOLVE, 100,
-                                            ds_cstr(match),
-                                            ds_cstr(actions),
-                                            &op->nbsp->header_);
-                }
+        SSET_FOR_EACH (ip_address, &all_ips_v6) {
+            ds_clear(match);
+            if (op == op->od->l3dgw_port) {
+                ds_put_format(match, "is_chassis_resident(%s)",
+                              op->od->l3redirect_port->json_key);
             }
 
-            for (size_t j = 0; j < op->lsp_addrs[i].n_ipv6_addrs; j++) {
-                const char *ip_s = op->lsp_addrs[i].ipv6_addrs[j].addr_s;
-                for (size_t k = 0; k < op->od->n_router_ports; k++) {
-                    /* Get the Logical_Router_Port that the
-                     * Logical_Switch_Port is connected to, as
-                     * 'peer'. */
-                    const char *peer_name = smap_get(
-                        &op->od->router_ports[k]->nbsp->options,
-                        "router-port");
-                    if (!peer_name) {
-                        continue;
-                    }
+            build_lrouter_nd_flow(op->od, op, "nd_na",
+                                  ip_address, NULL, REG_INPORT_ETH_ADDR,
+                                  match, false, 90, NULL, lflows);
+        }
 
-                    struct ovn_port *peer = ovn_port_find(ports, peer_name);
-                    if (!peer || !peer->nbrp) {
-                        continue;
-                    }
+        sset_destroy(&all_ips_v4);
+        sset_destroy(&all_ips_v6);
 
-                    if (!find_lrp_member_ip(peer, ip_s)) {
-                        continue;
-                    }
+        if (!smap_get(&op->od->nbr->options, "chassis")
+            && !op->od->l3dgw_port) {
+            /* UDP/TCP/SCTP port unreachable. */
+            for (int i = 0; i < op->lrp_networks.n_ipv4_addrs; i++) {
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip4 && ip4.dst == %s && !ip.later_frag && udp",
+                              op->lrp_networks.ipv4_addrs[i].addr_s);
+                const char *action = "icmp4 {"
+                                     "eth.dst <-> eth.src; "
+                                     "ip4.dst <-> ip4.src; "
+                                     "ip.ttl = 255; "
+                                     "icmp4.type = 3; "
+                                     "icmp4.code = 3; "
+                                     "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        80, ds_cstr(match), action,
+                                        &op->nbrp->header_);
 
-                    ds_clear(match);
-                    ds_put_format(match, "outport == %s && "
-                                  REG_NEXT_HOP_IPV6 " == %s",
-                                  peer->json_key, ip_s);
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip4 && ip4.dst == %s && !ip.later_frag && tcp",
+                              op->lrp_networks.ipv4_addrs[i].addr_s);
+                action = "tcp_reset {"
+                         "eth.dst <-> eth.src; "
+                         "ip4.dst <-> ip4.src; "
+                         "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        80, ds_cstr(match), action,
+                                        &op->nbrp->header_);
 
-                    ds_clear(actions);
-                    ds_put_format(actions, "eth.dst = %s; next;", ea_s);
-                    ovn_lflow_add_with_hint(lflows, peer->od,
-                                            S_ROUTER_IN_ARP_RESOLVE, 100,
-                                            ds_cstr(match),
-                                            ds_cstr(actions),
-                                            &op->nbsp->header_);
-                }
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip4 && ip4.dst == %s && !ip.later_frag && sctp",
+                              op->lrp_networks.ipv4_addrs[i].addr_s);
+                action = "sctp_abort {"
+                         "eth.dst <-> eth.src; "
+                         "ip4.dst <-> ip4.src; "
+                         "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        80, ds_cstr(match), action,
+                                        &op->nbrp->header_);
+
+                ds_clear(match);
+                ds_put_format(match,
+                              "ip4 && ip4.dst == %s && !ip.later_frag",
+                              op->lrp_networks.ipv4_addrs[i].addr_s);
+                action = "icmp4 {"
+                         "eth.dst <-> eth.src; "
+                         "ip4.dst <-> ip4.src; "
+                         "ip.ttl = 255; "
+                         "icmp4.type = 3; "
+                         "icmp4.code = 2; "
+                         "next; };";
+                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
+                                        70, ds_cstr(match), action,
+                                        &op->nbrp->header_);
             }
         }
-    } else if (op->od->n_router_ports && !lsp_is_router(op->nbsp)
-               && !strcmp(op->nbsp->type, "virtual")) {
-        /* This is a virtual port. Add ARP replies for the virtual ip with
-         * the mac of the present active virtual parent.
-         * If the logical port doesn't have virtual parent set in
-         * Port_Binding table, then add the flow to set eth.dst to
-         * 00:00:00:00:00:00 and advance to next table so that ARP is
-         * resolved by router pipeline using the arp{} action.
-         * The MAC_Binding entry for the virtual ip might be invalid. */
-        ovs_be32 ip;
 
-        const char *vip = smap_get(&op->nbsp->options,
-                                   "virtual-ip");
-        const char *virtual_parents = smap_get(&op->nbsp->options,
-                                               "virtual-parents");
-        if (!vip || !virtual_parents ||
-            !ip_parse(vip, &ip) || !op->sb) {
+        /* Drop IP traffic destined to router owned IPs except if the IP is
+         * also a SNAT IP. Those are dropped later, in stage
+         * "lr_in_arp_resolve", if unSNAT was unsuccessful.
+         *
+         * If op->pd->lb_force_snat_router_ip is true, it means the IP of the
+         * router port is also SNAT IP.
+         *
+         * Priority 60.
+         */
+        if (!op->od->lb_force_snat_router_ip) {
+            build_lrouter_drop_own_dest(op, S_ROUTER_IN_IP_INPUT, 60, false,
+                                        lflows);
+        }
+        /* ARP / ND handling for external IP addresses.
+         *
+         * DNAT and SNAT IP addresses are external IP addresses that need ARP
+         * handling.
+         *
+         * These are already taken care globally, per router. The only
+         * exception is on the l3dgw_port where we might need to use a
+         * different ETH address.
+         */
+        if (op != op->od->l3dgw_port) {
             return;
         }
 
-        if (!op->sb->virtual_parent || !op->sb->virtual_parent[0] ||
-            !op->sb->chassis) {
-            /* The virtual port is not claimed yet. */
-            for (size_t i = 0; i < op->od->n_router_ports; i++) {
-                const char *peer_name = smap_get(
-                    &op->od->router_ports[i]->nbsp->options,
-                    "router-port");
-                if (!peer_name) {
-                    continue;
-                }
-
-                struct ovn_port *peer = ovn_port_find(ports, peer_name);
-                if (!peer || !peer->nbrp) {
-                    continue;
-                }
-
-                if (find_lrp_member_ip(peer, vip)) {
-                    ds_clear(match);
-                    ds_put_format(match, "outport == %s && "
-                                  REG_NEXT_HOP_IPV4 " == %s",
-                                  peer->json_key, vip);
+        for (size_t i = 0; i < op->od->nbr->n_nat; i++) {
+            struct ovn_nat *nat_entry = &op->od->nat_entries[i];
 
-                    const char *arp_actions =
-                                  "eth.dst = 00:00:00:00:00:00; next;";
-                    ovn_lflow_add_with_hint(lflows, peer->od,
-                                            S_ROUTER_IN_ARP_RESOLVE, 100,
-                                            ds_cstr(match),
-                                            arp_actions,
-                                            &op->nbsp->header_);
-                    break;
-                }
-            }
-        } else {
-            struct ovn_port *vp =
-                ovn_port_find(ports, op->sb->virtual_parent);
-            if (!vp || !vp->nbsp) {
-                return;
+            /* Skip entries we failed to parse. */
+            if (!nat_entry_is_valid(nat_entry)) {
+                continue;
             }
 
-            for (size_t i = 0; i < vp->n_lsp_addrs; i++) {
-                bool found_vip_network = false;
-                const char *ea_s = vp->lsp_addrs[i].ea_s;
-                for (size_t j = 0; j < vp->od->n_router_ports; j++) {
-                    /* Get the Logical_Router_Port that the
-                    * Logical_Switch_Port is connected to, as
-                    * 'peer'. */
-                    const char *peer_name = smap_get(
-                        &vp->od->router_ports[j]->nbsp->options,
-                        "router-port");
-                    if (!peer_name) {
-                        continue;
-                    }
+            /* Skip SNAT entries for now, we handle unique SNAT IPs separately
+             * below.
+             */
+            if (!strcmp(nat_entry->nb->type, "snat")) {
+                continue;
+            }
+            build_lrouter_port_nat_arp_nd_flow(op, nat_entry, lflows);
+        }
 
-                    struct ovn_port *peer =
-                        ovn_port_find(ports, peer_name);
-                    if (!peer || !peer->nbrp) {
-                        continue;
-                    }
+        /* Now handle SNAT entries too, one per unique SNAT IP. */
+        struct shash_node *snat_snode;
+        SHASH_FOR_EACH (snat_snode, &op->od->snat_ips) {
+            struct ovn_snat_ip *snat_ip = snat_snode->data;
 
-                    if (!find_lrp_member_ip(peer, vip)) {
-                        continue;
-                    }
+            if (ovs_list_is_empty(&snat_ip->snat_entries)) {
+                continue;
+            }
 
-                    ds_clear(match);
-                    ds_put_format(match, "outport == %s && "
-                                  REG_NEXT_HOP_IPV4 " == %s",
-                                  peer->json_key, vip);
+            struct ovn_nat *nat_entry =
+                CONTAINER_OF(ovs_list_front(&snat_ip->snat_entries),
+                             struct ovn_nat, ext_addr_list_node);
+            build_lrouter_port_nat_arp_nd_flow(op, nat_entry, lflows);
+        }
+    }
+}
 
-                    ds_clear(actions);
-                    ds_put_format(actions, "eth.dst = %s; next;", ea_s);
-                    ovn_lflow_add_with_hint(lflows, peer->od,
-                                            S_ROUTER_IN_ARP_RESOLVE, 100,
-                                            ds_cstr(match),
-                                            ds_cstr(actions),
-                                            &op->nbsp->header_);
-                    found_vip_network = true;
-                    break;
-                }
+/* NAT, Defrag and load balancing. */
+static void
+build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od,
+                                struct hmap *lflows,
+                                struct shash *meter_groups,
+                                struct hmap *lbs,
+                                struct ds *match, struct ds *actions)
+{
+    if (od->nbr) {
 
-                if (found_vip_network) {
-                    break;
-                }
-            }
-        }
-    } else if (lsp_is_router(op->nbsp)) {
-        /* This is a logical switch port that connects to a router. */
+        /* Packets are allowed by default. */
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DEFRAG, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_UNSNAT, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_OUT_EGR_LOOP, 0, "1", "next;");
+        ovn_lflow_add(lflows, od, S_ROUTER_IN_ECMP_STATEFUL, 0, "1", "next;");
 
-        /* The peer of this switch port is the router port for which
-         * we need to add logical flows such that it can resolve
-         * ARP entries for all the other router ports connected to
-         * the switch in question. */
+        /* Send the IPv6 NS packets to next table. When ovn-controller
+         * generates IPv6 NS (for the action - nd_ns{}), the injected
+         * packet would go through conntrack - which is not required. */
+        ovn_lflow_add(lflows, od, S_ROUTER_OUT_SNAT, 120, "nd_ns", "next;");
 
-        const char *peer_name = smap_get(&op->nbsp->options,
-                                         "router-port");
-        if (!peer_name) {
+        /* NAT rules are only valid on Gateway routers and routers with
+         * l3dgw_port (router has a port with gateway chassis
+         * specified). */
+        if (!smap_get(&od->nbr->options, "chassis") && !od->l3dgw_port) {
             return;
         }
 
-        struct ovn_port *peer = ovn_port_find(ports, peer_name);
-        if (!peer || !peer->nbrp) {
-            return;
-        }
+        struct sset nat_entries = SSET_INITIALIZER(&nat_entries);
 
-        if (peer->od->nbr &&
-            smap_get_bool(&peer->od->nbr->options,
-                          "dynamic_neigh_routers", false)) {
-            return;
-        }
+        bool dnat_force_snat_ip =
+            !lport_addresses_is_empty(&od->dnat_force_snat_addrs);
+        bool lb_force_snat_ip =
+            !lport_addresses_is_empty(&od->lb_force_snat_addrs);
 
-        for (size_t i = 0; i < op->od->n_router_ports; i++) {
-            const char *router_port_name = smap_get(
-                                &op->od->router_ports[i]->nbsp->options,
-                                "router-port");
-            struct ovn_port *router_port = ovn_port_find(ports,
-                                                         router_port_name);
-            if (!router_port || !router_port->nbrp) {
+        for (int i = 0; i < od->nbr->n_nat; i++) {
+            const struct nbrec_nat *nat;
+
+            nat = od->nbr->nat[i];
+
+            ovs_be32 ip, mask;
+            struct in6_addr ipv6, mask_v6, v6_exact = IN6ADDR_EXACT_INIT;
+            bool is_v6 = false;
+            bool stateless = lrouter_nat_is_stateless(nat);
+            struct nbrec_address_set *allowed_ext_ips =
+                                      nat->allowed_ext_ips;
+            struct nbrec_address_set *exempted_ext_ips =
+                                      nat->exempted_ext_ips;
+
+            if (allowed_ext_ips && exempted_ext_ips) {
+                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+                VLOG_WARN_RL(&rl, "NAT rule: "UUID_FMT" not applied, since "
+                             "both allowed and exempt external ips set",
+                             UUID_ARGS(&(nat->header_.uuid)));
                 continue;
             }
 
-            /* Skip the router port under consideration. */
-            if (router_port == peer) {
-               continue;
+            char *error = ip_parse_masked(nat->external_ip, &ip, &mask);
+            if (error || mask != OVS_BE32_MAX) {
+                free(error);
+                error = ipv6_parse_masked(nat->external_ip, &ipv6, &mask_v6);
+                if (error || memcmp(&mask_v6, &v6_exact, sizeof(mask_v6))) {
+                    /* Invalid for both IPv4 and IPv6 */
+                    static struct vlog_rate_limit rl =
+                        VLOG_RATE_LIMIT_INIT(5, 1);
+                    VLOG_WARN_RL(&rl, "bad external ip %s for nat",
+                                 nat->external_ip);
+                    free(error);
+                    continue;
+                }
+                /* It was an invalid IPv4 address, but valid IPv6.
+                 * Treat the rest of the handling of this NAT rule
+                 * as IPv6. */
+                is_v6 = true;
             }
 
-            if (router_port->lrp_networks.n_ipv4_addrs) {
-                ds_clear(match);
-                ds_put_format(match, "outport == %s && "
-                              REG_NEXT_HOP_IPV4 " == ",
-                              peer->json_key);
-                op_put_v4_networks(match, router_port, false);
-
-                ds_clear(actions);
-                ds_put_format(actions, "eth.dst = %s; next;",
-                                          router_port->lrp_networks.ea_s);
-                ovn_lflow_add_with_hint(lflows, peer->od,
-                                        S_ROUTER_IN_ARP_RESOLVE, 100,
-                                        ds_cstr(match), ds_cstr(actions),
-                                        &op->nbsp->header_);
+            /* Check the validity of nat->logical_ip. 'logical_ip' can
+             * be a subnet when the type is "snat". */
+            int cidr_bits;
+            if (is_v6) {
+                error = ipv6_parse_masked(nat->logical_ip, &ipv6, &mask_v6);
+                cidr_bits = ipv6_count_cidr_bits(&mask_v6);
+            } else {
+                error = ip_parse_masked(nat->logical_ip, &ip, &mask);
+                cidr_bits = ip_count_cidr_bits(mask);
+            }
+            if (!strcmp(nat->type, "snat")) {
+                if (error) {
+                    /* Invalid for both IPv4 and IPv6 */
+                    static struct vlog_rate_limit rl =
+                        VLOG_RATE_LIMIT_INIT(5, 1);
+                    VLOG_WARN_RL(&rl, "bad ip network or ip %s for snat "
+                                 "in router "UUID_FMT"",
+                                 nat->logical_ip, UUID_ARGS(&od->key));
+                    free(error);
+                    continue;
+                }
+            } else {
+                if (error || (!is_v6 && mask != OVS_BE32_MAX)
+                    || (is_v6 && memcmp(&mask_v6, &v6_exact,
+                                        sizeof mask_v6))) {
+                    /* Invalid for both IPv4 and IPv6 */
+                    static struct vlog_rate_limit rl =
+                        VLOG_RATE_LIMIT_INIT(5, 1);
+                    VLOG_WARN_RL(&rl, "bad ip %s for dnat in router "
+                        ""UUID_FMT"", nat->logical_ip, UUID_ARGS(&od->key));
+                    free(error);
+                    continue;
+                }
             }
 
-            if (router_port->lrp_networks.n_ipv6_addrs) {
-                ds_clear(match);
-                ds_put_format(match, "outport == %s && "
-                              REG_NEXT_HOP_IPV6 " == ",
-                              peer->json_key);
-                op_put_v6_networks(match, router_port);
-
-                ds_clear(actions);
-                ds_put_format(actions, "eth.dst = %s; next;",
-                              router_port->lrp_networks.ea_s);
-                ovn_lflow_add_with_hint(lflows, peer->od,
-                                        S_ROUTER_IN_ARP_RESOLVE, 100,
-                                        ds_cstr(match), ds_cstr(actions),
-                                        &op->nbsp->header_);
+            /* For distributed router NAT, determine whether this NAT rule
+             * satisfies the conditions for distributed NAT processing. */
+            bool distributed = false;
+            struct eth_addr mac;
+            if (od->l3dgw_port && !strcmp(nat->type, "dnat_and_snat") &&
+                nat->logical_port && nat->external_mac) {
+                if (eth_addr_from_string(nat->external_mac, &mac)) {
+                    distributed = true;
+                } else {
+                    static struct vlog_rate_limit rl =
+                        VLOG_RATE_LIMIT_INIT(5, 1);
+                    VLOG_WARN_RL(&rl, "bad mac %s for dnat in router "
+                        ""UUID_FMT"", nat->external_mac, UUID_ARGS(&od->key));
+                    continue;
+                }
             }
-        }
-    }
 
-}
+            /* Ingress UNSNAT table: It is for already established connections'
+             * reverse traffic. i.e., SNAT has already been done in egress
+             * pipeline and now the packet has entered the ingress pipeline as
+             * part of a reply. We undo the SNAT here.
+             *
+             * Undoing SNAT has to happen before DNAT processing.  This is
+             * because when the packet was DNATed in ingress pipeline, it did
+             * not know about the possibility of eventual additional SNAT in
+             * egress pipeline. */
+            if (!strcmp(nat->type, "snat")
+                || !strcmp(nat->type, "dnat_and_snat")) {
+                if (!od->l3dgw_port) {
+                    /* Gateway router. */
+                    ds_clear(match);
+                    ds_clear(actions);
+                    ds_put_format(match, "ip && ip%s.dst == %s",
+                                  is_v6 ? "6" : "4",
+                                  nat->external_ip);
+                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                       ds_put_format(actions, "ip%s.dst=%s; next;",
+                                     is_v6 ? "6" : "4", nat->logical_ip);
+                    } else {
+                       ds_put_cstr(actions, "ct_snat;");
+                    }
 
-/* Local router ingress table CHK_PKT_LEN: Check packet length.
- *
- * Any IPv4 packet with outport set to the distributed gateway
- * router port, check the packet length and store the result in the
- * 'REGBIT_PKT_LARGER' register bit.
- *
- * Local router ingress table LARGER_PKTS: Handle larger packets.
- *
- * Any IPv4 packet with outport set to the distributed gateway
- * router port and the 'REGBIT_PKT_LARGER' register bit is set,
- * generate ICMPv4 packet with type 3 (Destination Unreachable) and
- * code 4 (Fragmentation needed).
- * */
-static void
-build_check_pkt_len_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
-        struct hmap *ports,
-        struct ds *match, struct ds *actions)
-{
-    if (od->nbr) {
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT,
+                                            90, ds_cstr(match),
+                                            ds_cstr(actions),
+                                            &nat->header_);
+                } else {
+                    /* Distributed router. */
 
-        /* Packets are allowed by default. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_CHK_PKT_LEN, 0, "1",
-                      "next;");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_LARGER_PKTS, 0, "1",
-                      "next;");
+                    /* Traffic received on l3dgw_port is subject to NAT. */
+                    ds_clear(match);
+                    ds_clear(actions);
+                    ds_put_format(match, "ip && ip%s.dst == %s"
+                                          " && inport == %s",
+                                  is_v6 ? "6" : "4",
+                                  nat->external_ip,
+                                  od->l3dgw_port->json_key);
+                    if (!distributed && od->l3redirect_port) {
+                        /* Flows for NAT rules that are centralized are only
+                         * programmed on the gateway chassis. */
+                        ds_put_format(match, " && is_chassis_resident(%s)",
+                                      od->l3redirect_port->json_key);
+                    }
 
-        if (od->l3dgw_port && od->l3redirect_port) {
-            int gw_mtu = 0;
-            if (od->l3dgw_port->nbrp) {
-                 gw_mtu = smap_get_int(&od->l3dgw_port->nbrp->options,
-                                       "gateway_mtu", 0);
-            }
-            /* Add the flows only if gateway_mtu is configured. */
-            if (gw_mtu <= 0) {
-                return;
+                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                        ds_put_format(actions, "ip%s.dst=%s; next;",
+                                      is_v6 ? "6" : "4", nat->logical_ip);
+                    } else {
+                        ds_put_cstr(actions, "ct_snat;");
+                    }
+
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_UNSNAT,
+                                            100,
+                                            ds_cstr(match), ds_cstr(actions),
+                                            &nat->header_);
+                }
             }
 
-            ds_clear(match);
-            ds_put_format(match, "outport == %s", od->l3dgw_port->json_key);
+            /* Ingress DNAT table: Packets enter the pipeline with destination
+             * IP address that needs to be DNATted from a external IP address
+             * to a logical IP address. */
+            if (!strcmp(nat->type, "dnat")
+                || !strcmp(nat->type, "dnat_and_snat")) {
+                if (!od->l3dgw_port) {
+                    /* Gateway router. */
+                    /* Packet when it goes from the initiator to destination.
+                     * We need to set flags.loopback because the router can
+                     * send the packet back through the same interface. */
+                    ds_clear(match);
+                    ds_put_format(match, "ip && ip%s.dst == %s",
+                                  is_v6 ? "6" : "4",
+                                  nat->external_ip);
+                    ds_clear(actions);
+                    if (allowed_ext_ips || exempted_ext_ips) {
+                        lrouter_nat_add_ext_ip_match(od, lflows, match, nat,
+                                                     is_v6, true, mask);
+                    }
+
+                    if (dnat_force_snat_ip) {
+                        /* Indicate to the future tables that a DNAT has taken
+                         * place and a force SNAT needs to be done in the
+                         * Egress SNAT table. */
+                        ds_put_format(actions,
+                                      "flags.force_snat_for_dnat = 1; ");
+                    }
 
-            ds_clear(actions);
-            ds_put_format(actions,
-                          REGBIT_PKT_LARGER" = check_pkt_larger(%d);"
-                          " next;", gw_mtu + VLAN_ETH_HEADER_LEN);
-            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_CHK_PKT_LEN, 50,
-                                    ds_cstr(match), ds_cstr(actions),
-                                    &od->l3dgw_port->nbrp->header_);
+                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                        ds_put_format(actions, "flags.loopback = 1; "
+                                      "ip%s.dst=%s; next;",
+                                      is_v6 ? "6" : "4", nat->logical_ip);
+                    } else {
+                        ds_put_format(actions, "flags.loopback = 1; "
+                                      "ct_dnat(%s", nat->logical_ip);
 
-            for (size_t i = 0; i < od->nbr->n_ports; i++) {
-                struct ovn_port *rp = ovn_port_find(ports,
-                                                    od->nbr->ports[i]->name);
-                if (!rp || rp == od->l3dgw_port) {
-                    continue;
-                }
+                        if (nat->external_port_range[0]) {
+                            ds_put_format(actions, ",%s",
+                                          nat->external_port_range);
+                        }
+                        ds_put_format(actions, ");");
+                    }
 
-                if (rp->lrp_networks.ipv4_addrs) {
-                    ds_clear(match);
-                    ds_put_format(match, "inport == %s && outport == %s"
-                                  " && ip4 && "REGBIT_PKT_LARGER,
-                                  rp->json_key, od->l3dgw_port->json_key);
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, 100,
+                                            ds_cstr(match), ds_cstr(actions),
+                                            &nat->header_);
+                } else {
+                    /* Distributed router. */
 
+                    /* Traffic received on l3dgw_port is subject to NAT. */
+                    ds_clear(match);
+                    ds_put_format(match, "ip && ip%s.dst == %s"
+                                          " && inport == %s",
+                                  is_v6 ? "6" : "4",
+                                  nat->external_ip,
+                                  od->l3dgw_port->json_key);
+                    if (!distributed && od->l3redirect_port) {
+                        /* Flows for NAT rules that are centralized are only
+                         * programmed on the gateway chassis. */
+                        ds_put_format(match, " && is_chassis_resident(%s)",
+                                      od->l3redirect_port->json_key);
+                    }
                     ds_clear(actions);
-                    /* Set icmp4.frag_mtu to gw_mtu */
-                    ds_put_format(actions,
-                        "icmp4_error {"
-                        REGBIT_EGRESS_LOOPBACK" = 1; "
-                        "eth.dst = %s; "
-                        "ip4.dst = ip4.src; "
-                        "ip4.src = %s; "
-                        "ip.ttl = 255; "
-                        "icmp4.type = 3; /* Destination Unreachable. */ "
-                        "icmp4.code = 4; /* Frag Needed and DF was Set. */ "
-                        "icmp4.frag_mtu = %d; "
-                        "next(pipeline=ingress, table=%d); };",
-                        rp->lrp_networks.ea_s,
-                        rp->lrp_networks.ipv4_addrs[0].addr_s,
-                        gw_mtu,
-                        ovn_stage_get_table(S_ROUTER_IN_ADMISSION));
-                    ovn_lflow_add_with_hint(lflows, od,
-                                            S_ROUTER_IN_LARGER_PKTS, 50,
+                    if (allowed_ext_ips || exempted_ext_ips) {
+                        lrouter_nat_add_ext_ip_match(od, lflows, match, nat,
+                                                     is_v6, true, mask);
+                    }
+
+                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                        ds_put_format(actions, "ip%s.dst=%s; next;",
+                                      is_v6 ? "6" : "4", nat->logical_ip);
+                    } else {
+                        ds_put_format(actions, "ct_dnat(%s", nat->logical_ip);
+                        if (nat->external_port_range[0]) {
+                            ds_put_format(actions, ",%s",
+                                          nat->external_port_range);
+                        }
+                        ds_put_format(actions, ");");
+                    }
+
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DNAT, 100,
                                             ds_cstr(match), ds_cstr(actions),
-                                            &rp->nbrp->header_);
+                                            &nat->header_);
                 }
+            }
 
-                if (rp->lrp_networks.ipv6_addrs) {
+            /* ARP resolve for NAT IPs. */
+            if (od->l3dgw_port) {
+                if (!strcmp(nat->type, "snat")) {
                     ds_clear(match);
-                    ds_put_format(match, "inport == %s && outport == %s"
-                                  " && ip6 && "REGBIT_PKT_LARGER,
-                                  rp->json_key, od->l3dgw_port->json_key);
+                    ds_put_format(
+                        match, "inport == %s && %s == %s",
+                        od->l3dgw_port->json_key,
+                        is_v6 ? "ip6.src" : "ip4.src", nat->external_ip);
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_IP_INPUT,
+                                            120, ds_cstr(match), "next;",
+                                            &nat->header_);
+                }
 
+                if (!sset_contains(&nat_entries, nat->external_ip)) {
+                    ds_clear(match);
+                    ds_put_format(
+                        match, "outport == %s && %s == %s",
+                        od->l3dgw_port->json_key,
+                        is_v6 ? REG_NEXT_HOP_IPV6 : REG_NEXT_HOP_IPV4,
+                        nat->external_ip);
                     ds_clear(actions);
-                    /* Set icmp6.frag_mtu to gw_mtu */
-                    ds_put_format(actions,
-                        "icmp6_error {"
-                        REGBIT_EGRESS_LOOPBACK" = 1; "
-                        "eth.dst = %s; "
-                        "ip6.dst = ip6.src; "
-                        "ip6.src = %s; "
-                        "ip.ttl = 255; "
-                        "icmp6.type = 2; /* Packet Too Big. */ "
-                        "icmp6.code = 0; "
-                        "icmp6.frag_mtu = %d; "
-                        "next(pipeline=ingress, table=%d); };",
-                        rp->lrp_networks.ea_s,
-                        rp->lrp_networks.ipv6_addrs[0].addr_s,
-                        gw_mtu,
-                        ovn_stage_get_table(S_ROUTER_IN_ADMISSION));
+                    ds_put_format(
+                        actions, "eth.dst = %s; next;",
+                        distributed ? nat->external_mac :
+                        od->l3dgw_port->lrp_networks.ea_s);
                     ovn_lflow_add_with_hint(lflows, od,
-                                            S_ROUTER_IN_LARGER_PKTS, 50,
-                                            ds_cstr(match), ds_cstr(actions),
-                                            &rp->nbrp->header_);
+                                            S_ROUTER_IN_ARP_RESOLVE,
+                                            100, ds_cstr(match),
+                                            ds_cstr(actions),
+                                            &nat->header_);
+                    sset_add(&nat_entries, nat->external_ip);
                 }
+            } else {
+                /* Add the NAT external_ip to the nat_entries even for
+                 * gateway routers. This is required for adding load balancer
+                 * flows.*/
+                sset_add(&nat_entries, nat->external_ip);
             }
-        }
-    }
-}
-
-/* Logical router ingress table GW_REDIRECT: Gateway redirect.
- *
- * For traffic with outport equal to the l3dgw_port
- * on a distributed router, this table redirects a subset
- * of the traffic to the l3redirect_port which represents
- * the central instance of the l3dgw_port.
- */
-static void
-build_gateway_redirect_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (od->nbr) {
-        if (od->l3dgw_port && od->l3redirect_port) {
-            const struct ovsdb_idl_row *stage_hint = NULL;
-
-            if (od->l3dgw_port->nbrp) {
-                stage_hint = &od->l3dgw_port->nbrp->header_;
-            }
-
-            /* For traffic with outport == l3dgw_port, if the
-             * packet did not match any higher priority redirect
-             * rule, then the traffic is redirected to the central
-             * instance of the l3dgw_port. */
-            ds_clear(match);
-            ds_put_format(match, "outport == %s",
-                          od->l3dgw_port->json_key);
-            ds_clear(actions);
-            ds_put_format(actions, "outport = %s; next;",
-                          od->l3redirect_port->json_key);
-            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT, 50,
-                                    ds_cstr(match), ds_cstr(actions),
-                                    stage_hint);
-        }
 
-        /* Packets are allowed by default. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 0, "1", "next;");
-    }
-}
+            /* Egress UNDNAT table: It is for already established connections'
+             * reverse traffic. i.e., DNAT has already been done in ingress
+             * pipeline and now the packet has entered the egress pipeline as
+             * part of a reply. We undo the DNAT here.
+             *
+             * Note that this only applies for NAT on a distributed router.
+             * Undo DNAT on a gateway router is done in the ingress DNAT
+             * pipeline stage. */
+            if (od->l3dgw_port && (!strcmp(nat->type, "dnat")
+                || !strcmp(nat->type, "dnat_and_snat"))) {
+                ds_clear(match);
+                ds_put_format(match, "ip && ip%s.src == %s"
+                                      " && outport == %s",
+                              is_v6 ? "6" : "4",
+                              nat->logical_ip,
+                              od->l3dgw_port->json_key);
+                if (!distributed && od->l3redirect_port) {
+                    /* Flows for NAT rules that are centralized are only
+                     * programmed on the gateway chassis. */
+                    ds_put_format(match, " && is_chassis_resident(%s)",
+                                  od->l3redirect_port->json_key);
+                }
+                ds_clear(actions);
+                if (distributed) {
+                    ds_put_format(actions, "eth.src = "ETH_ADDR_FMT"; ",
+                                  ETH_ADDR_ARGS(mac));
+                }
 
-/* Local router ingress table ARP_REQUEST: ARP request.
- *
- * In the common case where the Ethernet destination has been resolved,
- * this table outputs the packet (priority 0).  Otherwise, it composes
- * and sends an ARP/IPv6 NA request (priority 100). */
-static void
-build_arp_request_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (od->nbr) {
-        for (int i = 0; i < od->nbr->n_static_routes; i++) {
-            const struct nbrec_logical_router_static_route *route;
+                if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                    ds_put_format(actions, "ip%s.src=%s; next;",
+                                  is_v6 ? "6" : "4", nat->external_ip);
+                } else {
+                    ds_put_format(actions, "ct_dnat;");
+                }
 
-            route = od->nbr->static_routes[i];
-            struct in6_addr gw_ip6;
-            unsigned int plen;
-            char *error = ipv6_parse_cidr(route->nexthop, &gw_ip6, &plen);
-            if (error || plen != 128) {
-                free(error);
-                continue;
+                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_UNDNAT, 100,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &nat->header_);
             }
 
-            ds_clear(match);
-            ds_put_format(match, "eth.dst == 00:00:00:00:00:00 && "
-                          "ip6 && " REG_NEXT_HOP_IPV6 " == %s",
-                          route->nexthop);
-            struct in6_addr sn_addr;
-            struct eth_addr eth_dst;
-            in6_addr_solicited_node(&sn_addr, &gw_ip6);
-            ipv6_multicast_to_ethernet(&eth_dst, &sn_addr);
-
-            char sn_addr_s[INET6_ADDRSTRLEN + 1];
-            ipv6_string_mapped(sn_addr_s, &sn_addr);
-
-            ds_clear(actions);
-            ds_put_format(actions,
-                          "nd_ns { "
-                          "eth.dst = "ETH_ADDR_FMT"; "
-                          "ip6.dst = %s; "
-                          "nd.target = %s; "
-                          "output; "
-                          "};", ETH_ADDR_ARGS(eth_dst), sn_addr_s,
-                          route->nexthop);
-
-            ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ARP_REQUEST, 200,
-                                    ds_cstr(match), ds_cstr(actions),
-                                    &route->header_);
-        }
-
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100,
-                      "eth.dst == 00:00:00:00:00:00 && ip4",
-                      "arp { "
-                      "eth.dst = ff:ff:ff:ff:ff:ff; "
-                      "arp.spa = " REG_SRC_IPV4 "; "
-                      "arp.tpa = " REG_NEXT_HOP_IPV4 "; "
-                      "arp.op = 1; " /* ARP request */
-                      "output; "
-                      "};");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100,
-                      "eth.dst == 00:00:00:00:00:00 && ip6",
-                      "nd_ns { "
-                      "nd.target = " REG_NEXT_HOP_IPV6 "; "
-                      "output; "
-                      "};");
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 0, "1", "output;");
-    }
-}
+            /* Egress SNAT table: Packets enter the egress pipeline with
+             * source ip address that needs to be SNATted to a external ip
+             * address. */
+            if (!strcmp(nat->type, "snat")
+                || !strcmp(nat->type, "dnat_and_snat")) {
+                if (!od->l3dgw_port) {
+                    /* Gateway router. */
+                    ds_clear(match);
+                    ds_put_format(match, "ip && ip%s.src == %s",
+                                  is_v6 ? "6" : "4",
+                                  nat->logical_ip);
+                    ds_clear(actions);
 
-/* Logical router egress table DELIVERY: Delivery (priority 100-110).
- *
- * Priority 100 rules deliver packets to enabled logical ports.
- * Priority 110 rules match multicast packets and update the source
- * mac before delivering to enabled logical ports. IP multicast traffic
- * bypasses S_ROUTER_IN_IP_ROUTING route lookups.
- */
-static void
-build_egress_delivery_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (op->nbrp) {
-        if (!lrport_is_enabled(op->nbrp)) {
-            /* Drop packets to disabled logical ports (since logical flow
-             * tables are default-drop). */
-            return;
-        }
+                    if (allowed_ext_ips || exempted_ext_ips) {
+                        lrouter_nat_add_ext_ip_match(od, lflows, match, nat,
+                                                     is_v6, false, mask);
+                    }
 
-        if (op->derived) {
-            /* No egress packets should be processed in the context of
-             * a chassisredirect port.  The chassisredirect port should
-             * be replaced by the l3dgw port in the local output
-             * pipeline stage before egress processing. */
-            return;
-        }
+                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                        ds_put_format(actions, "ip%s.src=%s; next;",
+                                      is_v6 ? "6" : "4", nat->external_ip);
+                    } else {
+                        ds_put_format(actions, "ct_snat(%s",
+                                      nat->external_ip);
 
-        /* If multicast relay is enabled then also adjust source mac for IP
-         * multicast traffic.
-         */
-        if (op->od->mcast_info.rtr.relay) {
-            ds_clear(match);
-            ds_clear(actions);
-            ds_put_format(match, "(ip4.mcast || ip6.mcast) && outport == %s",
-                          op->json_key);
-            ds_put_format(actions, "eth.src = %s; output;",
-                          op->lrp_networks.ea_s);
-            ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 110,
-                          ds_cstr(match), ds_cstr(actions));
-        }
+                        if (nat->external_port_range[0]) {
+                            ds_put_format(actions, ",%s",
+                                          nat->external_port_range);
+                        }
+                        ds_put_format(actions, ");");
+                    }
 
-        ds_clear(match);
-        ds_put_format(match, "outport == %s", op->json_key);
-        ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 100,
-                      ds_cstr(match), "output;");
-    }
+                    /* The priority here is calculated such that the
+                     * nat->logical_ip with the longest mask gets a higher
+                     * priority. */
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT,
+                                            cidr_bits + 1,
+                                            ds_cstr(match), ds_cstr(actions),
+                                            &nat->header_);
+                } else {
+                    uint16_t priority = cidr_bits + 1;
 
-}
+                    /* Distributed router. */
+                    ds_clear(match);
+                    ds_put_format(match, "ip && ip%s.src == %s"
+                                          " && outport == %s",
+                                  is_v6 ? "6" : "4",
+                                  nat->logical_ip,
+                                  od->l3dgw_port->json_key);
+                    if (!distributed && od->l3redirect_port) {
+                        /* Flows for NAT rules that are centralized are only
+                         * programmed on the gateway chassis. */
+                        priority += 128;
+                        ds_put_format(match, " && is_chassis_resident(%s)",
+                                      od->l3redirect_port->json_key);
+                    }
+                    ds_clear(actions);
 
-static void
-build_misc_local_traffic_drop_flows_for_lrouter(
-        struct ovn_datapath *od, struct hmap *lflows)
-{
-    if (od->nbr) {
-        /* L3 admission control: drop multicast and broadcast source, localhost
-         * source or destination, and zero network source or destination
-         * (priority 100). */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 100,
-                      "ip4.src_mcast ||"
-                      "ip4.src == 255.255.255.255 || "
-                      "ip4.src == 127.0.0.0/8 || "
-                      "ip4.dst == 127.0.0.0/8 || "
-                      "ip4.src == 0.0.0.0/8 || "
-                      "ip4.dst == 0.0.0.0/8",
-                      "drop;");
+                    if (allowed_ext_ips || exempted_ext_ips) {
+                        lrouter_nat_add_ext_ip_match(od, lflows, match, nat,
+                                                     is_v6, false, mask);
+                    }
 
-        /* Drop ARP packets (priority 85). ARP request packets for router's own
-         * IPs are handled with priority-90 flows.
-         * Drop IPv6 ND packets (priority 85). ND NA packets for router's own
-         * IPs are handled with priority-90 flows.
-         */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 85,
-                      "arp || nd", "drop;");
+                    if (distributed) {
+                        ds_put_format(actions, "eth.src = "ETH_ADDR_FMT"; ",
+                                      ETH_ADDR_ARGS(mac));
+                    }
 
-        /* Allow IPv6 multicast traffic that's supposed to reach the
-         * router pipeline (e.g., router solicitations).
-         */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 84, "nd_rs || nd_ra",
-                      "next;");
+                    if (!strcmp(nat->type, "dnat_and_snat") && stateless) {
+                        ds_put_format(actions, "ip%s.src=%s; next;",
+                                      is_v6 ? "6" : "4", nat->external_ip);
+                    } else {
+                        ds_put_format(actions, "ct_snat(%s",
+                                      nat->external_ip);
+                        if (nat->external_port_range[0]) {
+                            ds_put_format(actions, ",%s",
+                                          nat->external_port_range);
+                        }
+                        ds_put_format(actions, ");");
+                    }
 
-        /* Drop other reserved multicast. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 83,
-                      "ip6.mcast_rsvd", "drop;");
+                    /* The priority here is calculated such that the
+                     * nat->logical_ip with the longest mask gets a higher
+                     * priority. */
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_SNAT,
+                                            priority, ds_cstr(match),
+                                            ds_cstr(actions),
+                                            &nat->header_);
+                }
+            }
 
-        /* Allow other multicast if relay enabled (priority 82). */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 82,
-                      "ip4.mcast || ip6.mcast",
-                      od->mcast_info.rtr.relay ? "next;" : "drop;");
+            /* Logical router ingress table 0:
+             * For NAT on a distributed router, add rules allowing
+             * ingress traffic with eth.dst matching nat->external_mac
+             * on the l3dgw_port instance where nat->logical_port is
+             * resident. */
+            if (distributed) {
+                /* Store the ethernet address of the port receiving the packet.
+                 * This will save us from having to match on inport further
+                 * down in the pipeline.
+                 */
+                ds_clear(actions);
+                ds_put_format(actions, REG_INPORT_ETH_ADDR " = %s; next;",
+                              od->l3dgw_port->lrp_networks.ea_s);
 
-        /* Drop Ethernet local broadcast.  By definition this traffic should
-         * not be forwarded.*/
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50,
-                      "eth.bcast", "drop;");
+                ds_clear(match);
+                ds_put_format(match,
+                              "eth.dst == "ETH_ADDR_FMT" && inport == %s"
+                              " && is_chassis_resident(\"%s\")",
+                              ETH_ADDR_ARGS(mac),
+                              od->l3dgw_port->json_key,
+                              nat->logical_port);
+                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_ADMISSION, 50,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &nat->header_);
+            }
 
-        /* TTL discard */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 30,
-                      "ip4 && ip.ttl == {0, 1}", "drop;");
+            /* Ingress Gateway Redirect Table: For NAT on a distributed
+             * router, add flows that are specific to a NAT rule.  These
+             * flows indicate the presence of an applicable NAT rule that
+             * can be applied in a distributed manner.
+             * In particular REG_SRC_IPV4/REG_SRC_IPV6 and eth.src are set to
+             * NAT external IP and NAT external mac so the ARP request
+             * generated in the following stage is sent out with proper IP/MAC
+             * src addresses.
+             */
+            if (distributed) {
+                ds_clear(match);
+                ds_clear(actions);
+                ds_put_format(match,
+                              "ip%s.src == %s && outport == %s && "
+                              "is_chassis_resident(\"%s\")",
+                              is_v6 ? "6" : "4", nat->logical_ip,
+                              od->l3dgw_port->json_key, nat->logical_port);
+                ds_put_format(actions, "eth.src = %s; %s = %s; next;",
+                              nat->external_mac,
+                              is_v6 ? REG_SRC_IPV6 : REG_SRC_IPV4,
+                              nat->external_ip);
+                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_GW_REDIRECT,
+                                        100, ds_cstr(match),
+                                        ds_cstr(actions), &nat->header_);
+            }
 
-        /* Pass other traffic not already handled to the next table for
-         * routing. */
-        ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 0, "1", "next;");
-    }
-}
+            /* Egress Loopback table: For NAT on a distributed router.
+             * If packets in the egress pipeline on the distributed
+             * gateway port have ip.dst matching a NAT external IP, then
+             * loop a clone of the packet back to the beginning of the
+             * ingress pipeline with inport = outport. */
+            if (od->l3dgw_port) {
+                /* Distributed router. */
+                ds_clear(match);
+                ds_put_format(match, "ip%s.dst == %s && outport == %s",
+                              is_v6 ? "6" : "4",
+                              nat->external_ip,
+                              od->l3dgw_port->json_key);
+                if (!distributed) {
+                    ds_put_format(match, " && is_chassis_resident(%s)",
+                                  od->l3redirect_port->json_key);
+                } else {
+                    ds_put_format(match, " && is_chassis_resident(\"%s\")",
+                                  nat->logical_port);
+                }
 
-static void
-build_dhcpv6_reply_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
-        struct ds *match)
-{
-    if (op->nbrp && (!op->derived)) {
-        for (size_t i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
-            ds_clear(match);
-            ds_put_format(match, "ip6.dst == %s && udp.src == 547 &&"
-                          " udp.dst == 546",
-                          op->lrp_networks.ipv6_addrs[i].addr_s);
-            ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100,
-                          ds_cstr(match),
-                          "reg0 = 0; handle_dhcpv6_reply;");
+                ds_clear(actions);
+                ds_put_format(actions,
+                              "clone { ct_clear; "
+                              "inport = outport; outport = \"\"; "
+                              "flags = 0; flags.loopback = 1; ");
+                for (int j = 0; j < MFF_N_LOG_REGS; j++) {
+                    ds_put_format(actions, "reg%d = 0; ", j);
+                }
+                ds_put_format(actions, REGBIT_EGRESS_LOOPBACK" = 1; "
+                              "next(pipeline=ingress, table=%d); };",
+                              ovn_stage_get_table(S_ROUTER_IN_ADMISSION));
+                ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_EGR_LOOP, 100,
+                                        ds_cstr(match), ds_cstr(actions),
+                                        &nat->header_);
+            }
         }
-    }
 
-}
+        /* Handle force SNAT options set in the gateway router. */
+        if (!od->l3dgw_port) {
+            if (dnat_force_snat_ip) {
+                if (od->dnat_force_snat_addrs.n_ipv4_addrs) {
+                    build_lrouter_force_snat_flows(lflows, od, "4",
+                        od->dnat_force_snat_addrs.ipv4_addrs[0].addr_s,
+                        "dnat");
+                }
+                if (od->dnat_force_snat_addrs.n_ipv6_addrs) {
+                    build_lrouter_force_snat_flows(lflows, od, "6",
+                        od->dnat_force_snat_addrs.ipv6_addrs[0].addr_s,
+                        "dnat");
+                }
+            }
+            if (lb_force_snat_ip) {
+                if (od->lb_force_snat_addrs.n_ipv4_addrs) {
+                    build_lrouter_force_snat_flows(lflows, od, "4",
+                        od->lb_force_snat_addrs.ipv4_addrs[0].addr_s, "lb");
+                }
+                if (od->lb_force_snat_addrs.n_ipv6_addrs) {
+                    build_lrouter_force_snat_flows(lflows, od, "6",
+                        od->lb_force_snat_addrs.ipv6_addrs[0].addr_s, "lb");
+                }
+            }
 
-static void
-build_ipv6_input_flows_for_lrouter_port(
-        struct ovn_port *op, struct hmap *lflows,
-        struct ds *match, struct ds *actions)
-{
-    if (op->nbrp && (!op->derived)) {
-        /* No ingress packets are accepted on a chassisredirect
-         * port, so no need to program flows for that port. */
-        if (op->lrp_networks.n_ipv6_addrs) {
-            /* ICMPv6 echo reply.  These flows reply to echo requests
-             * received for the router's IP address. */
-            ds_clear(match);
-            ds_put_cstr(match, "ip6.dst == ");
-            op_put_v6_networks(match, op);
-            ds_put_cstr(match, " && icmp6.type == 128 && icmp6.code == 0");
+            /* For gateway router, re-circulate every packet through
+            * the DNAT zone.  This helps with the following.
+            *
+            * Any packet that needs to be unDNATed in the reverse
+            * direction gets unDNATed. Ideally this could be done in
+            * the egress pipeline. But since the gateway router
+            * does not have any feature that depends on the source
+            * ip address being external IP address for IP routing,
+            * we can do it here, saving a future re-circulation. */
+            ovn_lflow_add(lflows, od, S_ROUTER_IN_DNAT, 50,
+                          "ip", "flags.loopback = 1; ct_dnat;");
+        }
 
-            const char *lrp_actions =
-                        "ip6.dst <-> ip6.src; "
-                        "ip.ttl = 255; "
-                        "icmp6.type = 129; "
-                        "flags.loopback = 1; "
-                        "next; ";
-            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90,
-                                    ds_cstr(match), lrp_actions,
-                                    &op->nbrp->header_);
+        /* Load balancing and packet defrag are only valid on
+         * Gateway routers or router with gateway port. */
+        if (!smap_get(&od->nbr->options, "chassis") && !od->l3dgw_port) {
+            sset_destroy(&nat_entries);
+            return;
         }
 
-        /* ND reply.  These flows reply to ND solicitations for the
-         * router's own IP address. */
-        for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
-            ds_clear(match);
-            if (op->od->l3dgw_port && op == op->od->l3dgw_port
-                && op->od->l3redirect_port) {
-                /* Traffic with eth.src = l3dgw_port->lrp_networks.ea_s
-                 * should only be sent from the gateway chassi, so that
-                 * upstream MAC learning points to the gateway chassis.
-                 * Also need to avoid generation of multiple ND replies
-                 * from different chassis. */
-                ds_put_format(match, "is_chassis_resident(%s)",
-                              op->od->l3redirect_port->json_key);
-            }
+        /* A set to hold all ips that need defragmentation and tracking. */
+        struct sset all_ips = SSET_INITIALIZER(&all_ips);
 
-            build_lrouter_nd_flow(op->od, op, "nd_na_router",
-                                  op->lrp_networks.ipv6_addrs[i].addr_s,
-                                  op->lrp_networks.ipv6_addrs[i].sn_addr_s,
-                                  REG_INPORT_ETH_ADDR, match, false, 90,
-                                  &op->nbrp->header_, lflows);
-        }
+        for (int i = 0; i < od->nbr->n_load_balancer; i++) {
+            struct nbrec_load_balancer *nb_lb = od->nbr->load_balancer[i];
+            struct ovn_northd_lb *lb =
+                ovn_northd_lb_find(lbs, &nb_lb->header_.uuid);
+            ovs_assert(lb);
 
-        /* UDP/TCP port unreachable */
-        if (!smap_get(&op->od->nbr->options, "chassis")
-            && !op->od->l3dgw_port) {
-            for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
-                ds_clear(match);
-                ds_put_format(match,
-                              "ip6 && ip6.dst == %s && !ip.later_frag && tcp",
-                              op->lrp_networks.ipv6_addrs[i].addr_s);
-                const char *action = "tcp_reset {"
-                                     "eth.dst <-> eth.src; "
-                                     "ip6.dst <-> ip6.src; "
-                                     "next; };";
-                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                                        80, ds_cstr(match), action,
-                                        &op->nbrp->header_);
+            for (size_t j = 0; j < lb->n_vips; j++) {
+                struct ovn_lb_vip *lb_vip = &lb->vips[j];
+                struct ovn_northd_lb_vip *lb_vip_nb = &lb->vips_nb[j];
+                ds_clear(actions);
+                build_lb_vip_actions(lb_vip, lb_vip_nb, actions,
+                                     lb->selection_fields, false);
 
-                ds_clear(match);
-                ds_put_format(match,
-                              "ip6 && ip6.dst == %s && !ip.later_frag && udp",
-                              op->lrp_networks.ipv6_addrs[i].addr_s);
-                action = "icmp6 {"
-                         "eth.dst <-> eth.src; "
-                         "ip6.dst <-> ip6.src; "
-                         "ip.ttl = 255; "
-                         "icmp6.type = 1; "
-                         "icmp6.code = 4; "
-                         "next; };";
-                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                                        80, ds_cstr(match), action,
-                                        &op->nbrp->header_);
+                if (!sset_contains(&all_ips, lb_vip->vip_str)) {
+                    sset_add(&all_ips, lb_vip->vip_str);
+                    /* If there are any load balancing rules, we should send
+                     * the packet to conntrack for defragmentation and
+                     * tracking.  This helps with two things.
+                     *
+                     * 1. With tracking, we can send only new connections to
+                     *    pick a DNAT ip address from a group.
+                     * 2. If there are L4 ports in load balancing rules, we
+                     *    need the defragmentation to match on L4 ports. */
+                    ds_clear(match);
+                    if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
+                        ds_put_format(match, "ip && ip4.dst == %s",
+                                      lb_vip->vip_str);
+                    } else {
+                        ds_put_format(match, "ip && ip6.dst == %s",
+                                      lb_vip->vip_str);
+                    }
+                    ovn_lflow_add_with_hint(lflows, od, S_ROUTER_IN_DEFRAG,
+                                            100, ds_cstr(match), "ct_next;",
+                                            &nb_lb->header_);
+                }
 
+                /* Higher priority rules are added for load-balancing in DNAT
+                 * table.  For every match (on a VIP[:port]), we add two flows
+                 * via add_router_lb_flow().  One flow is for specific matching
+                 * on ct.new with an action of "ct_lb($targets);".  The other
+                 * flow is for ct.est with an action of "ct_dnat;". */
                 ds_clear(match);
-                ds_put_format(match,
-                              "ip6 && ip6.dst == %s && !ip.later_frag",
-                              op->lrp_networks.ipv6_addrs[i].addr_s);
-                action = "icmp6 {"
-                         "eth.dst <-> eth.src; "
-                         "ip6.dst <-> ip6.src; "
-                         "ip.ttl = 255; "
-                         "icmp6.type = 1; "
-                         "icmp6.code = 3; "
-                         "next; };";
-                ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT,
-                                        70, ds_cstr(match), action,
-                                        &op->nbrp->header_);
-            }
-        }
+                if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
+                    ds_put_format(match, "ip && ip4.dst == %s",
+                                  lb_vip->vip_str);
+                } else {
+                    ds_put_format(match, "ip && ip6.dst == %s",
+                                  lb_vip->vip_str);
+                }
 
-        /* ICMPv6 time exceeded */
-        for (int i = 0; i < op->lrp_networks.n_ipv6_addrs; i++) {
-            /* skip link-local address */
-            if (in6_is_lla(&op->lrp_networks.ipv6_addrs[i].network)) {
-                continue;
-            }
+                int prio = 110;
+                bool is_udp = nullable_string_is_equal(nb_lb->protocol, "udp");
+                bool is_sctp = nullable_string_is_equal(nb_lb->protocol,
+                                                        "sctp");
+                const char *proto = is_udp ? "udp" : is_sctp ? "sctp" : "tcp";
 
-            ds_clear(match);
-            ds_clear(actions);
+                if (lb_vip->vip_port) {
+                    ds_put_format(match, " && %s && %s.dst == %d", proto,
+                                  proto, lb_vip->vip_port);
+                    prio = 120;
+                }
 
-            ds_put_format(match,
-                          "inport == %s && ip6 && "
-                          "ip6.src == %s/%d && "
-                          "ip.ttl == {0, 1} && !ip.later_frag",
-                          op->json_key,
-                          op->lrp_networks.ipv6_addrs[i].network_s,
-                          op->lrp_networks.ipv6_addrs[i].plen);
-            ds_put_format(actions,
-                          "icmp6 {"
-                          "eth.dst <-> eth.src; "
-                          "ip6.dst = ip6.src; "
-                          "ip6.src = %s; "
-                          "ip.ttl = 255; "
-                          "icmp6.type = 3; /* Time exceeded */ "
-                          "icmp6.code = 0; /* TTL exceeded in transit */ "
-                          "next; };",
-                          op->lrp_networks.ipv6_addrs[i].addr_s);
-            ovn_lflow_add_with_hint(lflows, op->od, S_ROUTER_IN_IP_INPUT, 40,
-                                    ds_cstr(match), ds_cstr(actions),
-                                    &op->nbrp->header_);
+                if (od->l3redirect_port &&
+                    (lb_vip->n_backends || !lb_vip->empty_backend_rej)) {
+                    ds_put_format(match, " && is_chassis_resident(%s)",
+                                  od->l3redirect_port->json_key);
+                }
+                bool force_snat_for_lb =
+                    lb_force_snat_ip || od->lb_force_snat_router_ip;
+                add_router_lb_flow(lflows, od, match, actions, prio,
+                                   force_snat_for_lb, lb_vip, proto,
+                                   nb_lb, meter_groups, &nat_entries);
+            }
         }
+        sset_destroy(&all_ips);
+        sset_destroy(&nat_entries);
     }
-
 }
 
+
+
 struct lswitch_flow_build_info {
     struct hmap *datapaths;
     struct hmap *ports;
@@ -11177,7 +11918,8 @@ struct lswitch_flow_build_info {
 
 static void
 build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
-                                        struct lswitch_flow_build_info *lsi)
+                                        struct lswitch_flow_build_info *lsi,
+                                        struct hmap *bfd_connections)
 {
     /* Build Logical Switch Flows. */
     build_lswitch_lflows_pre_acl_and_acl(od, lsi->port_groups, lsi->lflows,
@@ -11186,13 +11928,20 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
     build_fwd_group_lflows(od, lsi->lflows);
     build_lswitch_lflows_admission_control(od, lsi->lflows);
     build_lswitch_input_port_sec_od(od, lsi->lflows);
+    build_lswitch_learn_fdb_od(od, lsi->lflows);
+    build_lswitch_arp_nd_responder_default(od, lsi->lflows);
+    build_lswitch_dns_lookup_and_response(od, lsi->lflows);
+    build_lswitch_dhcp_and_dns_defaults(od, lsi->lflows);
+    build_lswitch_destination_lookup_bmcast(od, lsi->lflows, &lsi->actions);
+    build_lswitch_output_port_sec_od(od, lsi->lflows);
 
     /* Build Logical Router Flows. */
     build_adm_ctrl_flows_for_lrouter(od, lsi->lflows);
     build_neigh_learning_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                            &lsi->actions);
     build_ND_RA_flows_for_lrouter(od, lsi->lflows);
-    build_static_route_flows_for_lrouter(od, lsi->lflows, lsi->ports);
+    build_static_route_flows_for_lrouter(od, lsi->lflows, lsi->ports,
+                                         bfd_connections);
     build_mcast_lookup_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                          &lsi->actions);
     build_ingress_policy_flows_for_lrouter(od, lsi->lflows, lsi->ports);
@@ -11204,6 +11953,9 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
     build_arp_request_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                         &lsi->actions);
     build_misc_local_traffic_drop_flows_for_lrouter(od, lsi->lflows);
+    build_lrouter_arp_nd_for_datapath(od, lsi->lflows);
+    build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->meter_groups,
+                                    lsi->lbs, &lsi->match, &lsi->actions);
 }
 
 /* Helper function to combine all lflow generation which is iterated by port.
@@ -11216,6 +11968,20 @@ build_lswitch_and_lrouter_iterate_by_op(struct ovn_port *op,
     /* Build Logical Switch Flows. */
     build_lswitch_input_port_sec_op(op, lsi->lflows, &lsi->actions,
                                     &lsi->match);
+    build_lswitch_learn_fdb_op(op, lsi->lflows, &lsi->actions,
+                               &lsi->match);
+    build_lswitch_arp_nd_responder_skip_local(op, lsi->lflows,
+                                              &lsi->match);
+    build_lswitch_arp_nd_responder_known_ips(op, lsi->lflows,
+                                             lsi->ports,
+                                             &lsi->actions,
+                                             &lsi->match);
+    build_lswitch_dhcp_options_and_response(op,lsi->lflows);
+    build_lswitch_external_port(op, lsi->lflows);
+    build_lswitch_ip_unicast_lookup(op, lsi->lflows, lsi->mcgroups,
+                                    &lsi->actions, &lsi->match);
+    build_lswitch_output_port_sec_op(op, lsi->lflows,
+                                     &lsi->actions, &lsi->match);
 
     /* Build Logical Router Flows. */
     build_adm_ctrl_flows_for_lrouter_port(op, lsi->lflows, &lsi->match,
@@ -11232,6 +11998,10 @@ build_lswitch_and_lrouter_iterate_by_op(struct ovn_port *op,
     build_dhcpv6_reply_flows_for_lrouter_port(op, lsi->lflows, &lsi->match);
     build_ipv6_input_flows_for_lrouter_port(op, lsi->lflows,
                                             &lsi->match, &lsi->actions);
+    build_lrouter_ipv4_ip_input(op, lsi->lflows,
+                                &lsi->match, &lsi->actions);
+    build_lrouter_force_snat_flows_op(op, lsi->lflows, &lsi->match,
+                                      &lsi->actions);
 }
 
 static void
@@ -11239,10 +12009,13 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
                                 struct hmap *port_groups, struct hmap *lflows,
                                 struct hmap *mcgroups,
                                 struct hmap *igmp_groups,
-                                struct shash *meter_groups, struct hmap *lbs)
+                                struct shash *meter_groups, struct hmap *lbs,
+                                struct hmap *bfd_connections)
 {
     struct ovn_datapath *od;
     struct ovn_port *op;
+    struct ovn_northd_lb *lb;
+    struct ovn_igmp_group *igmp_group;
 
     char *svc_check_match = xasprintf("eth.dst == %s", svc_monitor_mac);
 
@@ -11264,22 +12037,28 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
      * will move here and will be reogranized by iterator type.
      */
     HMAP_FOR_EACH (od, key_node, datapaths) {
-        build_lswitch_and_lrouter_iterate_by_od(od, &lsi);
+        build_lswitch_and_lrouter_iterate_by_od(od, &lsi, bfd_connections);
     }
     HMAP_FOR_EACH (op, key_node, ports) {
         build_lswitch_and_lrouter_iterate_by_op(op, &lsi);
     }
+    HMAP_FOR_EACH (lb, hmap_node, lbs) {
+        build_lswitch_arp_nd_service_monitor(lb, lsi.lflows,
+                                             &lsi.actions,
+                                             &lsi.match);
+    }
+    HMAP_FOR_EACH (igmp_group, hmap_node, igmp_groups) {
+        build_lswitch_ip_mcast_igmp_mld(igmp_group,
+                                        lsi.lflows,
+                                        &lsi.actions,
+                                        &lsi.match);
+    }
     free(svc_check_match);
 
     ds_destroy(&lsi.match);
     ds_destroy(&lsi.actions);
 
-    /* Legacy lswitch build - to be migrated. */
-    build_lswitch_flows(datapaths, ports, lflows, mcgroups,
-                        igmp_groups, lbs);
-
-    /* Legacy lrouter build - to be migrated. */
-    build_lrouter_flows(datapaths, ports, lflows, meter_groups, lbs);
+    build_lswitch_flows(datapaths, lflows);
 }
 
 struct ovn_dp_group {
@@ -11356,13 +12135,14 @@ build_lflows(struct northd_context *ctx, struct hmap *datapaths,
              struct hmap *ports, struct hmap *port_groups,
              struct hmap *mcgroups, struct hmap *igmp_groups,
              struct shash *meter_groups,
-             struct hmap *lbs)
+             struct hmap *lbs, struct hmap *bfd_connections)
 {
     struct hmap lflows = HMAP_INITIALIZER(&lflows);
 
     build_lswitch_and_lrouter_flows(datapaths, ports,
                                     port_groups, &lflows, mcgroups,
-                                    igmp_groups, meter_groups, lbs);
+                                    igmp_groups, meter_groups, lbs,
+                                    bfd_connections);
 
     /* Collecting all unique datapath groups. */
     struct hmap dp_groups = HMAP_INITIALIZER(&dp_groups);
@@ -11801,17 +12581,20 @@ static void
 sync_meters_iterate_nb_meter(struct northd_context *ctx,
                              const char *meter_name,
                              const struct nbrec_meter *nb_meter,
-                             struct shash *sb_meters)
+                             struct shash *sb_meters,
+                             struct sset *used_sb_meters)
 {
+    const struct sbrec_meter *sb_meter;
     bool new_sb_meter = false;
 
-    const struct sbrec_meter *sb_meter = shash_find_and_delete(sb_meters,
-                                                               meter_name);
+    sb_meter = shash_find_data(sb_meters, meter_name);
     if (!sb_meter) {
         sb_meter = sbrec_meter_insert(ctx->ovnsb_txn);
         sbrec_meter_set_name(sb_meter, meter_name);
+        shash_add(sb_meters, sb_meter->name, sb_meter);
         new_sb_meter = true;
     }
+    sset_add(used_sb_meters, meter_name);
 
     if (new_sb_meter || bands_need_update(nb_meter, sb_meter)) {
         struct sbrec_meter_band **sb_bands;
@@ -11833,6 +12616,24 @@ sync_meters_iterate_nb_meter(struct northd_context *ctx,
     sbrec_meter_set_unit(sb_meter, nb_meter->unit);
 }
 
+static void
+sync_acl_fair_meter(struct northd_context *ctx, struct shash *meter_groups,
+                    const struct nbrec_acl *acl, struct shash *sb_meters,
+                    struct sset *used_sb_meters)
+{
+    const struct nbrec_meter *nb_meter =
+        fair_meter_lookup_by_name(meter_groups, acl->meter);
+
+    if (!nb_meter) {
+        return;
+    }
+
+    char *meter_name = alloc_acl_log_unique_meter_name(acl);
+    sync_meters_iterate_nb_meter(ctx, meter_name, nb_meter, sb_meters,
+                                 used_sb_meters);
+    free(meter_name);
+}
+
 /* Each entry in the Meter and Meter_Band tables in OVN_Northbound have
  * a corresponding entries in the Meter and Meter_Band tables in
  * OVN_Southbound. Additionally, ACL logs that use fair meters have
@@ -11840,9 +12641,10 @@ sync_meters_iterate_nb_meter(struct northd_context *ctx,
  */
 static void
 sync_meters(struct northd_context *ctx, struct hmap *datapaths,
-            struct shash *meter_groups)
+            struct shash *meter_groups, struct hmap *port_groups)
 {
     struct shash sb_meters = SHASH_INITIALIZER(&sb_meters);
+    struct sset used_sb_meters = SSET_INITIALIZER(&used_sb_meters);
 
     const struct sbrec_meter *sb_meter;
     SBREC_METER_FOR_EACH (sb_meter, ctx->ovnsb_idl) {
@@ -11852,7 +12654,7 @@ sync_meters(struct northd_context *ctx, struct hmap *datapaths,
     const struct nbrec_meter *nb_meter;
     NBREC_METER_FOR_EACH (nb_meter, ctx->ovnnb_idl) {
         sync_meters_iterate_nb_meter(ctx, nb_meter->name, nb_meter,
-                                     &sb_meters);
+                                     &sb_meters, &used_sb_meters);
     }
 
     /*
@@ -11866,19 +12668,28 @@ sync_meters(struct northd_context *ctx, struct hmap *datapaths,
             continue;
         }
         for (size_t i = 0; i < od->nbs->n_acls; i++) {
-            struct nbrec_acl *acl = od->nbs->acls[i];
-            nb_meter = fair_meter_lookup_by_name(meter_groups, acl->meter);
-            if (!nb_meter) {
-                continue;
+            sync_acl_fair_meter(ctx, meter_groups, od->nbs->acls[i],
+                                &sb_meters, &used_sb_meters);
+        }
+        struct ovn_port_group *pg;
+        HMAP_FOR_EACH (pg, key_node, port_groups) {
+            if (ovn_port_group_ls_find(pg, &od->nbs->header_.uuid)) {
+                for (size_t i = 0; i < pg->nb_pg->n_acls; i++) {
+                    sync_acl_fair_meter(ctx, meter_groups, pg->nb_pg->acls[i],
+                                        &sb_meters, &used_sb_meters);
+                }
             }
-
-            char *meter_name = alloc_acl_log_unique_meter_name(acl);
-            sync_meters_iterate_nb_meter(ctx, meter_name, nb_meter,
-                                         &sb_meters);
-            free(meter_name);
         }
     }
 
+    const char *used_meter;
+    const char *used_meter_next;
+    SSET_FOR_EACH_SAFE (used_meter, used_meter_next, &used_sb_meters) {
+        shash_find_and_delete(&sb_meters, used_meter);
+        sset_delete(&used_sb_meters, SSET_NODE_FROM_NAME(used_meter));
+    }
+    sset_destroy(&used_sb_meters);
+
     struct shash_node *node, *next;
     SHASH_FOR_EACH_SAFE (node, next, &sb_meters) {
         sbrec_meter_delete(node->data);
@@ -12274,6 +13085,7 @@ ovnnb_db_run(struct northd_context *ctx,
     struct hmap igmp_groups;
     struct shash meter_groups = SHASH_INITIALIZER(&meter_groups);
     struct hmap lbs;
+    struct hmap bfd_connections = HMAP_INITIALIZER(&bfd_connections);
 
     /* Sync ipsec configuration.
      * Copy nb_cfg from northbound to southbound database.
@@ -12354,6 +13166,7 @@ ovnnb_db_run(struct northd_context *ctx,
 
     use_logical_dp_groups = smap_get_bool(&nb->options,
                                           "use_logical_dp_groups", false);
+    /* deprecated, use --event instead */
     controller_event_en = smap_get_bool(&nb->options,
                                         "controller_event", false);
     check_lsp_is_up = !smap_get_bool(&nb->options,
@@ -12368,14 +13181,16 @@ ovnnb_db_run(struct northd_context *ctx,
     build_ip_mcast(ctx, datapaths);
     build_mcast_groups(ctx, datapaths, ports, &mcast_groups, &igmp_groups);
     build_meter_groups(ctx, &meter_groups);
+    build_bfd_table(ctx, &bfd_connections, ports);
     build_lflows(ctx, datapaths, ports, &port_groups, &mcast_groups,
-                 &igmp_groups, &meter_groups, &lbs);
+                 &igmp_groups, &meter_groups, &lbs, &bfd_connections);
     ovn_update_ipv6_prefix(ports);
 
     sync_address_sets(ctx);
     sync_port_groups(ctx, &port_groups);
-    sync_meters(ctx, datapaths, &meter_groups);
+    sync_meters(ctx, datapaths, &meter_groups, &port_groups);
     sync_dns_entries(ctx, datapaths);
+    cleanup_stale_fdp_entries(ctx, datapaths);
 
     struct ovn_northd_lb *lb;
     HMAP_FOR_EACH_POP (lb, hmap_node, &lbs) {
@@ -12393,9 +13208,13 @@ ovnnb_db_run(struct northd_context *ctx,
     HMAP_FOR_EACH_SAFE (pg, next_pg, key_node, &port_groups) {
         ovn_port_group_destroy(&port_groups, pg);
     }
+
+    bfd_cleanup_connections(ctx, &bfd_connections);
+
     hmap_destroy(&igmp_groups);
     hmap_destroy(&mcast_groups);
     hmap_destroy(&port_groups);
+    hmap_destroy(&bfd_connections);
 
     struct shash_node *node, *next;
     SHASH_FOR_EACH_SAFE (node, next, &meter_groups) {
@@ -12542,7 +13361,17 @@ handle_port_binding_changes(struct northd_context *ctx, struct hmap *ports,
             continue;
         }
 
-        bool up = (sb->chassis || lsp_is_router(op->nbsp));
+        bool up = false;
+
+        if (lsp_is_router(op->nbsp)) {
+            up = true;
+        } else if (sb->chassis) {
+            up = smap_get_bool(&sb->chassis->other_config,
+                               OVN_FEATURE_PORT_UP_NOTIF, false)
+                 ? sb->n_up && sb->up[0]
+                 : true;
+        }
+
         if (!op->nbsp->up || *op->nbsp->up != up) {
             nbrec_logical_switch_port_set_up(op->nbsp, &up, 1);
         }
@@ -12690,7 +13519,7 @@ static const char *rbac_encap_update[] =
 static const char *rbac_port_binding_auth[] =
     {""};
 static const char *rbac_port_binding_update[] =
-    {"chassis"};
+    {"chassis", "up"};
 
 static const char *rbac_mac_binding_auth[] =
     {""};
@@ -13176,6 +14005,8 @@ main(int argc, char *argv[])
                          &sbrec_port_binding_col_ha_chassis_group);
     ovsdb_idl_add_column(ovnsb_idl_loop.idl,
                          &sbrec_port_binding_col_virtual_parent);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+                         &sbrec_port_binding_col_up);
     ovsdb_idl_add_column(ovnsb_idl_loop.idl,
                          &sbrec_gateway_chassis_col_chassis);
     ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_gateway_chassis_col_name);
@@ -13324,9 +14155,25 @@ main(int argc, char *argv[])
     add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_name);
     add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_vips);
     add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_protocol);
+    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_load_balancer_col_options);
     add_column_noalert(ovnsb_idl_loop.idl,
                        &sbrec_load_balancer_col_external_ids);
 
+    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_bfd);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_logical_port);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_dst_ip);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_status);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_min_tx);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_min_rx);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_detect_mult);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_disc);
+    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_bfd_col_src_port);
+
+    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_fdb);
+    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_fdb_col_mac);
+    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_fdb_col_dp_key);
+    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_fdb_col_port_key);
+
     struct ovsdb_idl_index *sbrec_chassis_by_name
         = chassis_index_create(ovnsb_idl_loop.idl);
 
@@ -13449,6 +14296,7 @@ main(int argc, char *argv[])
         }
     }
 
+
     free(ovn_internal_version);
     unixctl_server_destroy(unixctl);
     ovsdb_idl_loop_destroy(&ovnnb_idl_loop);
diff --git a/ovn-nb.ovsschema b/ovn-nb.ovsschema
index 269e3a888..29019809c 100644
--- a/ovn-nb.ovsschema
+++ b/ovn-nb.ovsschema
@@ -1,7 +1,7 @@
 {
     "name": "OVN_Northbound",
-    "version": "5.28.0",
-    "cksum": "610359755 26847",
+    "version": "5.31.0",
+    "cksum": "2352750632 28701",
     "tables": {
         "NB_Global": {
             "columns": {
@@ -188,6 +188,11 @@
                                 ["eth_src", "eth_dst", "ip_src", "ip_dst",
                                  "tp_src", "tp_dst"]]},
                              "min": 0, "max": "unlimited"}},
+                "options": {
+                     "type": {"key": "string",
+                              "value": "string",
+                              "min": 0,
+                              "max": "unlimited"}},
                 "external_ids": {
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}}},
@@ -369,6 +374,10 @@
                                     "min": 0, "max": 1}},
                 "nexthop": {"type": "string"},
                 "output_port": {"type": {"key": "string", "min": 0, "max": 1}},
+                "bfd": {"type": {"key": {"type": "uuid", "refTable": "BFD",
+                                          "refType": "weak"},
+                                  "min": 0,
+                                  "max": 1}},
                 "options": {
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}},
@@ -386,6 +395,8 @@
                     "key": {"type": "string",
                             "enum": ["set", ["allow", "drop", "reroute"]]}}},
                 "nexthop": {"type": {"key": "string", "min": 0, "max": 1}},
+                "nexthops": {"type": {
+                    "key": "string", "min": 0, "max": "unlimited"}},
                 "options": {
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}},
@@ -519,5 +530,30 @@
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}}},
             "indexes": [["name"]],
+            "isRoot": true},
+        "BFD": {
+            "columns": {
+                "logical_port": {"type": "string"},
+                "dst_ip": {"type": "string"},
+                "min_tx": {"type": {"key": {"type": "integer",
+                                            "minInteger": 1},
+                                    "min": 0, "max": 1}},
+                "min_rx": {"type": {"key": {"type": "integer"},
+                                    "min": 0, "max": 1}},
+                "detect_mult": {"type": {"key": {"type": "integer",
+                                                 "minInteger": 1},
+                                    "min": 0, "max": 1}},
+                "status": {
+                    "type": {"key": {"type": "string",
+                             "enum": ["set", ["down", "init", "up",
+                                              "admin_down"]]},
+                             "min": 0, "max": 1}},
+                "external_ids": {
+                    "type": {"key": "string", "value": "string",
+                             "min": 0, "max": "unlimited"}},
+                "options": {
+                    "type": {"key": "string", "value": "string",
+                             "min": 0, "max": "unlimited"}}},
+            "indexes": [["logical_port", "dst_ip"]],
             "isRoot": true}}
     }
diff --git a/ovn-nb.xml b/ovn-nb.xml
index c9ab25ceb..09b755f1a 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -1635,6 +1635,24 @@
         See <em>External IDs</em> at the beginning of this document.
       </column>
     </group>
+    <group title="Load_Balancer options">
+      <column name="options" key="reject" type='{"type": "boolean"}'>
+        If the load balancer is created with <code>--reject</code> option and
+        it has no active backends, a TCP reset segment (for tcp) or an ICMP
+        port unreachable packet (for all other kind of traffic) will be sent
+        whenever an incoming packet is received for this load-balancer.
+        Please note using <code>--reject</code> option will disable empty_lb
+        SB controller event for this load balancer.
+      </column>
+
+      <column name="options" key="hairpin_snat_ip">
+        IP to be used as source IP for packets that have been hair-pinned
+        after load balancing.  The default behavior when the option is not set
+        is to use the load balancer VIP as source IP.  This option may have
+        exactly one IPv4 and/or one IPv6 address on it, separated by a space
+        character.
+      </column>
+    </group>
   </table>
 
   <table name="Load_Balancer_Health_Check" title="load balancer">
@@ -1917,16 +1935,29 @@
       </column>
       <column name="options" key="lb_force_snat_ip">
         <p>
-          If set, indicates a set of IP addresses to use to force SNAT a packet
-          that has already been load-balanced in the gateway router.  When
-          multiple gateway routers are configured, a packet can potentially
-          enter any of the gateway routers, get DNATted as part of the load-
-          balancing and eventually reach the logical switch port.
-          For the return traffic to go back to the same gateway router (for
-          unDNATing), the packet needs a SNAT in the first place.  This can be
-          achieved by setting the above option with a gateway specific set of
-          IP addresses. This option may have exactly one IPv4 and/or one IPv6
-          address on it, separated by a space character.
+          If set, this option can take two possible type of values.  Either
+          a set of IP addresses or the string value - <code>router_ip</code>.
+        </p>
+
+        <p>
+          If a set of IP addresses is configured, these are the addresses to
+          use to force SNAT a packet that has already been load-balanced in the
+          gateway router.  When multiple gateway routers are configured, a
+          packet can potentially enter any of the gateway routers, get
+          DNATted as part of the load-balancing and eventually reach the
+          logical switch port.  For the return traffic to go back to the
+          same gateway router (for unDNATing), the packet needs a SNAT in the
+          first place.  This can be achieved by setting the above option with
+          a gateway specific set of IP addresses. This option may have exactly
+          one IPv4 and/or one IPv6 address on it, separated by a space
+          character.
+        </p>
+
+        <p>
+          If it is configured with the value <code>router_ip</code>, then
+          the load balanced packet is SNATed with the IP of the router port
+          (attached to the gateway router) selected as the destination after
+          taking the routing decision.
         </p>
       </column>
       <column name="options" key="mcast_relay" type='{"type": "boolean"}'>
@@ -2634,6 +2665,13 @@
       </p>
     </column>
 
+    <column name="bfd">
+      <p>
+        Reference to the <ref table="BFD"/> row if the route has an
+        associated BFD session.
+      </p>
+    </column>
+
     <column name="external_ids" key="ic-learned-route">
       <code>ovn-ic</code> populates this key if the route is learned from the
       global <ref db="OVN_IC_Southbound"/> database.  In this case the value
@@ -2713,18 +2751,34 @@
         </li>
 
         <li>
-          <code>reroute</code>: Reroute packet to <ref column="nexthop"/>.
+          <code>reroute</code>: Reroute packet to <ref column="nexthop"/> or
+          <ref column="nexthops"/>.
         </li>
       </ul>
     </column>
 
     <column name="nexthop">
+      <p>
+        Note: This column is deprecated in favor of <ref column="nexthops"/>.
+      </p>
       <p>
         Next-hop IP address for this route, which should be the IP
         address of a connected router port or the IP address of a logical port.
       </p>
     </column>
 
+    <column name="nexthops">
+      <p>
+        Next-hop ECMP IP addresses for this route. Each IP in the list should
+        be the IP address of a connected router port or the IP address of a
+        logical port.
+      </p>
+
+      <p>
+        One IP from the list is selected as next hop.
+      </p>
+    </column>
+
     <column name="options" key="pkt_mark">
       <p>
         Marks the packet with the value specified when the router policy
@@ -3702,4 +3756,71 @@
       </column>
     </group>
   </table>
+
+  <table name="BFD">
+    <p>
+      Contains BFD parameters for the <code>ovn-controller</code> BFD
+    </p>
+
+    <group title="Configuration">
+      <column name="logical_port">
+        OVN logical port on which the BFD engine is running.
+      </column>
+
+      <column name="dst_ip">
+        BFD peer IP address.
+      </column>
+
+      <column name="min_tx">
+        This is the minimum interval, in milliseconds, that the local
+        system would like to use when transmitting BFD Control packets,
+        less any jitter applied. The value zero is reserved. Default
+        value is 1000 ms.
+      </column>
+
+      <column name="min_rx">
+        This is the minimum interval, in milliseconds, between received
+        BFD Control packets that this system is capable of supporting,
+        less any jitter applied by the sender. If this value is zero,
+        the transmitting system does not want the remote system to send
+        any periodic BFD Control packets.
+      </column>
+
+      <column name="detect_mult">
+        Detection time multiplier.  The negotiated transmit interval,
+        multiplied by this value, provides the Detection Time for the
+        receiving system in Asynchronous mode. Default value is 5.
+      </column>
+
+      <column name="options">
+        Reserved for future use.
+      </column>
+
+      <column name="external_ids">
+        See <em>External IDs</em> at the beginning of this document.
+      </column>
+    </group>
+
+    <group title="Status Reporting">
+      <column name="status">
+        <p>
+          BFD port logical states. Possible values are:
+          <ul>
+            <li>
+              <code>admin_down</code>
+            </li>
+            <li>
+              <code>down</code>
+            </li>
+            <li>
+              <code>init</code>
+            </li>
+            <li>
+              <code>up</code>
+            </li>
+          </ul>
+        </p>
+      </column>
+    </group>
+  </table>
 </database>
diff --git a/ovn-sb.ovsschema b/ovn-sb.ovsschema
index 5228839b8..b5d3338f4 100644
--- a/ovn-sb.ovsschema
+++ b/ovn-sb.ovsschema
@@ -1,7 +1,7 @@
 {
     "name": "OVN_Southbound",
-    "version": "20.12.0",
-    "cksum": "3969471120 24441",
+    "version": "20.16.1",
+    "cksum": "4243908307 26536",
     "tables": {
         "SB_Global": {
             "columns": {
@@ -103,7 +103,7 @@
                                                        "egress"]]}}},
                 "table_id": {"type": {"key": {"type": "integer",
                                               "minInteger": 0,
-                                              "maxInteger": 23}}},
+                                              "maxInteger": 32}}},
                 "priority": {"type": {"key": {"type": "integer",
                                               "minInteger": 0,
                                               "maxInteger": 65535}}},
@@ -225,6 +225,7 @@
                 "nat_addresses": {"type": {"key": "string",
                                            "min": 0,
                                            "max": "unlimited"}},
+                "up": {"type": {"key": "boolean", "min": 0, "max": 1}},
                 "external_ids": {"type": {"key": "string",
                                  "value": "string",
                                  "min": 0,
@@ -481,9 +482,50 @@
                     "type": {"key": {"type": "uuid",
                                      "refTable": "Datapath_Binding"},
                              "min": 0, "max": "unlimited"}},
+                "options": {
+                     "type": {"key": "string",
+                              "value": "string",
+                              "min": 0,
+                              "max": "unlimited"}},
+                "external_ids": {
+                    "type": {"key": "string", "value": "string",
+                             "min": 0, "max": "unlimited"}}},
+            "isRoot": true},
+        "BFD": {
+            "columns": {
+                "src_port": {"type": {"key": {"type": "integer",
+                                          "minInteger": 49152,
+                                          "maxInteger": 65535}}},
+                "disc": {"type": {"key": {"type": "integer"}}},
+                "logical_port": {"type": "string"},
+                "dst_ip": {"type": "string"},
+                "min_tx": {"type": {"key": {"type": "integer"}}},
+                "min_rx": {"type": {"key": {"type": "integer"}}},
+                "detect_mult": {"type": {"key": {"type": "integer"}}},
+                "status": {
+                    "type": {"key": {"type": "string",
+                             "enum": ["set", ["down", "init", "up",
+                                              "admin_down"]]}}},
                 "external_ids": {
+                    "type": {"key": "string", "value": "string",
+                             "min": 0, "max": "unlimited"}},
+                "options": {
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}}},
+            "indexes": [["logical_port", "dst_ip", "src_port", "disc"]],
+            "isRoot": true},
+        "FDB": {
+            "columns": {
+                "mac": {"type": "string"},
+                "dp_key": {
+                     "type": {"key": {"type": "integer",
+                                      "minInteger": 1,
+                                      "maxInteger": 16777215}}},
+                "port_key": {
+                     "type": {"key": {"type": "integer",
+                                      "minInteger": 1,
+                                      "maxInteger": 16777215}}}},
+            "indexes": [["mac", "dp_key"]],
             "isRoot": true}
     }
 }
diff --git a/ovn-sb.xml b/ovn-sb.xml
index c13994848..258a12b4e 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -322,6 +322,11 @@
       table. See <code>ovn-controller</code>(8) for more information.
     </column>
 
+    <column name="other_config" key="port-up-notif">
+      <code>ovn-controller</code> populates this key with <code>true</code>
+      when it supports <code>Port_Binding.up</code>.
+    </column>
+
     <group title="Common Columns">
       The overall purpose of these columns is described under <code>Common
       Columns</code> at the beginning of this document.
@@ -1521,6 +1526,68 @@
           </p>
         </dd>
 
+        <dt><code><var>P</var> = get_fdb(<var>A</var>);</code></dt>
+
+        <dd>
+          <p>
+            <b>Parameters</b>: 48-bit MAC address field <var>A</var>.
+          </p>
+
+          <p>
+            Looks up <var>A</var> in the fdb table. If an entry is found, stores
+            the logical port key to the out parameter <code>P</code>.
+          </p>
+
+          <p><b>Example:</b> <code>outport = get_fdb(eth.src);</code></p>
+        </dd>
+
+        <dt>
+          <code>put_fdb(<var>P</var>, <var>A</var>);</code>
+        </dt>
+
+        <dd>
+          <p>
+            <b>Parameters</b>: logical port string field <var>P</var>, 48-bit
+            MAC address field <var>A</var>.
+          </p>
+
+          <p>
+            Adds or updates the entry for Ethernet address <var>A</var> in
+            fdb table, setting its logical port key to <var>P</var>.
+          </p>
+
+          <p><b>Example:</b> <code>put_fdb(inport, arp.spa);</code></p>
+        </dd>
+
+        <dt>
+          <code><var>R</var> = lookup_fdb(<var>P</var>, <var>A</var>);</code>
+        </dt>
+
+        <dd>
+          <p>
+            <b>Parameters</b>: 48-bit MAC address field <var>A</var>,
+            logical port string field <var>P</var>.
+          </p>
+
+          <p>
+            <b>Result</b>: stored to a 1-bit subfield <var>R</var>.
+          </p>
+
+          <p>
+            Looks up <var>A</var> in the fdb table. If an entry is found
+            and the logical port key is <var>P</var>,
+            stores <code>1</code> in the 1-bit subfield
+            <var>R</var>, else 0.
+          </p>
+
+          <p>
+            <b>Example:</b>
+            <code>
+              reg0[0] = lookup_fdb(inport, eth.src);
+            </code>
+          </p>
+        </dd>
+
         <dt><code>nd_ns { <var>action</var>; </code>...<code> };</code></dt>
         <dd>
           <p>
@@ -2771,6 +2838,14 @@ tcp.flags = RST;
         </p>
       </column>
 
+      <column name="up">
+        <p>
+          This is set to <code>true</code> whenever all OVS flows
+          required by this Port_Binding have been installed.  This is
+          populated by <code>ovn-controller</code>.
+        </p>
+      </column>
+
       <column name="tunnel_key">
         <p>
           A number that represents the logical port in the key (e.g. STT key or
@@ -4225,10 +4300,126 @@ tcp.flags = RST;
       Datapaths to which this load balancer applies to.
     </column>
 
+    <group title="Load_Balancer options">
+    <column name="options" key="hairpin_snat_ip">
+      IP to be used as source IP for packets that have been hair-pinned after
+      load balancing.  This value is automatically populated by
+      <code>ovn-northd</code>.
+    </column>
+    <column name="options" key="hairpin_orig_tuple" type='{"type": "boolean"}'>
+      This value is automatically set to <code>true</code> by
+      <code>ovn-northd</code> when original destination IP and transport port
+      of the load balanced packets are stored in registers
+      <code>reg1, reg2, xxreg1</code>.
+    </column>
+    </group>
+
     <group title="Common Columns">
       <column name="external_ids">
         See <em>External IDs</em> at the beginning of this document.
       </column>
     </group>
   </table>
+
+  <table name="BFD">
+    <p>
+      Contains BFD parameters for the ovn-controller BFD configuration.
+    </p>
+
+    <group title="Configuration">
+      <column name="src_port">
+        UDP source port used in BFD control packets.
+        The source port MUST be in the range 49152 through 65535
+        (RFC5881 section 4).
+      </column>
+
+      <column name="disc">
+        A unique, nonzero discriminator value generated by the transmitting
+        system, used to demultiplex multiple BFD sessions between the same pair
+        of systems.
+      </column>
+
+      <column name="logical_port">
+        OVN logical port on which the BFD engine is running.
+      </column>
+
+      <column name="dst_ip">
+        BFD peer IP address.
+      </column>
+
+      <column name="min_tx">
+        This is the minimum interval, in milliseconds, that the local
+        system would like to use when transmitting BFD Control packets,
+        less any jitter applied. The value zero is reserved.
+      </column>
+
+      <column name="min_rx">
+        This is the minimum interval, in milliseconds, between received
+        BFD Control packets that this system is capable of supporting,
+        less any jitter applied by the sender. If this value is zero,
+        the transmitting system does not want the remote system to send
+        any periodic BFD Control packets.
+      </column>
+
+      <column name="detect_mult">
+        Detection time multiplier.  The negotiated transmit interval,
+        multiplied by this value, provides the Detection Time for the
+        receiving system in Asynchronous mode.
+      </column>
+
+      <column name="options">
+        Reserved for future use.
+      </column>
+
+      <column name="external_ids">
+        See <em>External IDs</em> at the beginning of this document.
+      </column>
+    </group>
+
+    <group title="Status Reporting">
+      <column name="status">
+        <p>
+          BFD port logical states. Possible values are:
+          <ul>
+            <li>
+              <code>admin_down</code>
+            </li>
+            <li>
+              <code>down</code>
+            </li>
+            <li>
+              <code>init</code>
+            </li>
+            <li>
+              <code>up</code>
+            </li>
+          </ul>
+        </p>
+      </column>
+    </group>
+  </table>
+
+  <table name="FDB" title="Port to MAC bindings">
+    <p>
+      This table is primarily used to learn the MACs observed on a VIF
+      which belongs to a <code>Logical_Switch_Port</code> record in
+      <code>OVN_Northbound</code> whose port security is disabled
+      and 'unknown' address set.  If port security is disabled on a
+      <code>Logical_Switch_Port</code> record, OVN should allow traffic
+      with any source mac from the VIF.  This table will be used to deliver
+      a packet to the VIF, if a packet's <code>eth.dst</code> is learnt.
+    </p>
+
+    <column name="mac">
+      The learnt MAC address.
+    </column>
+
+    <column name="dp_key">
+      The key of the datapath on which this FDB was learnt.
+    </column>
+
+    <column name="port_key">
+      The key of the port binding on which this FDB was learnt.
+    </column>
+  </table>
 </database>
diff --git a/ovs b/ovs
new file mode 160000
index 000000000..ac09cbfcb
--- /dev/null
+++ b/ovs
@@ -0,0 +1 @@
+Subproject commit ac09cbfcb70ac6f443f039d5934448bd80f74493
diff --git a/tests/atlocal.in b/tests/atlocal.in
index d9a4c91d4..5ebc8e117 100644
--- a/tests/atlocal.in
+++ b/tests/atlocal.in
@@ -181,6 +181,9 @@ fi
 # Set HAVE_DIBBLER-SERVER
 find_command dibbler-server
 
+# Set HAVE_BFDD_BEACON
+find_command bfdd-beacon
+
 # Turn off proxies.
 unset http_proxy
 unset https_proxy
diff --git a/tests/automake.mk b/tests/automake.mk
index c5c286eae..c09f615d5 100644
--- a/tests/automake.mk
+++ b/tests/automake.mk
@@ -31,7 +31,8 @@ TESTSUITE_AT = \
 	tests/ovn-controller-vtep.at \
 	tests/ovn-ic.at \
 	tests/ovn-macros.at \
-	tests/ovn-performance.at
+	tests/ovn-performance.at \
+	tests/ovn-ofctrl-seqno.at
 
 SYSTEM_KMOD_TESTSUITE_AT = \
 	tests/system-common-macros.at \
@@ -202,7 +203,10 @@ noinst_PROGRAMS += tests/ovstest
 tests_ovstest_SOURCES = \
 	tests/ovstest.c \
 	tests/ovstest.h \
-	tests/test-ovn.c
+	tests/test-ovn.c \
+	controller/test-ofctrl-seqno.c \
+	controller/ofctrl-seqno.c \
+	controller/ofctrl-seqno.h
 
 tests_ovstest_LDADD = $(OVS_LIBDIR)/daemon.lo \
     $(OVS_LIBDIR)/libopenvswitch.la lib/libovn.la
diff --git a/tests/ofproto-macros.at b/tests/ofproto-macros.at
index dd5d3848d..3d7ac08b3 100644
--- a/tests/ofproto-macros.at
+++ b/tests/ofproto-macros.at
@@ -12,7 +12,10 @@ strip_n_bytes () {
 
 # Strips 'cookie=...' from ovs-ofctl output.
 strip_cookie () {
-    sed 's/ cookie=0x[0-9a-fA-F]*,//'
+    sed '
+s/ cookie=0x[0-9a-fA-F]*,//
+s/cookie=0x[0-9a-fA-F]*,//
+'
 }
 
 # Strips out uninteresting parts of ovs-ofctl output, as well as parts
@@ -37,7 +40,7 @@ s/dir\/[0-9]*\/br0.mgmt/dir\/XXXX\/br0.mgmt/
 # Strips out uninteresting parts of ovs-ofctl output, including n_packets=..
 # n_bytes=..
 ofctl_strip_all () {
-    ofctl_strip | strip_n_packets | strip_n_bytes | strip_cookie
+    ofctl_strip | strip_n_packets | strip_n_bytes | strip_cookie | sort
 }
 
 # Filter (multiline) vconn debug messages from ovs-vswitchd.log.
diff --git a/tests/ovn-controller-vtep.at b/tests/ovn-controller-vtep.at
index cb582811f..b2261d285 100644
--- a/tests/ovn-controller-vtep.at
+++ b/tests/ovn-controller-vtep.at
@@ -177,22 +177,22 @@ AT_CLEANUP
 AT_SETUP([ovn-controller-vtep - binding 1])
 OVN_CONTROLLER_VTEP_START
 
-# adds logical switch 'lswitch0' and vlan_bindings.
+AS_BOX([add logical switch 'lswitch0' and vlan_bindings])
 AT_CHECK([vtep-ctl add-ls lswitch0 -- bind-ls br-vtep p0 100 lswitch0 -- bind-ls br-vtep p1 300 lswitch0])
 # adds logical switch port in ovn-nb database, and sets the type and options.
 OVN_NB_ADD_VTEP_PORT([br-test], [br-vtep_lswitch0], [br-vtep], [lswitch0])
-check ovn-sbctl wait-until Port_Binding br-vtep_lswitch0 chassis!='[[]]'
+wait_row_count Port_Binding 1 logical_port=br-vtep_lswitch0 chassis!='[[]]'
 # should see one binding, associated to chassis of 'br-vtep'.
 chassis_uuid=$(ovn-sbctl --columns=_uuid list Chassis br-vtep | cut -d ':' -f2 | tr -d ' ')
 AT_CHECK_UNQUOTED([ovn-sbctl --columns=chassis list Port_Binding br-vtep_lswitch0 | cut -d ':' -f2 | tr -d ' '], [0], [dnl
 ${chassis_uuid}
 ])
 
-# adds another logical switch 'lswitch1' and vlan_bindings.
+AS_BOX([add another logical switch 'lswitch1' and vlan_bindings])
 AT_CHECK([vtep-ctl add-ls lswitch1 -- bind-ls br-vtep p0 200 lswitch1])
 # adds logical switch port in ovn-nb database for lswitch1.
 OVN_NB_ADD_VTEP_PORT([br-test], [br-vtep_lswitch1], [br-vtep], [lswitch1])
-check ovn-sbctl wait-until Port_Binding br-vtep_lswitch1 chassis!='[[]]'
+wait_row_count Port_Binding 1 logical_port=br-vtep_lswitch1 chassis!='[[]]'
 # This is allowed, but not recommended, to have two vlan_bindings (to different vtep logical switches)
 # from one vtep gateway physical port in one ovn-nb logical swithch.
 AT_CHECK_UNQUOTED([ovn-sbctl --columns=chassis list Port_Binding | cut -d ':' -f2 | tr -d ' ' | sort], [0], [dnl
@@ -201,7 +201,7 @@ ${chassis_uuid}
 ${chassis_uuid}
 ])
 
-# adds another logical switch port in ovn-nb database for lswitch0.
+AS_BOX([add another logical switch port in ovn-nb database for lswitch0])
 OVN_NB_ADD_VTEP_PORT([br-test], [br-vtep_lswitch0_dup], [br-vtep], [lswitch0])
 
 # confirms the warning log.
@@ -228,7 +228,7 @@ ${chassis_uuid}
 ${chassis_uuid}
 ])
 
-# deletes physical ports from vtep.
+AS_BOX([delete physical ports from vtep])
 AT_CHECK([ovs-vsctl del-port p0 -- del-port p1])
 OVS_WAIT_UNTIL([test -z "`ovn-sbctl list Chassis | grep -- br-vtep_lswitch`"])
 OVS_WAIT_UNTIL([test -z "`vtep-ctl list physical_port p0`"])
diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
index 1b4679963..f818f9cea 100644
--- a/tests/ovn-controller.at
+++ b/tests/ovn-controller.at
@@ -414,3 +414,20 @@ OVS_WAIT_UNTIL([ovs-vsctl get Bridge br-int external_ids:ovn-nb-cfg], [0], [1])
 
 OVN_CLEANUP([hv1])
 AT_CLEANUP
+
+AT_SETUP([ovn -- features])
+AT_KEYWORDS([features])
+ovn_start
+
+net_add n1
+sim_add hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+# Wait for ovn-controller to register in the SB.
+OVS_WAIT_UNTIL([
+    test "$(ovn-sbctl get chassis hv1 other_config:port-up-notif)" = '"true"'
+])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
diff --git a/tests/ovn-macros.at b/tests/ovn-macros.at
index 59e500c57..2ba29a960 100644
--- a/tests/ovn-macros.at
+++ b/tests/ovn-macros.at
@@ -417,6 +417,22 @@ wait_column() {
       echo "$column in $db table $table has value $found, from the following rows:"
       ovn-${db}ctl list $table])
 }
+
+# wait_for_ports_up [PORT...]
+#
+# With arguments, waits for specified Logical_Switch_Ports to come up.
+# Without arguments, waits for all "plain" and router
+# Logical_Switch_Ports to come up.
+wait_for_ports_up() {
+    if test $# = 0; then
+        wait_row_count nb:Logical_Switch_Port 0 up!=true type='""'
+        wait_row_count nb:Logical_Switch_Port 0 up!=true type=router
+    else
+        for port; do
+            wait_row_count nb:Logical_Switch_Port 1 up=true name=$port
+        done
+    fi
+}
 OVS_END_SHELL_HELPERS
 
 m4_define([OVN_POPULATE_ARP], [AT_CHECK(ovn_populate_arp__, [0], [ignore])])
diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at
index 01edfcbc1..6d91aa4c5 100644
--- a/tests/ovn-nbctl.at
+++ b/tests/ovn-nbctl.at
@@ -1539,34 +1539,34 @@ AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
 dnl Add ecmp routes
 AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.0.0/24 11.0.0.1])
 AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.2])
-AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.2])
 AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.3])
-AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.3 lp0])
+AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.4 lp0])
 AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
 IPv4 Routes
-              10.0.0.0/24                  11.0.0.1 dst-ip
-              10.0.0.0/24                  11.0.0.2 dst-ip
-              10.0.0.0/24                  11.0.0.2 dst-ip
-              10.0.0.0/24                  11.0.0.3 dst-ip
-              10.0.0.0/24                  11.0.0.3 dst-ip lp0
+              10.0.0.0/24                  11.0.0.1 dst-ip ecmp
+              10.0.0.0/24                  11.0.0.2 dst-ip ecmp
+              10.0.0.0/24                  11.0.0.3 dst-ip ecmp
+              10.0.0.0/24                  11.0.0.4 dst-ip lp0 ecmp
+])
+AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 10.0.0.0/24 11.0.0.2], [1], [],
+  [ovn-nbctl: duplicate nexthop for the same ECMP route
 ])
 
 dnl Delete ecmp routes
 AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.0.0/24 11.0.0.1])
 AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
 IPv4 Routes
-              10.0.0.0/24                  11.0.0.2 dst-ip
-              10.0.0.0/24                  11.0.0.2 dst-ip
-              10.0.0.0/24                  11.0.0.3 dst-ip
-              10.0.0.0/24                  11.0.0.3 dst-ip lp0
+              10.0.0.0/24                  11.0.0.2 dst-ip ecmp
+              10.0.0.0/24                  11.0.0.3 dst-ip ecmp
+              10.0.0.0/24                  11.0.0.4 dst-ip lp0 ecmp
 ])
 AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.0.0/24 11.0.0.2])
 AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
 IPv4 Routes
-              10.0.0.0/24                  11.0.0.3 dst-ip
-              10.0.0.0/24                  11.0.0.3 dst-ip lp0
+              10.0.0.0/24                  11.0.0.3 dst-ip ecmp
+              10.0.0.0/24                  11.0.0.4 dst-ip lp0 ecmp
 ])
-AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.0.0/24 11.0.0.3 lp0])
+AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.0.0/24 11.0.0.4 lp0])
 AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
 IPv4 Routes
               10.0.0.0/24                  11.0.0.3 dst-ip
@@ -1605,7 +1605,15 @@ AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.1.1/24 11.0.1.1 lp0])
 AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.0.1/24 11.0.0.1])
 AT_CHECK([ovn-nbctl lr-route-add lr0 0:0:0:0:0:0:0:0/0 2001:0db8:0:f101::1])
 AT_CHECK([ovn-nbctl lr-route-add lr0 2001:0db8:0::/64 2001:0db8:0:f102::1 lp0])
-AT_CHECK([ovn-nbctl lr-route-add lr0 2001:0db8:1::/64 2001:0db8:0:f103::1])
+AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 2001:0db8:1::/64 2001:0db8:0:f103::1])
+AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 2001:0db8:1::/64 2001:0db8:0:f103::2])
+AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 2001:0db8:1::/64 2001:0db8:0:f103::3])
+AT_CHECK([ovn-nbctl --ecmp lr-route-add lr0 2001:0db8:1::/64 2001:0db8:0:f103::4])
+AT_CHECK([ovn-nbctl lr-route-add lr0 2002:0db8:1::/64 2001:0db8:0:f103::5])
+AT_CHECK([ovn-nbctl --ecmp-symmetric-reply lr-route-add lr0 2003:0db8:1::/64 2001:0db8:0:f103::6])
+AT_CHECK([ovn-nbctl --ecmp-symmetric-reply lr-route-add lr0 2003:0db8:1::/64 2001:0db8:0:f103::6], [1], [],
+  [ovn-nbctl: duplicate nexthop for the same ECMP route
+])
 
 AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
 IPv4 Routes
@@ -1615,9 +1623,20 @@ IPv4 Routes
 
 IPv6 Routes
             2001:db8::/64        2001:db8:0:f102::1 dst-ip lp0
-          2001:db8:1::/64        2001:db8:0:f103::1 dst-ip
+          2001:db8:1::/64        2001:db8:0:f103::1 dst-ip ecmp
+          2001:db8:1::/64        2001:db8:0:f103::2 dst-ip ecmp
+          2001:db8:1::/64        2001:db8:0:f103::3 dst-ip ecmp
+          2001:db8:1::/64        2001:db8:0:f103::4 dst-ip ecmp
+          2002:db8:1::/64        2001:db8:0:f103::5 dst-ip
+          2003:db8:1::/64        2001:db8:0:f103::6 dst-ip ecmp-symmetric-reply
                      ::/0        2001:db8:0:f101::1 dst-ip
-])])
+])
+
+AT_CHECK([ovn-nbctl lrp-add lr0 lr0-p0 00:00:01:01:02:03 192.168.10.1/24])
+bfd_uuid=$(ovn-nbctl create bfd logical_port=lr0-p0 dst_ip=100.0.0.50 status=down min_tx=250 min_rx=250 detect_mult=10)
+AT_CHECK([ovn-nbctl lr-route-add lr0 100.0.0.0/24 192.168.0.1])
+route_uuid=$(fetch_column nb:logical_router_static_route _uuid ip_prefix="100.0.0.0/24")
+AT_CHECK([ovn-nbctl set logical_router_static_route $route_uuid bfd=$bfd_uuid])])
 
 dnl ---------------------------------------------------------------------
 
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 90ca0a4db..11d4a9c86 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -605,11 +605,12 @@ wait_row_count Port_Binding 0 logical_port=sw0-pext1 'chassis!=[[]]'
 wait_row_count HA_Chassis_Group 1 name=hagrp1
 wait_row_count HA_Chassis 3
 
-# Clear ha_chassis_group for sw0-pext2
-ovn-nbctl --wait=sb clear logical_switch_port sw0-pext2 ha_chassis_group
+AS_BOX([Clear ha_chassis_group for sw0-pext2 and reset port type to normal in the same txn])
 
-wait_row_count Port_Binding 0 logical_port=sw0-pext2 'chassis!=[[]]'
+check ovn-nbctl  --wait=sb clear logical_switch_port sw0-pext2 \
+ha_chassis_group -- set logical_switch_port sw0-pext2 'type=""'
 wait_row_count HA_Chassis_Group 0
+wait_row_count Port_Binding 0 logical_port=sw0-pext2 'chassis!=[[]]'
 check_row_count HA_Chassis 0
 
 as ovn-sb
@@ -624,61 +625,66 @@ AT_CLEANUP
 AT_SETUP([ovn -- ovn-northd pause and resume])
 ovn_start
 
-AT_CHECK([test xfalse = x`as northd ovn-appctl -t ovn-northd is-paused`])
-AT_CHECK([as northd ovn-appctl -t ovn-northd status], [0], [Status: active
-])
-AT_CHECK([test xfalse = x`as northd-backup ovn-appctl -t ovn-northd \
-is-paused`])
-AT_CHECK([as northd-backup ovn-appctl -t ovn-northd status], [0],
-[Status: standby
-])
-
-ovn-nbctl ls-add sw0
-
-OVS_WAIT_UNTIL([
-    ovn-sbctl lflow-list sw0
-    test 0 = $?])
+get_northd_status() {
+    as northd ovn-appctl -t ovn-northd is-paused
+    as northd ovn-appctl -t ovn-northd status
+    as northd-backup ovn-appctl -t ovn-northd is-paused
+    as northd-backup ovn-appctl -t ovn-northd status
+}
 
-ovn-nbctl ls-del sw0
-OVS_WAIT_UNTIL([
-    ovn-sbctl lflow-list sw0
-    test 1 = $?])
+AS_BOX([Pause the backup])
+# This forces the main northd to become active (otherwise there's no
+# guarantee, ovn_start is racy).
+check as northd-backup ovs-appctl -t ovn-northd pause
+OVS_WAIT_FOR_OUTPUT([get_northd_status], [0], [false
+Status: active
+true
+Status: paused
+])
+
+AS_BOX([Resume the backup])
+check as northd-backup ovs-appctl -t ovn-northd resume
+OVS_WAIT_FOR_OUTPUT([get_northd_status], [0], [false
+Status: active
+false
+Status: standby
+])
+
+AS_BOX([Check that ovn-northd is active])
+# Check that ovn-northd is active, by verifying that it creates and
+# destroys southbound datapaths as one would expect.
+check_row_count Datapath_Binding 0
+check ovn-nbctl --wait=sb ls-add sw0
+check_row_count Datapath_Binding 1
+check ovn-nbctl --wait=sb ls-del sw0
+check_row_count Datapath_Binding 0
 
-# Now pause the ovn-northd
-as northd ovs-appctl -t ovn-northd pause
-as northd-backup ovs-appctl -t ovn-northd pause
-AT_CHECK([test xtrue = x`as northd ovn-appctl -t ovn-northd is-paused`])
-AT_CHECK([as northd ovn-appctl -t ovn-northd status], [0], [Status: paused
-])
-AT_CHECK([test xtrue = x`as northd-backup ovn-appctl -t ovn-northd is-paused`])
-AT_CHECK([as northd-backup ovn-appctl -t ovn-northd status], [0],
-[Status: paused
+AS_BOX([Pause the main northd])
+check as northd ovs-appctl -t ovn-northd pause
+check as northd-backup ovs-appctl -t ovn-northd pause
+AT_CHECK([get_northd_status], [0], [true
+Status: paused
+true
+Status: paused
 ])
 
-ovn-nbctl ls-add sw0
-
-# There should be no logical flows for sw0 datapath.
-OVS_WAIT_UNTIL([
-    ovn-sbctl lflow-list sw0
-    test 1 = $?])
-
-# Now resume ovn-northd
-as northd ovs-appctl -t ovn-northd resume
-AT_CHECK([test xfalse = x`as northd ovn-appctl -t ovn-northd is-paused`])
-OVS_WAIT_UNTIL([as northd ovn-appctl -t ovn-northd status], [0],
-[Status: active
-])
+AS_BOX([Verify that ovn-northd is paused])
+# Now ovn-northd won't respond by adding a datapath, because it's paused.
+check ovn-nbctl ls-add sw0
+check sleep 5
+check_row_count Datapath_Binding 0
 
-as northd-backup ovs-appctl -t ovn-northd resume
-AT_CHECK([test xfalse = x`as northd-backup ovn-appctl -t ovn-northd \
-is-paused`])
-AT_CHECK([as northd-backup ovn-appctl -t ovn-northd status], [0],
-[Status: standby
+AS_BOX([Resume the main northd])
+check as northd ovs-appctl -t ovn-northd resume
+check as northd-backup ovs-appctl -t ovn-northd resume
+OVS_WAIT_FOR_OUTPUT([get_northd_status], [0], [false
+Status: active
+false
+Status: standby
 ])
 
-OVS_WAIT_UNTIL([
-    ovn-sbctl lflow-list sw0
-    test 0 = $?])
+check ovn-nbctl --wait=sb sync
+check_row_count Datapath_Binding 1
 
 AT_CLEANUP
 
@@ -849,7 +855,7 @@ uuid=$(fetch_column Port_Binding _uuid logical_port=cr-DR-S1)
 echo "CR-LRP UUID is: " $uuid
 
 check ovn-nbctl set Logical_Router $cr_uuid options:chassis=gw1
-check ovn-nbctl --wait=hv sync
+check ovn-nbctl --wait=sb sync
 
 ovn-nbctl create Address_Set name=allowed_range addresses=\"1.1.1.1\"
 ovn-nbctl create Address_Set name=disallowed_range addresses=\"2.2.2.2\"
@@ -1048,7 +1054,7 @@ health_check @hc | uuidfilt], [0], [<0>
 
 wait_row_count Service_Monitor 0
 
-# create logical switches and ports
+AS_BOX([create logical switches and ports])
 ovn-nbctl ls-add sw0
 ovn-nbctl --wait=sb lsp-add sw0 sw0-p1 -- lsp-set-addresses sw0-p1 \
 "00:00:00:00:00:03 10.0.0.3"
@@ -1072,54 +1078,57 @@ check ovn-nbctl --wait=sb ls-lb-add sw0 lb1
 AT_CAPTURE_FILE([sbflows])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows | grep 'priority=120.*ct_lb' | sed 's/table=..//'], 0, [dnl
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
-# Delete the Load_Balancer_Health_Check
+AS_BOX([Delete the Load_Balancer_Health_Check])
 ovn-nbctl --wait=sb clear load_balancer . health_check
 wait_row_count Service_Monitor 0
 
 AT_CAPTURE_FILE([sbflows2])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows2 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0],
-[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
-# Create the Load_Balancer_Health_Check again.
+AS_BOX([Create the Load_Balancer_Health_Check again.])
 ovn-nbctl --wait=sb -- --id=@hc create \
 Load_Balancer_Health_Check vip="10.0.0.10\:80" -- add Load_Balancer . \
 health_check @hc
 wait_row_count Service_Monitor 2
+check ovn-nbctl --wait=sb sync
 
 ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 > lflows.txt
 AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
-# Get the uuid of both the service_monitor
+AS_BOX([Get the uuid of both the service_monitor])
 sm_sw0_p1=$(fetch_column Service_Monitor _uuid logical_port=sw0-p1)
 sm_sw1_p1=$(fetch_column Service_Monitor _uuid logical_port=sw1-p1)
 
 AT_CAPTURE_FILE([sbflows3])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows 3 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0],
-[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
-# Set the service monitor for sw1-p1 to offline
+AS_BOX([Set the service monitor for sw1-p1 to offline])
 check ovn-sbctl set service_monitor sw1-p1 status=offline
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=offline
+check ovn-nbctl --wait=sb sync
 
 AT_CAPTURE_FILE([sbflows4])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows4 | grep 'priority=120.*ct_lb' | sed 's/table=..//'], [0],
-[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);)
 ])
 
-# Set the service monitor for sw0-p1 to offline
+AS_BOX([Set the service monitor for sw0-p1 to offline])
 ovn-sbctl set service_monitor $sm_sw0_p1 status=offline
 
 wait_row_count Service_Monitor 1 logical_port=sw0-p1 status=offline
+check ovn-nbctl --wait=sb sync
 
 AT_CAPTURE_FILE([sbflows5])
 OVS_WAIT_FOR_OUTPUT(
@@ -1131,32 +1140,34 @@ OVS_WAIT_FOR_OUTPUT(
   (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(drop;)
 ])
 
-# Set the service monitor for sw0-p1 and sw1-p1 to online
+AS_BOX([Set the service monitor for sw0-p1 and sw1-p1 to online])
 ovn-sbctl set service_monitor $sm_sw0_p1 status=online
 ovn-sbctl set service_monitor $sm_sw1_p1 status=online
 
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=online
+check ovn-nbctl --wait=sb sync
 
 AT_CAPTURE_FILE([sbflows7])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows7 | grep ct_lb | grep priority=120 | sed 's/table=..//'], 0,
-[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
 ])
 
-# Set the service monitor for sw1-p1 to error
+AS_BOX([Set the service monitor for sw1-p1 to error])
 ovn-sbctl set service_monitor $sm_sw1_p1 status=error
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=error
+check ovn-nbctl --wait=sb sync
 
 ovn-sbctl dump-flows sw0 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" \
 | grep priority=120 > lflows.txt
 AT_CHECK([cat lflows.txt | sed 's/table=..//'], [0], [dnl
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);)
 ])
 
-# Add one more vip to lb1
+AS_BOX([Add one more vip to lb1])
 check ovn-nbctl set load_balancer . vip:10.0.0.40\\:1000=10.0.0.3:1000,20.0.0.3:80
 
-# create health_check for new vip - 10.0.0.40
+AS_BOX([create health_check for new vip - 10.0.0.40])
 AT_CHECK(
   [ovn-nbctl --wait=sb \
           -- --id=@hc create Load_Balancer_Health_Check vip=10.0.0.40\\:1000 \
@@ -1176,34 +1187,35 @@ AT_CAPTURE_FILE([sbflows9])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows9 | grep ct_lb | grep priority=120 | sed 's/table=..//' | sort],
   0,
-[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80);)
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000);)
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000);)
 ])
 
-# Set the service monitor for sw1-p1 to online
+AS_BOX([Set the service monitor for sw1-p1 to online])
 check ovn-sbctl set service_monitor sw1-p1 status=online
 
 wait_row_count Service_Monitor 1 logical_port=sw1-p1 status=online
+check ovn-nbctl --wait=sb sync
 
 AT_CAPTURE_FILE([sbflows10])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw0 | tee sbflows10 | grep ct_lb | grep priority=120 | sed 's/table=..//' | sort],
   0,
-[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
+[  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
 ])
 
-# Associate lb1 to sw1
+AS_BOX([Associate lb1 to sw1])
 check ovn-nbctl --wait=sb ls-lb-add sw1 lb1
 AT_CAPTURE_FILE([sbflows11])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows sw1 | tee sbflows11 | grep ct_lb | grep priority=120 | sed 's/table=..//' | sort],
   0, [dnl
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
-  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80);)
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.40 && tcp.dst == 1000), action=(reg1 = 10.0.0.40; reg2[[0..15]] = 1000; ct_lb(backends=10.0.0.3:1000,20.0.0.3:80);)
 ])
 
-# Now create lb2 same as lb1 but udp protocol.
+AS_BOX([Now create lb2 same as lb1 but udp protocol.])
 check ovn-nbctl lb-add lb2 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80 udp
 check ovn-nbctl --wait=sb set load_balancer lb2 ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.2
 check ovn-nbctl --wait=sb set load_balancer lb2 ip_port_mappings:20.0.0.3=sw1-p1:20.0.0.2
@@ -1214,15 +1226,17 @@ AT_CHECK([ovn-nbctl -- --id=@hc create Load_Balancer_Health_Check vip="10.0.0.10
 
 check ovn-nbctl ls-lb-add sw0 lb2
 check ovn-nbctl ls-lb-add sw1 lb2
+check ovn-nbctl --wait=sb sync
 
 wait_row_count Service_Monitor 5
 
-# Change the svc_monitor_mac. This should get reflected in service_monitor table rows.
+AS_BOX([Change the svc_monitor_mac.])
+# This should get reflected in service_monitor table rows.
 check ovn-nbctl set NB_Global . options:svc_monitor_mac="fe:a0:65:a2:01:03"
 
 wait_row_count Service_Monitor 5 src_mac='"fe:a0:65:a2:01:03"'
 
-# Change the source ip for 10.0.0.3 backend ip in lb2
+AS_BOX([Change the source ip for 10.0.0.3 backend ip in lb2])
 check ovn-nbctl --wait=sb set load_balancer lb2 ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.100
 
 wait_row_count Service_Monitor 1 logical_port=sw0-p1 src_ip=10.0.0.100
@@ -1233,6 +1247,31 @@ wait_row_count Service_Monitor 2
 ovn-nbctl --wait=sb lb-del lb2
 wait_row_count Service_Monitor 0
 
+check ovn-nbctl --reject lb-add lb3 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80
+check ovn-nbctl --wait=sb set load_balancer lb3 ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.2
+check ovn-nbctl --wait=sb set load_balancer lb3 ip_port_mappings:20.0.0.3=sw1-p1:20.0.0.2
+wait_row_count Service_Monitor 0
+
+check ovn-nbctl --wait=sb ls-lb-add sw0 lb3
+AT_CHECK([ovn-nbctl --wait=sb -- --id=@hc create \
+Load_Balancer_Health_Check vip="10.0.0.10\:80" -- add Load_Balancer lb3 \
+health_check @hc | uuidfilt], [0], [<0>
+])
+wait_row_count Service_Monitor 2
+
+# Set the service monitor for sw0-p1 and sw1-p1 to offline
+sm_sw0_p1=$(fetch_column Service_Monitor _uuid logical_port=sw0-p1)
+sm_sw1_p1=$(fetch_column Service_Monitor _uuid logical_port=sw1-p1)
+
+ovn-sbctl set service_monitor $sm_sw0_p1 status=offline
+ovn-sbctl set service_monitor $sm_sw1_p1 status=offline
+
+AT_CAPTURE_FILE([sbflows12])
+OVS_WAIT_FOR_OUTPUT(
+  [ovn-sbctl dump-flows sw0 | tee sbflows12 | grep "ip4.dst == 10.0.0.10 && tcp.dst == 80" | grep priority=120 | sed 's/table=..//'], [0], [dnl
+  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0 = 0; reject { outport <-> inport; next(pipeline=egress,table=6);};)
+])
+
 AT_CLEANUP
 
 AT_SETUP([ovn -- Load balancer VIP in NAT entries])
@@ -1704,7 +1743,7 @@ check ovn-nbctl pg-add pg0 sw0-p1 sw1-p1
 check ovn-nbctl acl-add pg0 from-lport 1002 "inport == @pg0 && ip4 && tcp && tcp.dst == 80" reject
 check ovn-nbctl acl-add pg0 to-lport 1003 "outport == @pg0 && ip6 && udp" reject
 
-check ovn-nbctl --wait=hv sync
+check ovn-nbctl --wait=sb sync
 
 AS_BOX([1])
 
@@ -1713,28 +1752,12 @@ AT_CAPTURE_FILE([sw0flows])
 ovn-sbctl dump-flows sw1 > sw1flows
 AT_CAPTURE_FILE([sw1flows])
 
-AT_CHECK([grep "ls_in_acl" sw0flows | grep pg0 | sort], [0], [dnl
-  table=7 (ls_in_acl          ), priority=2002 , dnl
-match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=6); };)
-])
-
-AT_CHECK([grep "ls_in_acl" sw1flows | grep pg0 | sort], [0], [dnl
-  table=7 (ls_in_acl          ), priority=2002 , dnl
-match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=6); };)
-])
-
-AT_CHECK([grep "ls_out_acl" sw0flows | grep pg0 | sort], [0], [dnl
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-])
-
-AT_CHECK([grep "ls_out_acl" sw1flows | grep pg0 | sort], [0], [dnl
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
+AT_CHECK(
+  [grep -E 'ls_(in|out)_acl' sw0flows sw1flows | grep pg0 | sort], [0], [dnl
+sw0flows:  table=5 (ls_out_acl         ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows:  table=9 (ls_in_acl          ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=6); };)
+sw1flows:  table=5 (ls_out_acl         ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows:  table=9 (ls_in_acl          ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=6); };)
 ])
 
 AS_BOX([2])
@@ -1746,22 +1769,11 @@ AT_CAPTURE_FILE([sw0flows2])
 ovn-sbctl dump-flows sw1 > sw1flows2
 AT_CAPTURE_FILE([sw1flows2])
 
-AT_CHECK([grep "ls_out_acl" sw0flows2 | grep pg0 | sort], [0], [dnl
-  table=5 (ls_out_acl         ), priority=2002 , dnl
-match=(outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-])
-
-AT_CHECK([grep "ls_out_acl" sw1flows2 | grep pg0 | sort], [0], [dnl
-  table=5 (ls_out_acl         ), priority=2002 , dnl
-match=(outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=(outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
+AT_CHECK([grep "ls_out_acl" sw0flows2 sw1flows2 | grep pg0 | sort], [0], [dnl
+sw0flows2:  table=5 (ls_out_acl         ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows2:  table=5 (ls_out_acl         ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows2:  table=5 (ls_out_acl         ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows2:  table=5 (ls_out_acl         ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
 ])
 
 AS_BOX([3])
@@ -1773,42 +1785,19 @@ AT_CAPTURE_FILE([sw0flows3])
 ovn-sbctl dump-flows sw1 > sw1flows3
 AT_CAPTURE_FILE([sw1flows3])
 
-AT_CHECK([grep "ls_out_acl" sw0flows3 | grep pg0 | sort], [0], [dnl
-  table=5 (ls_out_acl         ), priority=2001 , dnl
-match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
-  table=5 (ls_out_acl         ), priority=2001 , dnl
-match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
-  table=5 (ls_out_acl         ), priority=2002 , dnl
-match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2002 , dnl
-match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-])
-
-AT_CHECK([grep "ls_out_acl" sw1flows3 | grep pg0 | sort], [0], [dnl
-  table=5 (ls_out_acl         ), priority=2001 , dnl
-match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
-  table=5 (ls_out_acl         ), priority=2001 , dnl
-match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
-  table=5 (ls_out_acl         ), priority=2002 , dnl
-match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2002 , dnl
-match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
-  table=5 (ls_out_acl         ), priority=2003 , dnl
-match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), dnl
-action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=21); };)
+AT_CHECK([grep "ls_out_acl" sw0flows3 sw1flows3 | grep pg0 | sort], [0], [dnl
+sw0flows3:  table=5 (ls_out_acl         ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
+sw0flows3:  table=5 (ls_out_acl         ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
+sw0flows3:  table=5 (ls_out_acl         ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows3:  table=5 (ls_out_acl         ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows3:  table=5 (ls_out_acl         ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows3:  table=5 (ls_out_acl         ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows3:  table=5 (ls_out_acl         ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
+sw1flows3:  table=5 (ls_out_acl         ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
+sw1flows3:  table=5 (ls_out_acl         ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows3:  table=5 (ls_out_acl         ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows3:  table=5 (ls_out_acl         ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_label.blocked = 1; };  reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows3:  table=5 (ls_out_acl         ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
 ])
 
 AT_CLEANUP
@@ -1818,20 +1807,25 @@ AT_KEYWORDS([acl log meter fair])
 ovn_start
 
 check ovn-nbctl ls-add sw0
+check ovn-nbctl ls-add sw1
 check ovn-nbctl lsp-add sw0 sw0-p1 -- lsp-set-addresses sw0-p1 "50:54:00:00:00:01 10.0.0.11"
 check ovn-nbctl lsp-add sw0 sw0-p2 -- lsp-set-addresses sw0-p2 "50:54:00:00:00:02 10.0.0.12"
-check ovn-nbctl lsp-add sw0 sw0-p3 -- lsp-set-addresses sw0-p3 "50:54:00:00:00:03 10.0.0.13"
+check ovn-nbctl lsp-add sw1 sw1-p3 -- lsp-set-addresses sw1-p3 "50:54:00:00:00:03 10.0.0.13"
+check ovn-nbctl pg-add pg0 sw0-p1 sw0-p2 sw1-p3
 
 check ovn-nbctl meter-add meter_me drop 1 pktps
 nb_meter_uuid=$(fetch_column nb:Meter _uuid name=meter_me)
 
 check ovn-nbctl acl-add sw0 to-lport 1002 'outport == "sw0-p1" && ip4.src == 10.0.0.12' allow
 check ovn-nbctl acl-add sw0 to-lport 1002 'outport == "sw0-p1" && ip4.src == 10.0.0.13' allow
+check ovn-nbctl acl-add pg0 to-lport 1002 'outport == "pg0" && ip4.src == 10.0.0.11' drop
 
 acl1=$(ovn-nbctl --bare --column _uuid,match find acl | grep -B1 '10.0.0.12' | head -1)
 acl2=$(ovn-nbctl --bare --column _uuid,match find acl | grep -B1 '10.0.0.13' | head -1)
+acl3=$(ovn-nbctl --bare --column _uuid,match find acl | grep -B1 '10.0.0.11' | head -1)
 check ovn-nbctl set acl $acl1 log=true severity=alert meter=meter_me name=acl_one
 check ovn-nbctl set acl $acl2 log=true severity=info  meter=meter_me name=acl_two
+check ovn-nbctl set acl $acl3 log=true severity=info  meter=meter_me name=acl_three
 check ovn-nbctl --wait=sb sync
 
 check_row_count nb:meter 1
@@ -1840,8 +1834,9 @@ check_column meter_me nb:meter name
 check_acl_lflow() {
     acl_log_name=$1
     meter_name=$2
+    ls=$3
     # echo checking that logical flow for acl log $acl_log_name has $meter_name
-    AT_CHECK([ovn-sbctl lflow-list | grep ls_out_acl | \
+    AT_CHECK([ovn-sbctl lflow-list $ls | grep ls_out_acl | \
               grep "\"${acl_log_name}\"" | \
               grep -c "meter=\"${meter_name}\""], [0], [1
 ])
@@ -1857,59 +1852,144 @@ check_meter_by_name() {
 
 # Make sure 'fair' value properly affects the Meters in SB
 check_meter_by_name meter_me
-check_meter_by_name NOT meter_me__${acl1} meter_me__${acl2}
+check_meter_by_name NOT meter_me__${acl1} meter_me__${acl2} meter_me__${acl3}
 
 check ovn-nbctl --wait=sb set Meter $nb_meter_uuid fair=true
-check_meter_by_name meter_me meter_me__${acl1} meter_me__${acl2}
+check_meter_by_name meter_me meter_me__${acl1} meter_me__${acl2} meter_me__${acl3}
 
 check ovn-nbctl --wait=sb set Meter $nb_meter_uuid fair=false
 check_meter_by_name meter_me
-check_meter_by_name NOT meter_me__${acl1} meter_me__${acl2}
+check_meter_by_name NOT meter_me__${acl1} meter_me__${acl2} meter_me__${acl3}
 
 check ovn-nbctl --wait=sb set Meter $nb_meter_uuid fair=true
-check_meter_by_name meter_me meter_me__${acl1} meter_me__${acl2}
+check_meter_by_name meter_me meter_me__${acl1} meter_me__${acl2} meter_me__${acl3}
 
 # Change template meter and make sure that is reflected on acl meters as well
 template_band=$(fetch_column nb:meter bands name=meter_me)
 check ovn-nbctl --wait=sb set meter_band $template_band rate=123
 # Make sure that every Meter_Band has the right rate.  (ovn-northd
-# creates 3 identical Meter_Band rows, all identical; ovn-northd-ddlog
+# creates 4 identical Meter_Band rows, all identical; ovn-northd-ddlog
 # creates just 1.  It doesn't matter, they work just as well.)
 n_meter_bands=$(count_rows meter_band)
-AT_FAIL_IF([test "$n_meter_bands" != 1 && test "$n_meter_bands" != 3])
+AT_FAIL_IF([test "$n_meter_bands" != 1 && test "$n_meter_bands" != 4])
 check_row_count meter_band $n_meter_bands rate=123
 
 # Check meter in logical flows for acl logs
-check_acl_lflow acl_one meter_me__${acl1}
-check_acl_lflow acl_two meter_me__${acl2}
+check_acl_lflow acl_one meter_me__${acl1} sw0
+check_acl_lflow acl_two meter_me__${acl2} sw0
+check_acl_lflow acl_three meter_me__${acl3} sw0
+check_acl_lflow acl_three meter_me__${acl3} sw1
 
 # Stop using meter for acl1
 check ovn-nbctl --wait=sb clear acl $acl1 meter
 check_meter_by_name meter_me meter_me__${acl2}
 check_meter_by_name NOT meter_me__${acl1}
-check_acl_lflow acl_two meter_me__${acl2}
+check_acl_lflow acl_two meter_me__${acl2} sw0
+check_acl_lflow acl_three meter_me__${acl3} sw0
+check_acl_lflow acl_three meter_me__${acl3} sw1
 
 # Remove template Meter should remove all others as well
 check ovn-nbctl --wait=sb meter-del meter_me
 check_row_count meter 0
 # Check that logical flow remains but uses non-unique meter since fair
 # attribute is lost by the removal of the Meter row.
-check_acl_lflow acl_two meter_me
+check_acl_lflow acl_two meter_me sw0
+check_acl_lflow acl_three meter_me sw0
+check_acl_lflow acl_three meter_me sw1
 
 # Re-add template meter and make sure acl2's meter is back in sb
 check ovn-nbctl --wait=sb --fair meter-add meter_me drop 1 pktps
 check_meter_by_name meter_me meter_me__${acl2}
 check_meter_by_name NOT meter_me__${acl1}
-check_acl_lflow acl_two meter_me__${acl2}
+check_acl_lflow acl_two meter_me__${acl2} sw0
+check_acl_lflow acl_three meter_me__${acl3} sw0
+check_acl_lflow acl_three meter_me__${acl3} sw1
 
 # Remove acl2
 sw0=$(fetch_column nb:logical_switch _uuid name=sw0)
 check ovn-nbctl --wait=sb remove logical_switch $sw0 acls $acl2
-check_meter_by_name meter_me
+check_meter_by_name meter_me meter_me__${acl3}
 check_meter_by_name NOT meter_me__${acl1} meter_me__${acl2}
 
 AT_CLEANUP
 
+AT_SETUP([ovn -- ACL skip hints for stateless config])
+AT_KEYWORDS([acl])
+ovn_start
+
+check ovn-nbctl --wait=sb \
+    -- ls-add ls \
+    -- lsp-add ls lsp \
+    -- acl-add ls from-lport 1 "ip" allow \
+    -- acl-add ls to-lport 1 "ip" allow
+
+AS_BOX([Check no match on ct_state with stateless ACLs])
+AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | grep 'ct\.' | sort], [0], [dnl
+])
+
+AS_BOX([Check match ct_state with stateful ACLs])
+check ovn-nbctl --wait=sb \
+    -- acl-add ls from-lport 2 "udp" allow-related \
+    -- acl-add ls to-lport 2 "udp" allow-related
+AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | grep 'ct\.' | sort], [0], [dnl
+  table=4 (ls_out_acl_hint    ), priority=1    , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=2    , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=3    , match=(!ct.est), action=(reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=4    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=5    , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=6    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=7    , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=5 (ls_out_acl         ), priority=1    , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;)
+  table=5 (ls_out_acl         ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;)
+  table=5 (ls_out_acl         ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;)
+  table=5 (ls_out_acl         ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;)
+  table=8 (ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=2    , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=3    , match=(!ct.est), action=(reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=4    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=5    , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=6    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=7    , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=9 (ls_in_acl          ), priority=1    , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;)
+  table=9 (ls_in_acl          ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;)
+  table=9 (ls_in_acl          ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;)
+  table=9 (ls_in_acl          ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;)
+])
+
+AS_BOX([Check match ct_state with load balancer])
+check ovn-nbctl --wait=sb \
+    -- acl-del ls from-lport 2 "udp" \
+    -- acl-del ls to-lport 2 "udp" \
+    -- lb-add lb "10.0.0.1" "10.0.0.2" \
+    -- ls-lb-add ls lb
+
+AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | grep 'ct\.' | sort], [0], [dnl
+  table=4 (ls_out_acl_hint    ), priority=1    , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=2    , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=3    , match=(!ct.est), action=(reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=4    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=5    , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=6    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=4 (ls_out_acl_hint    ), priority=7    , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=5 (ls_out_acl         ), priority=1    , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;)
+  table=5 (ls_out_acl         ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;)
+  table=5 (ls_out_acl         ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;)
+  table=5 (ls_out_acl         ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;)
+  table=8 (ls_in_acl_hint     ), priority=1    , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=2    , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=3    , match=(!ct.est), action=(reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=4    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=5    , match=(!ct.trk), action=(reg0[[8]] = 1; reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=6    , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=8 (ls_in_acl_hint     ), priority=7    , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
+  table=9 (ls_in_acl          ), priority=1    , match=(ip && (!ct.est || (ct.est && ct_label.blocked == 1))), action=(reg0[[1]] = 1; next;)
+  table=9 (ls_in_acl          ), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;)
+  table=9 (ls_in_acl          ), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;)
+  table=9 (ls_in_acl          ), priority=65535, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;)
+])
+
+AT_CLEANUP
+
 AT_SETUP([datapath requested-tnl-key])
 AT_KEYWORDS([requested tnl tunnel key keys])
 ovn_start
@@ -2092,6 +2172,12 @@ echo
 echo "__file__:__line__: check that datapath sw1 has lb0 and lb1 set in the load_balancers column."
 check_column "$lb0_uuid $lb1_uuid" sb:datapath_binding load_balancers external_ids:name=sw1
 
+
+echo
+echo "__file__:__line__: Set hairpin_snat_ip on lb1 and check that SB DB is updated."
+check ovn-nbctl --wait=sb set Load_Balancer lb1 options:hairpin_snat_ip="42.42.42.42 4242::4242"
+check_column "$lb1_uuid" sb:load_balancer _uuid name=lb1 options='{hairpin_orig_tuple="true", hairpin_snat_ip="42.42.42.42 4242::4242"}'
+
 echo
 echo "__file__:__line__: Delete load balancer lb1 an check that datapath sw1's load_balancers are updated accordingly."
 
@@ -2100,6 +2186,35 @@ check_column "$lb0_uuid" sb:datapath_binding load_balancers external_ids:name=sw
 
 AT_CLEANUP
 
+AT_SETUP([ovn -- LS load balancer hairpin logical flows])
+ovn_start
+
+check ovn-nbctl \
+    -- ls-add sw0 \
+    -- lb-add lb0 10.0.0.10:80 10.0.0.4:8080 \
+    -- ls-lb-add sw0 lb0
+
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_pre_hairpin | sort], [0], [dnl
+  table=14(ls_in_pre_hairpin  ), priority=0    , match=(1), action=(next;)
+  table=14(ls_in_pre_hairpin  ), priority=100  , match=(ip && ct.trk), action=(reg0[[6]] = chk_lb_hairpin(); reg0[[12]] = chk_lb_hairpin_reply(); next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort], [0], [dnl
+  table=15(ls_in_nat_hairpin  ), priority=0    , match=(1), action=(next;)
+  table=15(ls_in_nat_hairpin  ), priority=100  , match=(ip && ct.est && ct.trk && reg0[[6]] == 1), action=(ct_snat;)
+  table=15(ls_in_nat_hairpin  ), priority=100  , match=(ip && ct.new && ct.trk && reg0[[6]] == 1), action=(ct_snat_to_vip; next;)
+  table=15(ls_in_nat_hairpin  ), priority=90   , match=(ip && reg0[[12]] == 1), action=(ct_snat;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort], [0], [dnl
+  table=16(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
+  table=16(ls_in_hairpin      ), priority=1    , match=((reg0[[6]] == 1 || reg0[[12]] == 1)), action=(eth.dst <-> eth.src; outport = inport; flags.loopback = 1; output;)
+])
+
+AT_CLEANUP
+
 AT_SETUP([ovn -- logical gatapath groups])
 AT_KEYWORDS([use_logical_dp_groups])
 ovn_start
@@ -2173,3 +2288,498 @@ dnl Number of common flows should be the same.
 check_row_count Logical_Flow ${n_flows_common} logical_dp_group=${dp_group_uuid}
 
 AT_CLEANUP
+
+AT_SETUP([ovn -- Router policies - ECMP reroute])
+AT_KEYWORDS([router policies ecmp reroute])
+ovn_start
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-port1
+check ovn-nbctl lsp-set-addresses sw0-port1 "50:54:00:00:00:03 10.0.0.3"
+
+check ovn-nbctl ls-add sw1
+check ovn-nbctl lsp-add sw1 sw1-port1
+check ovn-nbctl lsp-set-addresses sw1-port1 "40:54:00:00:00:03 20.0.0.3"
+
+# Create a logical router and attach both logical switches
+check ovn-nbctl lr-add lr0
+check ovn-nbctl lrp-add lr0 lr0-sw0 00:00:00:00:ff:01 10.0.0.1/24 1000::a/64
+check ovn-nbctl lsp-add sw0 sw0-lr0
+check ovn-nbctl lsp-set-type sw0-lr0 router
+check ovn-nbctl lsp-set-addresses sw0-lr0 00:00:00:00:ff:01
+check ovn-nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0
+
+check ovn-nbctl lrp-add lr0 lr0-sw1 00:00:00:00:ff:02 20.0.0.1/24 2000::a/64
+check ovn-nbctl lsp-add sw1 sw1-lr0
+check ovn-nbctl lsp-set-type sw1-lr0 router
+check ovn-nbctl lsp-set-addresses sw1-lr0 00:00:00:00:ff:02
+check ovn-nbctl lsp-set-options sw1-lr0 router-port=lr0-sw1
+
+check ovn-nbctl ls-add public
+check ovn-nbctl lrp-add lr0 lr0-public 00:00:20:20:12:13 172.168.0.100/24
+check ovn-nbctl lsp-add public public-lr0
+check ovn-nbctl lsp-set-type public-lr0 router
+check ovn-nbctl lsp-set-addresses public-lr0 router
+check ovn-nbctl lsp-set-options public-lr0 router-port=lr0-public
+
+check ovn-nbctl --wait=sb lr-policy-add lr0  10 "ip4.src == 10.0.0.3" reroute 172.168.0.101,172.168.0.102
+
+ovn-sbctl dump-flows lr0 > lr0flows3
+AT_CAPTURE_FILE([lr0flows3])
+
+AT_CHECK([grep "lr_in_policy" lr0flows3 | sort], [0], [dnl
+  table=12(lr_in_policy       ), priority=0    , match=(1), action=(reg8[[0..15]] = 0; next;)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.3), action=(reg8[[0..15]] = 1; reg8[[16..31]] = select(1, 2);)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == 1 && reg8[[16..31]] == 1), action=(reg0 = 172.168.0.101; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == 1 && reg8[[16..31]] == 2), action=(reg0 = 172.168.0.102; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=150  , match=(reg8[[0..15]] == 0), action=(next;)
+])
+
+check ovn-nbctl --wait=sb lr-policy-add lr0  10 "ip4.src == 10.0.0.4" reroute 172.168.0.101,172.168.0.102,172.168.0.103
+ovn-sbctl dump-flows lr0 > lr0flows3
+AT_CAPTURE_FILE([lr0flows3])
+
+AT_CHECK([grep "lr_in_policy" lr0flows3 |  \
+sed 's/reg8\[[0..15\]] = [[0-9]]*/reg8\[[0..15\]] = <cleared>/' | \
+sed 's/reg8\[[0..15\]] == [[0-9]]*/reg8\[[0..15\]] == <cleared>/' | sort], [0], [dnl
+  table=12(lr_in_policy       ), priority=0    , match=(1), action=(reg8[[0..15]] = <cleared>; next;)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.3), action=(reg8[[0..15]] = <cleared>; reg8[[16..31]] = select(1, 2);)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.4), action=(reg8[[0..15]] = <cleared>; reg8[[16..31]] = select(1, 2, 3);)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 1), action=(reg0 = 172.168.0.101; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 1), action=(reg0 = 172.168.0.101; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 2), action=(reg0 = 172.168.0.102; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 2), action=(reg0 = 172.168.0.102; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 3), action=(reg0 = 172.168.0.103; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=150  , match=(reg8[[0..15]] == <cleared>), action=(next;)
+])
+
+check ovn-nbctl --wait=sb lr-policy-add lr0  10 "ip4.src == 10.0.0.5" reroute 172.168.0.110
+ovn-sbctl dump-flows lr0 > lr0flows3
+AT_CAPTURE_FILE([lr0flows3])
+
+AT_CHECK([grep "lr_in_policy" lr0flows3 |  \
+sed 's/reg8\[[0..15\]] = [[0-9]]*/reg8\[[0..15\]] = <cleared>/' | \
+sed 's/reg8\[[0..15\]] == [[0-9]]*/reg8\[[0..15\]] == <cleared>/' | sort], [0], [dnl
+  table=12(lr_in_policy       ), priority=0    , match=(1), action=(reg8[[0..15]] = <cleared>; next;)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.3), action=(reg8[[0..15]] = <cleared>; reg8[[16..31]] = select(1, 2);)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.4), action=(reg8[[0..15]] = <cleared>; reg8[[16..31]] = select(1, 2, 3);)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.5), action=(reg0 = 172.168.0.110; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; reg8[[0..15]] = <cleared>; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 1), action=(reg0 = 172.168.0.101; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 1), action=(reg0 = 172.168.0.101; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 2), action=(reg0 = 172.168.0.102; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 2), action=(reg0 = 172.168.0.102; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 3), action=(reg0 = 172.168.0.103; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=150  , match=(reg8[[0..15]] == <cleared>), action=(next;)
+])
+
+check ovn-nbctl --wait=sb lr-policy-del lr0  10 "ip4.src == 10.0.0.3"
+ovn-sbctl dump-flows lr0 > lr0flows3
+AT_CAPTURE_FILE([lr0flows3])
+
+AT_CHECK([grep "lr_in_policy" lr0flows3 |  \
+sed 's/reg8\[[0..15\]] = [[0-9]]*/reg8\[[0..15\]] = <cleared>/' | \
+sed 's/reg8\[[0..15\]] == [[0-9]]*/reg8\[[0..15\]] == <cleared>/' | sort], [0], [dnl
+  table=12(lr_in_policy       ), priority=0    , match=(1), action=(reg8[[0..15]] = <cleared>; next;)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.4), action=(reg8[[0..15]] = <cleared>; reg8[[16..31]] = select(1, 2, 3);)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.5), action=(reg0 = 172.168.0.110; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; reg8[[0..15]] = <cleared>; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 1), action=(reg0 = 172.168.0.101; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 2), action=(reg0 = 172.168.0.102; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=100  , match=(reg8[[0..15]] == <cleared> && reg8[[16..31]] == 3), action=(reg0 = 172.168.0.103; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; next;)
+  table=13(lr_in_policy_ecmp  ), priority=150  , match=(reg8[[0..15]] == <cleared>), action=(next;)
+])
+
+check ovn-nbctl --wait=sb lr-policy-del lr0  10 "ip4.src == 10.0.0.4"
+ovn-sbctl dump-flows lr0 > lr0flows3
+AT_CAPTURE_FILE([lr0flows3])
+
+AT_CHECK([grep "lr_in_policy" lr0flows3 |  \
+sed 's/reg8\[[0..15\]] = [[0-9]]*/reg8\[[0..15\]] = <cleared>/' | \
+sed 's/reg8\[[0..15\]] == [[0-9]]*/reg8\[[0..15\]] == <cleared>/' | sort], [0], [dnl
+  table=12(lr_in_policy       ), priority=0    , match=(1), action=(reg8[[0..15]] = <cleared>; next;)
+  table=12(lr_in_policy       ), priority=10   , match=(ip4.src == 10.0.0.5), action=(reg0 = 172.168.0.110; reg1 = 172.168.0.100; eth.src = 00:00:20:20:12:13; outport = "lr0-public"; flags.loopback = 1; reg8[[0..15]] = <cleared>; next;)
+  table=13(lr_in_policy_ecmp  ), priority=150  , match=(reg8[[0..15]] == <cleared>), action=(next;)
+])
+
+check ovn-nbctl --wait=sb add logical_router_policy . nexthops "2000\:\:b"
+ovn-sbctl dump-flows lr0 > lr0flows3
+AT_CAPTURE_FILE([lr0flows3])
+
+AT_CHECK([grep "lr_in_policy" lr0flows3 |  \
+sed 's/reg8\[[0..15\]] = [[0-9]]*/reg8\[[0..15\]] = <cleared>/' | \
+sed 's/reg8\[[0..15\]] == [[0-9]]*/reg8\[[0..15\]] == <cleared>/' | sort], [0], [dnl
+  table=12(lr_in_policy       ), priority=0    , match=(1), action=(reg8[[0..15]] = <cleared>; next;)
+  table=13(lr_in_policy_ecmp  ), priority=150  , match=(reg8[[0..15]] == <cleared>), action=(next;)
+])
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- check BFD config propagation to SBDB])
+AT_KEYWORDS([northd-bfd])
+ovn_start
+
+check ovn-nbctl --wait=sb lr-add r0
+for i in $(seq 1 5); do
+    check ovn-nbctl --wait=sb lrp-add r0 r0-sw$i 00:00:00:00:00:0$i 192.168.$i.1/24
+    check ovn-nbctl --wait=sb ls-add sw$i
+    check ovn-nbctl --wait=sb lsp-add sw$i sw$i-r0
+    check ovn-nbctl --wait=sb lsp-set-type sw$i-r0 router
+    check ovn-nbctl --wait=sb lsp-set-options sw$i-r0 router-port=r0-sw$i
+    check ovn-nbctl --wait=sb lsp-set-addresses sw$i-r0 00:00:00:00:00:0$i
+done
+
+uuid=$(ovn-nbctl create bfd logical_port=r0-sw1 dst_ip=192.168.10.2 status=down min_tx=250 min_rx=250 detect_mult=10)
+ovn-nbctl create bfd logical_port=r0-sw2 dst_ip=192.168.20.2 status=down min_tx=500 min_rx=500 detect_mult=20
+ovn-nbctl create bfd logical_port=r0-sw3 dst_ip=192.168.30.2 status=down
+ovn-nbctl create bfd logical_port=r0-sw4 dst_ip=192.168.40.2 status=down min_tx=0 detect_mult=0
+
+check_column 10 bfd detect_mult logical_port=r0-sw1
+check_column "192.168.10.2" bfd dst_ip logical_port=r0-sw1
+check_column 250 bfd min_rx logical_port=r0-sw1
+check_column 250 bfd min_tx logical_port=r0-sw1
+check_column admin_down bfd status logical_port=r0-sw1
+
+check_column 20 bfd detect_mult logical_port=r0-sw2
+check_column "192.168.20.2" bfd dst_ip logical_port=r0-sw2
+check_column 500 bfd min_rx logical_port=r0-sw2
+check_column 500 bfd min_tx logical_port=r0-sw2
+check_column admin_down bfd status logical_port=r0-sw2
+
+check_column 5 bfd detect_mult logical_port=r0-sw3
+check_column "192.168.30.2" bfd dst_ip logical_port=r0-sw3
+check_column 1000 bfd min_rx logical_port=r0-sw3
+check_column 1000 bfd min_tx logical_port=r0-sw3
+check_column admin_down bfd status logical_port=r0-sw3
+
+uuid=$(fetch_column nb:bfd _uuid logical_port=r0-sw1)
+check ovn-nbctl set bfd $uuid min_tx=1000
+check ovn-nbctl set bfd $uuid min_rx=1000
+check ovn-nbctl set bfd $uuid detect_mult=100
+
+uuid_2=$(fetch_column nb:bfd _uuid logical_port=r0-sw2)
+check ovn-nbctl clear bfd $uuid_2 min_rx
+check_column 1000 bfd min_rx logical_port=r0-sw2
+
+check_column 1000 bfd min_tx logical_port=r0-sw1
+check_column 1000 bfd min_rx logical_port=r0-sw1
+check_column 100 bfd detect_mult logical_port=r0-sw1
+
+check ovn-nbctl --bfd=$uuid lr-route-add r0 100.0.0.0/8 192.168.10.2
+check_column down bfd status logical_port=r0-sw1
+AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.10.2 | grep -q bfd],[0])
+
+check ovn-nbctl --bfd lr-route-add r0 200.0.0.0/8 192.168.20.2
+check_column down bfd status logical_port=r0-sw2
+AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.20.2 | grep -q bfd],[0])
+
+check ovn-nbctl --bfd lr-route-add r0 240.0.0.0/8 192.168.50.2 r0-sw5
+check_column down bfd status logical_port=r0-sw5
+AT_CHECK([ovn-nbctl lr-route-list r0 | grep 192.168.50.2 | grep -q bfd],[0])
+
+route_uuid=$(fetch_column nb:logical_router_static_route _uuid ip_prefix="100.0.0.0/8")
+check ovn-nbctl clear logical_router_static_route $route_uuid bfd
+check_column admin_down bfd status logical_port=r0-sw1
+
+ovn-nbctl destroy bfd $uuid
+check_row_count bfd 3
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- check LSP attached to multiple LS])
+ovn_start
+
+check ovn-nbctl ls-add ls1 \
+    -- ls-add ls2 \
+    -- lsp-add ls1 p1
+check ovn-nbctl --wait=sb sync
+
+uuid=$(fetch_column nb:Logical_Switch_Port _uuid name=p1)
+check ovn-nbctl set Logical_Switch ls2 ports=$uuid
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([grep -qE 'duplicate logical port p1' northd/ovn-northd.log], [0])
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- check LRP attached to multiple LR])
+ovn_start
+
+check ovn-nbctl lr-add lr1 \
+    -- lr-add lr2 \
+    -- lrp-add lr1 p1 00:00:00:00:00:01 10.0.0.1/24
+check ovn-nbctl --wait=sb sync
+
+uuid=$(fetch_column nb:Logical_Router_Port _uuid name=p1)
+check ovn-nbctl set Logical_Router lr2 ports=$uuid
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([grep -qE 'duplicate logical router port p1' northd/ovn-northd.log], [0])
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- check duplicate LSP/LRP])
+ovn_start
+
+check ovn-nbctl ls-add ls \
+    -- lsp-add ls p1 \
+    -- lr-add lr \
+    -- lrp-add lr p1 00:00:00:00:00:01 10.0.0.1/24
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([grep -qE 'duplicate logical.*port p1' northd/ovn-northd.log], [0])
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- Port_Binding.up backwards compatibility])
+ovn_start
+
+ovn-nbctl ls-add ls1
+ovn-nbctl --wait=sb lsp-add ls1 lsp1
+
+# Simulate the fact that lsp1 had been previously bound on hv1 by an
+# ovn-controller running an older version.
+ovn-sbctl \
+    --id=@e create encap chassis_name=hv1 ip="192.168.0.1" type="geneve" \
+    -- --id=@c create chassis name=hv1 encaps=@e \
+    -- set Port_Binding lsp1 chassis=@c
+
+wait_for_ports_up lsp1
+
+# Simulate the fact that hv1 is aware of Port_Binding.up, ovn-northd
+# should transition the port state to down.
+check ovn-sbctl set chassis hv1 other_config:port-up-notif=true
+wait_row_count nb:Logical_Switch_Port 1 up=false name=lsp1
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- lb_force_snat_ip for Gateway Routers])
+ovn_start
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl ls-add sw1
+
+# Create a logical router and attach both logical switches
+check ovn-nbctl lr-add lr0
+check ovn-nbctl lrp-add lr0 lr0-sw0 00:00:00:00:ff:01 10.0.0.1/24
+check ovn-nbctl lsp-add sw0 sw0-lr0
+check ovn-nbctl lsp-set-type sw0-lr0 router
+check ovn-nbctl lsp-set-addresses sw0-lr0 00:00:00:00:ff:01
+check ovn-nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0
+
+check ovn-nbctl lrp-add lr0 lr0-sw1 00:00:00:00:ff:02 20.0.0.1/24
+check ovn-nbctl lsp-add sw1 sw1-lr0
+check ovn-nbctl lsp-set-type sw1-lr0 router
+check ovn-nbctl lsp-set-addresses sw1-lr0 00:00:00:00:ff:02
+check ovn-nbctl lsp-set-options sw1-lr0 router-port=lr0-sw1
+
+check ovn-nbctl ls-add public
+check ovn-nbctl lrp-add lr0 lr0-public 00:00:20:20:12:13 172.168.0.100/24
+check ovn-nbctl lsp-add public public-lr0
+check ovn-nbctl lsp-set-type public-lr0 router
+check ovn-nbctl lsp-set-addresses public-lr0 router
+check ovn-nbctl lsp-set-options public-lr0 router-port=lr0-public
+
+check ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.4:8080
+check ovn-nbctl lr-lb-add lr0 lb1
+check ovn-nbctl set logical_router lr0 options:chassis=ch1
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CAPTURE_FILE([lr0flows])
+
+AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
+  table=5 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+])
+
+AT_CHECK([grep "lr_in_dnat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+])
+
+
+AT_CHECK([grep "lr_out_snat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+])
+
+check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="20.0.0.4 aef0::4"
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CAPTURE_FILE([lr0flows])
+
+
+AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
+  table=5 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(ip4 && ip4.dst == 20.0.0.4), action=(ct_snat;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(ip6 && ip6.dst == aef0::4), action=(ct_snat;)
+])
+
+AT_CHECK([grep "lr_in_dnat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_dnat;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.4:8080);)
+])
+
+AT_CHECK([grep "lr_out_snat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+  table=1 (lr_out_snat        ), priority=100  , match=(flags.force_snat_for_lb == 1 && ip4), action=(ct_snat(20.0.0.4);)
+  table=1 (lr_out_snat        ), priority=100  , match=(flags.force_snat_for_lb == 1 && ip6), action=(ct_snat(aef0::4);)
+])
+
+check ovn-nbctl --wait=sb set logical_router lr0 options:lb_force_snat_ip="router_ip"
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CAPTURE_FILE([lr0flows])
+
+AT_CHECK([grep "lr_in_ip_input" lr0flows | grep "priority=60" | sort], [0], [dnl
+])
+
+AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
+  table=5 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-public" && ip4.dst == 172.168.0.100), action=(ct_snat;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw0" && ip4.dst == 10.0.0.1), action=(ct_snat;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw1" && ip4.dst == 20.0.0.1), action=(ct_snat;)
+])
+
+AT_CHECK([grep "lr_in_dnat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_dnat;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.4:8080);)
+])
+
+AT_CHECK([grep "lr_out_snat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip4 && outport == "lr0-public"), action=(ct_snat(172.168.0.100);)
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip4 && outport == "lr0-sw0"), action=(ct_snat(10.0.0.1);)
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip4 && outport == "lr0-sw1"), action=(ct_snat(20.0.0.1);)
+])
+
+check ovn-nbctl --wait=sb remove logical_router lr0 options chassis
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CAPTURE_FILE([lr0flows])
+
+AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
+  table=5 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+])
+
+AT_CHECK([grep "lr_out_snat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+])
+
+check ovn-nbctl set logical_router lr0 options:chassis=ch1
+check ovn-nbctl --wait=sb add logical_router_port lr0-sw1 networks "bef0\:\:1/64"
+
+ovn-sbctl dump-flows lr0 > lr0flows
+AT_CAPTURE_FILE([lr0flows])
+
+AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
+  table=5 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-public" && ip4.dst == 172.168.0.100), action=(ct_snat;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw0" && ip4.dst == 10.0.0.1), action=(ct_snat;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw1" && ip4.dst == 20.0.0.1), action=(ct_snat;)
+  table=5 (lr_in_unsnat       ), priority=110  , match=(inport == "lr0-sw1" && ip6.dst == bef0::1), action=(ct_snat;)
+])
+
+AT_CHECK([grep "lr_in_dnat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_dnat;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.4:8080);)
+])
+
+AT_CHECK([grep "lr_out_snat" lr0flows | grep force_snat_for_lb | sort], [0], [dnl
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip4 && outport == "lr0-public"), action=(ct_snat(172.168.0.100);)
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip4 && outport == "lr0-sw0"), action=(ct_snat(10.0.0.1);)
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip4 && outport == "lr0-sw1"), action=(ct_snat(20.0.0.1);)
+  table=1 (lr_out_snat        ), priority=110  , match=(flags.force_snat_for_lb == 1 && ip6 && outport == "lr0-sw1"), action=(ct_snat(bef0::1);)
+])
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- FDB cleanup])
+
+ovn_start
+
+ovn-nbctl ls-add sw0
+ovn-nbctl lsp-add sw0 sw0-p1
+ovn-nbctl lsp-add sw0 sw0-p2
+ovn-nbctl lsp-add sw0 sw0-p3
+
+ovn-nbctl ls-add sw1
+ovn-nbctl lsp-add sw1 sw1-p1
+ovn-nbctl lsp-add sw1 sw1-p2
+ovn-nbctl --wait=sb lsp-add sw1 sw1-p3
+
+sw0_key=$(fetch_column datapath_binding tunnel_key external_ids:name=sw0)
+sw1_key=$(fetch_column datapath_binding tunnel_key external_ids:name=sw1)
+sw0p1_key=$(fetch_column port_binding tunnel_key logical_port=sw0-p1)
+sw0p2_key=$(fetch_column port_binding tunnel_key logical_port=sw0-p2)
+sw1p1_key=$(fetch_column port_binding tunnel_key logical_port=sw1-p1)
+
+ovn-sbctl create FDB mac="00\:00\:00\:00\:00\:01" dp_key=$sw0_key port_key=$sw0p1_key
+ovn-sbctl create FDB mac="00\:00\:00\:00\:00\:02" dp_key=$sw0_key port_key=$sw0p1_key
+ovn-sbctl create FDB mac="00\:00\:00\:00\:00\:03" dp_key=$sw0_key port_key=$sw0p2_key
+ovn-sbctl create FDB mac="00\:00\:00\:00\:01\:01" dp_key=$sw1_key port_key=$sw1p1_key
+ovn-sbctl create FDB mac="00\:00\:00\:00\:01\:02" dp_key=$sw1_key port_key=$sw1p1_key
+ovn-sbctl create FDB mac="00\:00\:00\:00\:01\:03" dp_key=$sw1_key port_key=$sw1p1_key
+
+wait_row_count FDB 6
+
+ovn-sbctl create fdb mac="00\:00\:00\:00\:01\:03" dp_key=$sw1_key port_key=10
+wait_row_count FDB 6
+ovn-sbctl create fdb mac="00\:00\:00\:00\:01\:03" dp_key=4 port_key=10
+wait_row_count FDB 6
+
+ovn-nbctl --wait=sb ls-del sw1
+wait_row_count FDB 3
+
+ovn-nbctl lsp-del sw0-p3
+wait_row_count FDB 3
+
+ovn-nbctl lsp-del sw0-p1
+wait_row_count FDB 1
+
+check_column '00:00:00:00:00:03' FDB mac
+ovn-sbctl list fdb
+
+check_column $sw0_key FDB dp_key
+check_column $sw0p2_key FDB port_key
+
+ovn-nbctl --wait=sb lsp-add sw0 sw0-p1
+wait_row_count FDB 1
+
+ovn-nbctl lsp-del sw0-p2
+ovn-nbctl lsp-add sw0 sw0-p2
+wait_row_count FDB 0
+
+ovn-sbctl list FDB
+
+AT_CLEANUP
+
+AT_SETUP([ovn -- HA chassis group cleanup for external port ])
+ovn_start
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-p1
+check ovn-nbctl lsp-set-type sw0-p1 external
+
+check ovn-sbctl chassis-add ch1 geneve 127.0.0.1
+check ovn-sbctl chassis-add ch2 geneve 127.0.0.2
+
+check ovn-nbctl ha-chassis-group-add hagrp1
+check ovn-nbctl ha-chassis-group-add-chassis hagrp1 ch1 20
+check ovn-nbctl --wait=sb ha-chassis-group-add-chassis hagrp1 ch2 10
+
+ha_grp1_uuid=$(fetch_column nb:ha_chassis_group _uuid)
+echo "ha grp1 uuid = $ha_grp1_uuid"
+ovn-nbctl list ha_chassis_group
+check ovn-nbctl set logical_switch_port sw0-p1 ha_chassis_group=$ha_grp1_uuid
+
+wait_row_count ha_chassis_group 1
+check ovn-nbctl clear logical_switch_port sw0-p1 ha_chassis_group
+wait_row_count ha_chassis_group 0
+
+check ovn-nbctl set logical_switch_port sw0-p1 ha_chassis_group=$ha_grp1_uuid
+wait_row_count ha_chassis_group 1
+sb_ha_grp1_uuid=$(fetch_column ha_chassis_group _uuid)
+
+echo
+echo "__file__:__line__:Check that port_binding sw0-p1 has ha_chassis_group set"
+
+check_column "$sb_ha_grp1_uuid" Port_Binding ha_chassis_group logical_port=sw0-p1
+
+AS_BOX([Clear ha_chassis_group for sw0-p1 and reset port type to normal port in the same txn])
+
+check ovn-nbctl clear logical_switch_port sw0-p1 ha_chassis_group -- set logical_switch_port sw0-p1 'type=""'
+wait_row_count ha_chassis_group 0
+check_column "" Port_Binding chassis logical_port=sw0-p1
+
+AT_CLEANUP
diff --git a/tests/ovn-ofctrl-seqno.at b/tests/ovn-ofctrl-seqno.at
new file mode 100644
index 000000000..59dfea947
--- /dev/null
+++ b/tests/ovn-ofctrl-seqno.at
@@ -0,0 +1,226 @@
+#
+# Unit tests for the controller/ofctrl-seqno.c module.
+#
+AT_BANNER([OVN unit tests - ofctrl-seqno])
+
+AT_SETUP([ovn -- unit test -- ofctrl-seqno add-type])
+
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_add_type 1], [0], [dnl
+0
+])
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_add_type 2], [0], [dnl
+0
+1
+])
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_add_type 3], [0], [dnl
+0
+1
+2
+])
+AT_CLEANUP
+
+AT_SETUP([ovn -- unit test -- ofctrl-seqno ack-seqnos])
+
+AS_BOX([No Ack Batching, 1 seqno type])
+n_types=1
+n_app_seqnos=3
+app_seqnos="40 41 42"
+
+n_acks=1
+acks="1"
+echo "ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} \
+          ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 3
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+])
+
+n_acks=2
+acks="1 2"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} \
+          ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 3
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+ofctrl-seqno-type: 0
+  last-acked 41
+  41
+])
+
+n_acks=3
+acks="1 2 3"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} \
+          ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 3
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+ofctrl-seqno-type: 0
+  last-acked 41
+  41
+ofctrl-seqno-type: 0
+  last-acked 42
+  42
+])
+
+AS_BOX([Ack Batching, 1 seqno type])
+n_types=1
+n_app_seqnos=3
+app_seqnos="40 41 42"
+
+n_acks=1
+acks="1"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos true ${n_types} \
+          ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 3
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+])
+
+n_acks=2
+acks="1 2"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos true ${n_types} \
+          ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 3
+ofctrl-seqno-type: 0
+  last-acked 41
+  40
+  41
+])
+
+n_acks=3
+acks="1 2 3"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos true ${n_types} \
+          ${n_app_seqnos} ${app_seqnos} ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 3
+ofctrl-seqno-type: 0
+  last-acked 42
+  40
+  41
+  42
+])
+
+AS_BOX([No Ack Batching, 2 seqno types])
+n_types=2
+n_app_seqnos=3
+app_seqnos1="40 41 42"
+app_seqnos2="50 51 52"
+
+n_acks=1
+acks="1"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} \
+          ${n_app_seqnos} ${app_seqnos1} ${n_app_seqnos} ${app_seqnos2} \
+          ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 6
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+ofctrl-seqno-type: 1
+  last-acked 0
+])
+
+n_acks=3
+acks="1 2 3"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} \
+          ${n_app_seqnos} ${app_seqnos1} ${n_app_seqnos} ${app_seqnos2} \
+          ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 6
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+ofctrl-seqno-type: 1
+  last-acked 0
+ofctrl-seqno-type: 0
+  last-acked 41
+  41
+ofctrl-seqno-type: 1
+  last-acked 0
+ofctrl-seqno-type: 0
+  last-acked 42
+  42
+ofctrl-seqno-type: 1
+  last-acked 0
+])
+
+n_acks=3
+acks="4 5 6"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos false ${n_types} \
+          ${n_app_seqnos} ${app_seqnos1} ${n_app_seqnos} ${app_seqnos2} \
+          ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 6
+ofctrl-seqno-type: 0
+  last-acked 42
+  40
+  41
+  42
+ofctrl-seqno-type: 1
+  last-acked 50
+  50
+ofctrl-seqno-type: 0
+  last-acked 42
+ofctrl-seqno-type: 1
+  last-acked 51
+  51
+ofctrl-seqno-type: 0
+  last-acked 42
+ofctrl-seqno-type: 1
+  last-acked 52
+  52
+])
+
+AS_BOX([Ack Batching, 2 seqno types])
+n_types=2
+n_app_seqnos=3
+app_seqnos1="40 41 42"
+app_seqnos2="50 51 52"
+
+n_acks=1
+acks="1"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos true ${n_types} \
+          ${n_app_seqnos} ${app_seqnos1} ${n_app_seqnos} ${app_seqnos2} \
+          ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 6
+ofctrl-seqno-type: 0
+  last-acked 40
+  40
+ofctrl-seqno-type: 1
+  last-acked 0
+])
+
+n_acks=3
+acks="1 2 3"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos true ${n_types} \
+          ${n_app_seqnos} ${app_seqnos1} ${n_app_seqnos} ${app_seqnos2} \
+          ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 6
+ofctrl-seqno-type: 0
+  last-acked 42
+  40
+  41
+  42
+ofctrl-seqno-type: 1
+  last-acked 0
+])
+
+n_acks=3
+acks="4 5 6"
+AT_CHECK([ovstest test-ofctrl-seqno ofctrl_seqno_ack_seqnos true ${n_types} \
+          ${n_app_seqnos} ${app_seqnos1} ${n_app_seqnos} ${app_seqnos2} \
+          ${n_acks} ${acks}], [0], [dnl
+ofctrl-seqno-req-cfg: 6
+ofctrl-seqno-type: 0
+  last-acked 42
+  40
+  41
+  42
+ofctrl-seqno-type: 1
+  last-acked 52
+  50
+  51
+  52
+])
+AT_CLEANUP
diff --git a/tests/ovn-performance.at b/tests/ovn-performance.at
index 6cc5b2174..e510c6cef 100644
--- a/tests/ovn-performance.at
+++ b/tests/ovn-performance.at
@@ -232,37 +232,32 @@ AT_SETUP([ovn -- ovn-controller incremental processing])
 
 ovn_start
 net_add n1
-for i in 1 2; do
+for i in `seq 1 5`; do
     sim_add hv$i
     as hv$i
     ovs-vsctl add-br br-phys
     ovn_attach n1 br-phys 192.168.0.$i
-done
-
-for i in 1 2 3; do
-    sim_add gw$i
-    as gw$i
-    ovs-vsctl add-br br-phys
-    ovs-vsctl add-br br-ex
-    ovs-vsctl set open . external_ids:ovn-bridge-mappings="public:br-ex"
-    j=$((i + 2))
-    ovn_attach n1 br-phys 192.168.0.$j
-    ip link add vgw$i type dummy
-    ovs-vsctl add-port br-ex vgw$i
+    if [[ $i -ge 3 ]] ; then
+        ovs-vsctl add-br br-ex
+        ovs-vsctl set open . external_ids:ovn-bridge-mappings="public:br-ex"
+        ip link add vgw$i type dummy
+        ovs-vsctl add-port br-ex vgw$i
+    fi
 done
 
 # Wait for the tunnel ports to be created and up.
 # Otherwise this may affect the lflow_run count.
+for i in `seq 1 5`; do
+    for j in `seq 1 5`; do
+        if [[ $i -ne $j ]] ; then
+            OVS_WAIT_UNTIL([
+                test $(as hv$i ovs-vsctl list interface ovn-hv$j-0 | \
+            grep -c tunnel_egress_iface_carrier=up) -eq 1
+            ])
+        fi
+    done
+done
 
-OVS_WAIT_UNTIL([
-    test $(as hv1 ovs-vsctl list interface ovn-hv2-0 | \
-grep tunnel_egress_iface_carrier=up | wc -l) -eq 1
-])
-
-OVS_WAIT_UNTIL([
-    test $(as hv2 ovs-vsctl list interface ovn-hv1-0 | \
-grep tunnel_egress_iface_carrier=up | wc -l) -eq 1
-])
 
 # Add router lr1
 OVN_CONTROLLER_EXPECT_NO_HIT(
@@ -463,63 +458,63 @@ OVN_CONTROLLER_EXPECT_NO_HIT(
 )
 
 OVN_CONTROLLER_EXPECT_HIT_COND(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run], [=0 =0 >0 =0 =0],
-    [ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public gw1 30 && ovn-nbctl --wait=hv sync]
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run], [=0 =0 >0 =0 =0],
+    [ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public hv3 30 && ovn-nbctl --wait=hv sync]
 )
 
-# After this, BFD should be enabled from hv1 and hv2 to gw1.
-# So there should be lflow_run hits in hv1, hv2, gw1 and gw2
+# After this, BFD should be enabled from hv1 and hv2 to hv3.
+# So there should be lflow_run hits in hv1, hv2, hv3 and hv4
 OVN_CONTROLLER_EXPECT_HIT_COND(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run], [>0 >0 >0 >0 =0],
-    [ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public gw2 20 && ovn-nbctl --wait=hv sync]
-)
-
-OVN_CONTROLLER_EXPECT_HIT(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run],
-    [ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public gw3 10 && ovn-nbctl --wait=hv sync]
-)
-
-# create QoS rule
-OVN_CONTROLLER_EXPECT_NO_HIT(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run],
-    [ovn-nbctl --wait=hv set Logical_Switch_Port ln-public options:qos_burst=1000]
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run], [>0 >0 >0 >0 =0],
+    [ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public hv4 20 && ovn-nbctl --wait=hv sync]
 )
 
 OVN_CONTROLLER_EXPECT_HIT(
-    [gw1], [lflow_run],
-    [as gw1 ovs-vsctl set interface vgw1 external-ids:ovn-egress-iface=true]
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run],
+    [ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public hv5 10 && ovn-nbctl --wait=hv sync]
 )
 
-# Make gw2 master. There is remote possibility that full recompute
-# triggers for gw2 after it becomes master. Most of the time
-# there will be no recompute.
-ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public gw2 40
-gw2_ch=$(ovn-sbctl --bare --columns _uuid list chassis gw2)
-OVS_WAIT_UNTIL([ovn-sbctl find port_binding logical_port=cr-lr1-public chassis=$gw2_ch])
+# Make hv4 master. There is a remote possibility that full recompute
+# triggers for hv1-hv5 after hv4 becomes master because of updates to the
+# ovn-hv$i-0 interfaces. Most of the time there will be no recompute.
+ovn-nbctl --wait=hv lrp-set-gateway-chassis lr1-public hv4 40
+hv4_ch=$(ovn-sbctl --bare --columns _uuid list chassis hv4)
+OVS_WAIT_UNTIL([ovn-sbctl find port_binding logical_port=cr-lr1-public chassis=$hv4_ch])
 
 OVN_CONTROLLER_EXPECT_HIT_COND(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run],  [=0 =0 =0 >=0 =0],
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run],  [>=0 >=0 >=0 >=0 >=0],
     [ovn-nbctl --wait=hv sync]
 )
 
-# Delete gw2 from gateway chassis
+# Delete hv4 from gateway chassis
 OVN_CONTROLLER_EXPECT_HIT(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run],
-    [ovn-nbctl --wait=hv lrp-del-gateway-chassis lr1-public gw2 && ovn-nbctl --wait=hv sync]
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run],
+    [ovn-nbctl --wait=hv lrp-del-gateway-chassis lr1-public hv4 && ovn-nbctl --wait=hv sync]
 )
 
-# Delete gw1 from gateway chassis
-# After this, the BFD should be disabled entirely as gw3 is the
+# Delete hv3 from gateway chassis
+# After this, the BFD should be disabled entirely as hv5 is the
 # only gateway chassis.
 OVN_CONTROLLER_EXPECT_HIT_COND(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run],  [>0 >0 >0 =0 >0],
-    [ovn-nbctl --wait=hv lrp-del-gateway-chassis lr1-public gw1]
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run],  [>0 >0 >0 =0 >0],
+    [ovn-nbctl --wait=hv lrp-del-gateway-chassis lr1-public hv3]
 )
 
-# Delete gw3 from gateway chassis. There should be no lflow_run.
+# Delete hv5 from gateway chassis. There should be no lflow_run.
 OVN_CONTROLLER_EXPECT_NO_HIT(
-    [hv1 hv2 gw1 gw2 gw3], [lflow_run],
-    [ovn-nbctl --wait=hv lrp-del-gateway-chassis lr1-public gw3]
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run],
+    [ovn-nbctl --wait=hv lrp-del-gateway-chassis lr1-public hv5]
+)
+
+# create QoS rule
+OVN_CONTROLLER_EXPECT_NO_HIT(
+    [hv1 hv2 hv3 hv4 hv5], [lflow_run],
+    [ovn-nbctl --wait=hv set Logical_Switch_Port ln-public options:qos_burst=1000]
+)
+
+OVN_CONTROLLER_EXPECT_HIT(
+    [hv3], [lflow_run],
+    [as hv3 ovs-vsctl set interface vgw3 external-ids:ovn-egress-iface=true]
 )
 
 for i in 1 2; do
diff --git a/tests/ovn.at b/tests/ovn.at
index 2e0bc9c53..bd59c0a77 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -1637,6 +1637,17 @@ tcp_reset { };
     encodes as controller(userdata=00.00.00.0b.00.00.00.00)
     has prereqs tcp
 
+# sctp_abort
+sctp_abort {eth.dst = ff:ff:ff:ff:ff:ff; output; }; output;
+    formats as sctp_abort { eth.dst = ff:ff:ff:ff:ff:ff; output; }; output;
+    encodes as controller(userdata=00.00.00.18.00.00.00.00.00.19.00.10.80.00.06.06.ff.ff.ff.ff.ff.ff.00.00.ff.ff.00.10.00.00.23.20.00.0e.ff.f8.40.00.00.00),resubmit(,64)
+    has prereqs sctp
+
+sctp_abort { };
+    formats as sctp_abort { drop; };
+    encodes as controller(userdata=00.00.00.18.00.00.00.00)
+    has prereqs sctp
+
 # reject
 reject { eth.dst = ff:ff:ff:ff:ff:ff; output; }; output;
     encodes as controller(userdata=00.00.00.16.00.00.00.00.00.19.00.10.80.00.06.06.ff.ff.ff.ff.ff.ff.00.00.ff.ff.00.10.00.00.23.20.00.0e.ff.f8.40.00.00.00),resubmit(,64)
@@ -1807,6 +1818,68 @@ ct_snat_to_vip;
 ct_snat_to_vip(foo);
     Syntax error at `(' expecting `;'.
 
+# bfd packets
+handle_bfd_msg();
+    encodes as controller(userdata=00.00.00.17.00.00.00.00)
+
+# put_fdb
+put_fdb(inport, arp.sha);
+    encodes as push:NXM_OF_ETH_SRC[],push:NXM_NX_ARP_SHA[],pop:NXM_OF_ETH_SRC[],controller(userdata=00.00.00.19.00.00.00.00),pop:NXM_OF_ETH_SRC[]
+    has prereqs eth.type == 0x806
+
+put_fdb(inport, eth.src);
+    encodes as controller(userdata=00.00.00.19.00.00.00.00)
+
+put_fdb(inport, ip4.src);
+    Cannot use 32-bit field ip4.src[0..31] where 48-bit field is required.
+
+# get_fdb
+outport = get_fdb(eth.dst);
+    encodes as set_field:0->reg15,resubmit(,71)
+
+outport = get_fdb(eth.src);
+    encodes as push:NXM_OF_ETH_DST[],push:NXM_OF_ETH_SRC[],pop:NXM_OF_ETH_DST[],set_field:0->reg15,resubmit(,71),pop:NXM_OF_ETH_DST[]
+
+inport = get_fdb(arp.sha);
+    encodes as push:NXM_OF_ETH_DST[],push:NXM_NX_ARP_SHA[],pop:NXM_OF_ETH_DST[],set_field:0->reg15,resubmit(,71),pop:NXM_OF_ETH_DST[],move:NXM_NX_REG15[]->NXM_NX_REG14[]
+    has prereqs eth.type == 0x806
+
+reg0 = get_fdb(arp.tha);
+    encodes as push:NXM_OF_ETH_DST[],push:NXM_NX_ARP_THA[],pop:NXM_OF_ETH_DST[],set_field:0->reg15,resubmit(,71),pop:NXM_OF_ETH_DST[],move:NXM_NX_REG15[]->NXM_NX_XXREG0[96..127]
+    has prereqs eth.type == 0x806
+
+reg0[1..3] = get_fdb(eth.src);
+    Cannot use 3-bit field reg0[1..3] where 32-bit field is required.
+
+reg15 = get_fdb(eth.dst);
+    Syntax error at `reg15' expecting field name.
+
+outport = get_fdb(ip4.dst);
+    Cannot use 32-bit field ip4.dst[0..31] where 48-bit field is required.
+
+# lookup_fdb
+reg0[0] = lookup_fdb(inport, eth.src);
+    encodes as set_field:0/0x100->reg10,resubmit(,72),move:NXM_NX_REG10[8]->NXM_NX_XXREG0[96]
+
+reg1[4] = lookup_fdb(outport, eth.dst);
+    encodes as push:NXM_NX_REG14[],push:NXM_OF_ETH_SRC[],push:NXM_OF_ETH_DST[],push:NXM_NX_REG15[],pop:NXM_NX_REG14[],pop:NXM_OF_ETH_SRC[],set_field:0/0x100->reg10,resubmit(,72),pop:NXM_OF_ETH_SRC[],pop:NXM_NX_REG14[],move:NXM_NX_REG10[8]->NXM_NX_XXREG0[68]
+
+reg0[0] = lookup_fdb(outport, arp.sha);
+    encodes as push:NXM_NX_REG14[],push:NXM_OF_ETH_SRC[],push:NXM_NX_ARP_SHA[],push:NXM_NX_REG15[],pop:NXM_NX_REG14[],pop:NXM_OF_ETH_SRC[],set_field:0/0x100->reg10,resubmit(,72),pop:NXM_OF_ETH_SRC[],pop:NXM_NX_REG14[],move:NXM_NX_REG10[8]->NXM_NX_XXREG0[96]
+    has prereqs eth.type == 0x806
+
+reg0 = lookup_fdb(outport, arp.sha);
+    Cannot use 32-bit field reg0[0..31] where 1-bit field is required.
+
+outport = lookup_fdb(outport, arp.sha);
+    Cannot use string field outport where numeric field is required.
+
+reg1[1] = lookup_fdb(outport, ip4.src);
+    Cannot use 32-bit field ip4.src[0..31] where 48-bit field is required.
+
+reg1[1] = lookup_fdb(ip4.src, eth.src);
+    Cannot use numeric field ip4.src where string field is required.
+
 # Miscellaneous negative tests.
 ;
     Syntax error at `;'.
@@ -1837,53 +1910,46 @@ ovn_start
 # Turn on port security on all the vifs except vif[123]1.
 # Make vif13, vif2[23], vif3[123] destinations for unknown MACs.
 # Add some ACLs for Ethertypes 1234, 1235, 1236.
-ovn-nbctl ls-add lsw0
+check ovn-nbctl ls-add lsw0
 net_add n1
 for i in 1 2 3; do
     sim_add hv$i
     as hv$i
-    ovs-vsctl add-br br-phys
+    check ovs-vsctl add-br br-phys
     ovn_attach n1 br-phys 192.168.0.$i
 
     for j in 1 2 3; do
-        ovs-vsctl add-port br-int vif$i$j -- set Interface vif$i$j external-ids:iface-id=lp$i$j options:tx_pcap=hv$i/vif$i$j-tx.pcap options:rxq_pcap=hv$i/vif$i$j-rx.pcap ofport-request=$i$j
-        ovn-nbctl lsp-add lsw0 lp$i$j
+        check ovs-vsctl add-port br-int vif$i$j -- set Interface vif$i$j external-ids:iface-id=lp$i$j options:tx_pcap=hv$i/vif$i$j-tx.pcap options:rxq_pcap=hv$i/vif$i$j-rx.pcap ofport-request=$i$j
+        check ovn-nbctl lsp-add lsw0 lp$i$j
         if test $j = 1; then
-            ovn-nbctl lsp-set-addresses lp$i$j "f0:00:00:00:00:$i$j 192.168.0.$i$j" unknown
+            check ovn-nbctl lsp-set-addresses lp$i$j "f0:00:00:00:00:$i$j 192.168.0.$i$j" unknown
         else
             if test $j = 3; then
                 ip_addrs="192.168.0.$i$j fe80::ea2a:eaff:fe28:$i$j/64 192.169.0.$i$j"
             else
                 ip_addrs="192.168.0.$i$j"
             fi
-            ovn-nbctl lsp-set-addresses lp$i$j "f0:00:00:00:00:$i$j $ip_addrs"
-            ovn-nbctl lsp-set-port-security lp$i$j f0:00:00:00:00:$i$j
+            check ovn-nbctl lsp-set-addresses lp$i$j "f0:00:00:00:00:$i$j $ip_addrs"
+            check ovn-nbctl lsp-set-port-security lp$i$j f0:00:00:00:00:$i$j
         fi
     done
 done
-ovn-nbctl acl-add lsw0 from-lport 1000 'eth.type == 0x1234' drop
-ovn-nbctl acl-add lsw0 from-lport 1000 'eth.type == 0x1235 && inport == "lp11"' drop
-ovn-nbctl acl-add lsw0 to-lport 1000 'eth.type == 0x1236 && outport == "lp33"' drop
+check ovn-nbctl acl-add lsw0 from-lport 1000 'eth.type == 0x1234' drop
+check ovn-nbctl acl-add lsw0 from-lport 1000 'eth.type == 0x1235 && inport == "lp11"' drop
+check ovn-nbctl acl-add lsw0 to-lport 1000 'eth.type == 0x1236 && outport == "lp33"' drop
 ovn-nbctl create Address_Set name=set1 addresses=\"f0:00:00:00:00:11\",\"f0:00:00:00:00:21\",\"f0:00:00:00:00:31\"
-ovn-nbctl acl-add lsw0 to-lport 1000 'eth.type == 0x1237 && eth.src == $set1 && outport == "lp33"' drop
-
-get_lsp_uuid () {
-    ovn-nbctl lsp-list lsw0 | grep $1 | awk '{ print $1 }'
-}
+check ovn-nbctl acl-add lsw0 to-lport 1000 'eth.type == 0x1237 && eth.src == $set1 && outport == "lp33"' drop
 
-ovn-nbctl create Port_Group name=pg1 ports=`get_lsp_uuid lp22`,`get_lsp_uuid lp33`
-ovn-nbctl acl-add lsw0 to-lport 1000 'eth.type == 0x1238 && outport == @pg1' drop
+check ovn-nbctl pg-add pg1 lp22 lp33
+check ovn-nbctl acl-add lsw0 to-lport 1000 'eth.type == 0x1238 && outport == @pg1' drop
 check ovn-nbctl --wait=hv sync
+wait_for_ports_up
 
 # Pre-populate the hypervisors' ARP tables so that we don't lose any
 # packets for ARP resolution (native tunneling doesn't queue packets
 # for ARP resolution).
 OVN_POPULATE_ARP
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
-
 # Make sure there is no attempt to adding duplicated flows by ovn-controller
 AT_FAIL_IF([test -n "`grep duplicate hv1/ovn-controller.log`"])
 AT_FAIL_IF([test -n "`grep duplicate hv2/ovn-controller.log`"])
@@ -2078,11 +2144,7 @@ done
 
 # set address for lp13 with invalid characters.
 # lp13 should be configured with only 192.168.0.13.
-ovn-nbctl lsp-set-addresses lp13 "f0:00:00:00:00:13 192.168.0.13 invalid 192.169.0.13"
-
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+check ovn-nbctl --wait=hv lsp-set-addresses lp13 "f0:00:00:00:00:13 192.168.0.13 invalid 192.169.0.13"
 
 sip=`ip_to_hex 192 168 0 11`
 tip=`ip_to_hex 192 168 0 13`
@@ -2155,7 +2217,11 @@ for i in 1 2; do
     done
 done
 
-sleep 1
+# Wait for bindings to take effect.
+wait_row_count Port_Binding 1 logical_port=lp11 'encap!=[[]]'
+wait_row_count Port_Binding 1 logical_port=lp12 'encap!=[[]]'
+wait_row_count Port_Binding 1 logical_port=lp21 'encap!=[[]]'
+wait_row_count Port_Binding 1 logical_port=lp22 'encap!=[[]]'
 
 # dump port bindings; since we have vxlan and geneve tunnels, we expect the
 # ports to be bound to geneve tunnels.
@@ -2175,9 +2241,8 @@ check_row_count Port_Binding 1 logical_port=lp22 encap=$encap_rec
 # for ARP resolution).
 OVN_POPULATE_ARP
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Make sure there is no attempt to adding duplicated flows by ovn-controller
 AT_FAIL_IF([test -n "`grep duplicate hv1/ovn-controller.log`"])
@@ -2567,6 +2632,7 @@ for i in 1 2; do
         OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up $lsp_name` = xup])
     done
 done
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 ovn-sbctl dump-flows > sbflows
 AT_CAPTURE_FILE([sbflows])
@@ -2733,6 +2799,7 @@ for hv in 1 2; do
 done
 
 
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 
 ovn-sbctl dump-flows > sbflows
@@ -2866,6 +2933,7 @@ for hv in 1 2; do
 done
 
 
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 ovn-nbctl show
 ovn-sbctl dump-flows > sbflows
@@ -3003,6 +3071,7 @@ for i in 1 2; do
     OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up $lsp_name` = xup])
 done
 
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 ovn-nbctl show
 ovn-sbctl dump-flows > sbflows
@@ -3213,6 +3282,7 @@ for tag in 10 20; do
         OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up $lsp_name` = xup])
     done
 done
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 ovn-sbctl dump-flows
 
@@ -3371,9 +3441,8 @@ ovs-vsctl add-port br-phys vif3 -- set Interface vif3 options:tx_pcap=hv3/vif3-t
 # for ARP resolution).
 OVN_POPULATE_ARP
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # test_packet INPORT DST SRC ETHTYPE OUTPORT...
 #
@@ -3537,9 +3606,8 @@ ovs-vsctl add-port br-phys vif3 -- set Interface vif3 options:tx_pcap=hv3/vif3-t
 # for ARP resolution).
 OVN_POPULATE_ARP
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # test_packet INPORT DST SRC ETHTYPE OUTPORT...
 #
@@ -3728,6 +3796,7 @@ for i in 1 2 3; do
     done
 done
 
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # Pre-populate the hypervisors' ARP tables so that we don't lose any
@@ -3735,9 +3804,6 @@ check ovn-nbctl --wait=hv sync
 # for ARP resolution).
 OVN_POPULATE_ARP
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-
 # test_ip INPORT SRC_MAC DST_MAC SRC_IP DST_IP OUTPORT...
 #
 # This shell function causes a packet to be received on INPORT.  The packet's
@@ -4134,8 +4200,8 @@ done
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # test_ip INPORT SRC_MAC DST_MAC SRC_IP DST_IP OUTPORT...
 #
@@ -4307,8 +4373,8 @@ done
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Given the name of a logical port, prints the name of the hypervisor
 # on which it is located.
@@ -4740,8 +4806,8 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Packet to send.
 packet="inport==\"ls1-lp1\" && eth.src==$ls1_lp1_mac && eth.dst==$rp_ls1_mac &&
@@ -4851,9 +4917,8 @@ ovs-vsctl -- add-port br-int vif2 -- \
     ofport-request=1
 
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Send ip packets between the two ports.
 
@@ -4885,11 +4950,7 @@ as hv1 ovs-ofctl dump-flows br-int
 
 
 #Disable router R1
-ovn-nbctl set Logical_Router R1 enabled=false
-
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+ovn-nbctl --wait=hv set Logical_Router R1 enabled=false
 
 echo "---------SB dump-----"
 ovn-sbctl list datapath_binding
@@ -4964,10 +5025,11 @@ ovs-vsctl -- add-port br-int vif2 -- \
     options:rxq_pcap=hv1/vif2-rx.pcap \
     ofport-request=1
 
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+ovs-sbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
 
 # Send ip packets between the two ports.
 
@@ -4979,39 +5041,11 @@ dst_ip=`ip_to_hex 172 16 1 2`
 packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
 as hv1 ovs-appctl netdev-dummy/receive vif1 $packet
 
-
-echo "---------NB dump-----"
-ovn-nbctl show
-echo "---------------------"
-ovn-nbctl list logical_router
-echo "---------------------"
-ovn-nbctl list logical_router_port
-echo "---------------------"
-
-echo "---------SB dump-----"
-ovn-sbctl list datapath_binding
-echo "---------------------"
-ovn-sbctl list logical_flow
-echo "---------------------"
-
-echo "------ hv1 dump ----------"
-as hv1 ovs-ofctl dump-flows br-int
-
 #Disable router R1
-ovn-nbctl set Logical_Router R1 enabled=false
-
-echo "---------SB dump-----"
-ovn-sbctl list datapath_binding
-echo "---------------------"
-ovn-sbctl list logical_flow
-echo "---------------------"
-
-echo "------ hv1 dump ----------"
-as hv1 ovs-ofctl dump-flows br-int
+ovn-nbctl --wait=hv set Logical_Router R1 enabled=false
 
-# Allow some time for the disabling of logical router R1 to propagate.
-# XXX This should be more systematic.
-sleep 1
+ovs-sbctl dump-flows > sbflows2
+AT_CAPTURE_FILE([sbflows2])
 
 as hv1 ovs-appctl netdev-dummy/receive vif1 $packet
 
@@ -5114,8 +5148,11 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+ovn-sbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
 
 # Send ip packets between foo1 and alice1
 src_mac="f00000010203"
@@ -5133,25 +5170,6 @@ dst_ip=`ip_to_hex 172 16 2 2`
 packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
 
-echo "---------NB dump-----"
-ovn-nbctl show
-echo "---------------------"
-ovn-nbctl list logical_router
-echo "---------------------"
-ovn-nbctl list logical_router_port
-echo "---------------------"
-
-echo "---------SB dump-----"
-ovn-sbctl list datapath_binding
-echo "---------------------"
-ovn-sbctl list port_binding
-echo "---------------------"
-
-echo "------ hv1 dump ----------"
-as hv1 ovs-ofctl dump-flows br-int
-echo "------ hv2 dump ----------"
-as hv2 ovs-ofctl dump-flows br-int
-
 # Packet to Expect at bob1
 src_mac="000000010205"
 dst_mac="f00000010205"
@@ -5333,8 +5351,8 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Send ip packets between foo1 and alice1
 src_mac="f00000010203"
@@ -5469,7 +5487,8 @@ as hv1 ovs-appctl vlog/set dbg
 
 OVN_POPULATE_ARP
 
-sleep 2
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 as hv1 ovs-vsctl show
 
@@ -6189,7 +6208,8 @@ ovs-vsctl -- add-port br-int hv1-vif5 -- \
 
 OVN_POPULATE_ARP
 
-sleep 2
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 trim_zeros() {
     sed 's/\(00\)\{1,\}$//'
@@ -6469,10 +6489,8 @@ ovn-nbctl lsp-add foo foo1 \
 ovn-nbctl lsp-add alice alice1 \
 -- lsp-set-addresses alice1 "f0:00:00:01:02:04 172.16.1.2"
 
-
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 2
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Send ip packets between foo1 and alice1
 src_mac="f00000010203"
@@ -6535,7 +6553,8 @@ ip_prefix=192.168.1.0/24 nexthop=20.0.0.1 -- add Logical_Router \
 R2 static_routes @lrt
 
 # Wait for ovn-controller to catch up.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Send the packet again.
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
@@ -6605,11 +6624,11 @@ ovs-vsctl -- add-port br-int vif2 -- \
     options:rxq_pcap=hv1/vif2-rx.pcap \
     ofport-request=1
 
-
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
-
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+ovn-nbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
 
 for i in 1 2; do
     : > vif$i.expected
@@ -6764,10 +6783,6 @@ ovs-vsctl -- add-port br-int vif3 -- \
     options:rxq_pcap=pbr-hv/vif3-rx.pcap \
     ofport-request=1
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
-
 ls1_ro_mac=00:00:00:01:02:f1
 ls1_ro_ip=192.168.1.1
 
@@ -6952,10 +6967,6 @@ ovs-vsctl -- add-port br-int vif3 -- \
     options:rxq_pcap=pbr-hv/vif3-rx.pcap \
     ofport-request=1
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
-
 ls1_ro_mac=00:00:00:01:02:f1
 ls1_ro_ip=2001::1
 
@@ -7158,6 +7169,7 @@ ovn-nbctl lsp-del lp1
 ovn-nbctl ls-del ls1
 
 # wait for earlier changes to take effect
+wait_for_ports_up
 check ovn-nbctl --wait=sb sync
 
 # ensure OF rules are no longer present. There used to be a bug here.
@@ -7204,14 +7216,15 @@ ovn-nbctl acl-add lsw0 to-lport 1002 'outport == "lp1" && ip6 && icmp6'  allow-r
 ovn-nbctl acl-add lsw0 to-lport 1002 'outport == "lp2" && ip6 && icmp6'  allow-related
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
+# XXX The "sleep" here seems to be essential for ovn-northd-ddlog,
+# which may indicate that it needs improvement.
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 sleep 1
 
-# Given the name of a logical port, prints the name of the hypervisor
-# on which it is located.
-vif_to_hv() {
-    echo hv1${1%?}
-}
+ovn-nbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
+
 for i in 1 2; do
     : > $i.expected
 done
@@ -7225,11 +7238,6 @@ na_packet=fa163e940598fa163ea1f9ae86dd6000000000203afffd81ce49a9480000f8163efffe
 as hv1 ovs-appctl netdev-dummy/receive vif1 $ns_packet
 echo $na_packet >> 1.expected
 
-echo "------ hv1 dump ------"
-as hv1 ovs-vsctl show
-as hv1 ovs-ofctl -O OpenFlow13 show br-int
-as hv1 ovs-ofctl -O OpenFlow13 dump-flows br-int
-
 for i in 1 2; do
     OVN_CHECK_PACKETS([hv1/vif$i-tx.pcap], [$i.expected])
 done
@@ -7250,9 +7258,7 @@ ovn_attach n1 br-phys 192.168.0.1
 
 row=`ovn-nbctl create Address_Set name=set1 addresses=\"1.1.1.1\"`
 ovn-nbctl set Address_Set $row name=set1 addresses=\"1.1.1.1,1.1.1.2\"
-ovn-nbctl destroy Address_Set $row
-
-sleep 1
+ovn-nbctl --wait=hv destroy Address_Set $row
 
 # A bug previously existed in the address set support code
 # that caused ovn-controller to crash after an address set
@@ -7640,8 +7646,8 @@ ovs-vsctl -- add-port br-int hv1-vif3 -- \
     ofport-request=3
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Send ip packets between foo1 and foo2
 src_mac="0a0000a80103"
@@ -7848,32 +7854,11 @@ ovs-vsctl -- add-port br-int hv1-ls2lp2 -- \
     ofport-request=2
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
-
-echo "---------NB dump-----"
-ovn-nbctl show
-echo "---------------------"
-ovn-nbctl list logical_router
-echo "---------------------"
-ovn-nbctl list logical_router_port
-echo "---------------------"
-
-echo "---------SB dump-----"
-ovn-sbctl list datapath_binding
-echo "---------------------"
-ovn-sbctl list port_binding
-echo "---------------------"
-ovn-sbctl dump-flows
-echo "---------------------"
-ovn-sbctl list chassis
-ovn-sbctl list encap
-echo "---------------------"
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
-echo "------Flows dump-----"
-as hv1
-ovs-ofctl dump-flows
-echo "---------------------"
+ovn-sbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
 
 src_mac="f00000000003"
 dst_mac="f00000000001"
@@ -8256,18 +8241,18 @@ as hv1
 AT_CHECK([ovs-vsctl add-port br-int localvif1 -- set Interface localvif1 external_ids:iface-id=localvif1])
 
 # On hv1, check that there are no flows outputting bcast to tunnel
-OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=32 | ofctl_strip | grep output | wc -l` -eq 0])
+OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=37 | ofctl_strip | grep output | wc -l` -eq 0])
 
 # On hv2, check that no flow outputs bcast to tunnel to hv1.
 as hv2
-OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=32 | ofctl_strip | grep output | wc -l` -eq 0])
+OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=37 | ofctl_strip | grep output | wc -l` -eq 0])
 
 # Now bind vif2 on hv2.
 AT_CHECK([ovs-vsctl add-port br-int localvif2 -- set Interface localvif2 external_ids:iface-id=localvif2])
 
 # At this point, the broadcast flow on vif2 should be deleted.
-# because, there is now a localnet vif bound (table=32 programming logic)
-OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=32 | ofctl_strip | grep output | wc -l` -eq 0])
+# because there is now a localnet vif bound (table=37 programming logic)
+OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=37 | ofctl_strip | grep output | wc -l` -eq 0])
 
 # Verify that the local net patch port exists on hv2.
 OVS_WAIT_UNTIL([test `ovs-vsctl show | grep "Port patch-br-int-to-ln_port" | wc -l` -eq 1])
@@ -8319,6 +8304,7 @@ ovn-nbctl --wait=sb lsp-add lsw0 lp2
 ovn-nbctl lsp-set-addresses lp1 $lp1_mac
 ovn-nbctl lsp-set-addresses lp2 $lp2_mac
 ovn-nbctl --wait=sb sync
+wait_for_ports_up
 
 ovn-nbctl acl-add lsw0 to-lport 1000 'tcp.dst==80' drop
 ovn-nbctl --log --severity=alert --name=drop-flow acl-add lsw0 to-lport 1000 'tcp.dst==81' drop
@@ -8425,6 +8411,7 @@ ovn-nbctl --wait=sb lsp-add lsw0 lp2
 ovn-nbctl lsp-set-addresses lp1 $lp1_mac
 ovn-nbctl lsp-set-addresses lp2 $lp2_mac
 ovn-nbctl --wait=sb sync
+wait_for_ports_up
 
 
 # Add an ACL that rate-limits logs at 10 per second.
@@ -8515,6 +8502,7 @@ ovn-nbctl --wait=sb lsp-add lsw0 lp2
 ovn-nbctl lsp-set-addresses lp1 $lp1_mac
 ovn-nbctl lsp-set-addresses lp2 $lp2_mac
 ovn-nbctl --wait=sb sync
+wait_for_ports_up
 
 ovn-appctl -t ovn-controller vlog/set file:dbg
 
@@ -8562,6 +8550,7 @@ check ovs-vsctl add-br br-phys
 ovn_attach n1 br-phys 192.168.0.1
 check ovs-vsctl add-port br-int vif1 -- set Interface vif1 external-ids:iface-id=lp1 options:tx_pcap=vif1-tx.pcap options:rxq_pcap=vif1-rx.pcap ofport-request=1
 check ovs-vsctl add-port br-int vif2 -- set Interface vif2 external-ids:iface-id=lp2 options:tx_pcap=vif2-tx.pcap options:rxq_pcap=vif2-rx.pcap ofport-request=2
+wait_for_ports_up lp1 lp2
 
 AT_CAPTURE_FILE([trace])
 ovn_trace () {
@@ -8960,8 +8949,8 @@ ovs-vsctl -- add-port br-int vm2 -- \
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Test that ovn-controllers create ct-zone entry for container ports.
 foo1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-foo1)
@@ -8986,8 +8975,10 @@ bar2_zoneid=$(as hv2 ovs-vsctl get bridge br-int external_ids:ct-zone-bar2)
 AT_CHECK([test  -z $bar2_zoneid])
 
 # Add back bar2
+wait_for_ports_up
 ovn-nbctl lsp-add bar bar2 vm2 1 \
 -- lsp-set-addresses bar2 "f0:00:00:01:02:08 192.168.2.3"
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 bar2_zoneid=$(as hv2 ovs-vsctl get bridge br-int external_ids:ct-zone-bar2)
@@ -9126,6 +9117,13 @@ OVS_WAIT_UNTIL([test xup = x$(ovn-nbctl lsp-get-up vm1)])
 OVS_WAIT_UNTIL([test xup = x$(ovn-nbctl lsp-get-up foo1)])
 OVS_WAIT_UNTIL([test xup = x$(ovn-nbctl lsp-get-up bar1)])
 
+# Move VM1 to a new logical switch.
+ovn-nbctl ls-add mgmt2
+ovn-nbctl lsp-del vm1 -- lsp-add mgmt2 vm1
+OVS_WAIT_UNTIL([test xup = x$(ovn-nbctl lsp-get-up vm1)])
+OVS_WAIT_UNTIL([test xup = x$(ovn-nbctl lsp-get-up foo1)])
+OVS_WAIT_UNTIL([test xup = x$(ovn-nbctl lsp-get-up bar1)])
+
 as hv1 ovs-vsctl del-port vm1
 OVS_WAIT_UNTIL([test xdown = x$(ovn-nbctl lsp-get-up vm1)])
 OVS_WAIT_UNTIL([test xdown = x$(ovn-nbctl lsp-get-up foo1)])
@@ -9267,8 +9265,8 @@ ovn-nbctl --wait=hv lsp-add bob bob1 \
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 trim_zeros() {
     sed 's/\(00\)\{1,\}$//'
@@ -9375,7 +9373,8 @@ ovs-vsctl -- add-port br-int hv1-vif2 -- \
     ofport-request=2
 
 OVN_POPULATE_ARP
-sleep 2
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 as hv1 ovs-vsctl show
 
 echo "*************************"
@@ -9868,6 +9867,7 @@ check as gw1 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 check as gw2 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 check as ext1 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 
+wait_for_ports_up
 AT_CHECK([ovn-nbctl --wait=sb sync], [0], [ignore])
 
 ovn-sbctl dump-flows > sbflows
@@ -9935,13 +9935,9 @@ test_ip_packet()
     fi
     as ext1 reset_pcap_file ext1-vif1 ext1/vif1
 
-    sleep 1
-
     # Resend packet from foo1 to outside1
     check as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
 
-    sleep 1
-
     AT_CAPTURE_FILE([exp])
     AT_CAPTURE_FILE([rcv])
     check_packets() {
@@ -10131,6 +10127,7 @@ as gw1 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 as gw2 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 as ext1 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 
+wait_for_ports_up
 check ovn-nbctl --wait=sb sync
 
 ovn-sbctl dump-flows > sbflows
@@ -10143,8 +10140,7 @@ hv1_ch_uuid=$(fetch_column Chassis _uuid name=hv1)
 wait_column "$hv1_ch_uuid" HA_Chassis_Group ref_chassis
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 2
+check ovn-nbctl --wait=hv sync
 
 reset_pcap_file() {
     local iface=$1
@@ -10342,6 +10338,7 @@ check as hv3 ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 
 
 dnl Allow some time for ovn-northd and ovn-controller to catch up.
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 (echo "---------NB dump-----"
@@ -10386,12 +10383,12 @@ AT_CAPTURE_FILE([hv2flows])
 
 AT_CHECK(
   [# Check that redirect mapping is programmed only on hv2
-   grep table=33 hv1flows | grep =0x3,metadata=0x1 | wc -l
-   grep table=33 hv2flows | grep =0x3,metadata=0x1 | grep load:0x2- | wc -l
+   grep table=38 hv1flows | grep =0x3,metadata=0x1 | wc -l
+   grep table=38 hv2flows | grep =0x3,metadata=0x1 | grep load:0x2- | wc -l
 
    # Check that hv1 sends chassisredirect port traffic to hv2
-   grep table=32 hv1flows | grep =0x3,metadata=0x1 | grep output | wc -l
-   grep table=32 hv2flows | grep =0x3,metadata=0x1 | wc -l
+   grep table=37 hv1flows | grep =0x3,metadata=0x1 | grep output | wc -l
+   grep table=37 hv2flows | grep =0x3,metadata=0x1 | wc -l
 
    # Check that arp reply on distributed gateway port is only programmed on hv2
    grep arp hv1flows | grep load:0x2- | grep =0x2,metadata=0x1 | wc -l
@@ -10461,6 +10458,7 @@ OVS_WAIT_UNTIL([test 1 = `as hv2 ovs-vsctl show | \
 grep "Port patch-br-int-to-ln-alice" | wc -l`])
 
 dnl Allow some time for ovn-controller to catch up.
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 # ARP for router IP address from outside1
@@ -10534,8 +10532,8 @@ ovn-nbctl lsp-add foo foo2 \
 -- lsp-set-addresses foo2 "f0:00:00:01:02:06 192.168.1.3"
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 : > hv1-vif2.expected
 
@@ -10624,10 +10622,6 @@ AT_CHECK([ovn-nbctl lsp-set-addresses ln_port unknown])
 AT_CHECK([ovn-nbctl lsp-set-type ln_port localnet])
 AT_CHECK([ovn-nbctl --wait=hv lsp-set-options ln_port network_name=physnet1])
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 2
-
 # Expect no packets when hv2 bridge-mapping is not present
 : > packets
 OVN_CHECK_PACKETS([hv1/snoopvif-tx.pcap], [packets])
@@ -10847,50 +10841,54 @@ ovn-nbctl lsp-set-addresses ln-outside unknown
 ovn-nbctl lsp-set-type ln-outside localnet
 ovn-nbctl lsp-set-options ln-outside network_name=phys
 
-# Allow some time for ovn-northd and ovn-controller to catch up.
-check ovn-nbctl --wait=hv sync
-
 # Check that there is a logical flow in logical switch foo's pipeline
 # to set the outport to rp-foo (which is expected).
 OVS_WAIT_UNTIL([test 1 = `ovn-sbctl dump-flows foo | grep ls_in_l2_lkup | \
 grep rp-foo | grep -v is_chassis_resident | grep priority=50 -c`])
 
 # Set the option 'reside-on-redirect-chassis' for foo
-check ovn-nbctl --wait=hv set logical_router_port foo options:reside-on-redirect-chassis=true
+check ovn-nbctl set logical_router_port foo options:reside-on-redirect-chassis=true
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
 # Check that there is a logical flow in logical switch foo's pipeline
 # to set the outport to rp-foo with the condition is_chassis_redirect.
-ovn-sbctl dump-flows foo
-OVS_WAIT_UNTIL([test 1 = `ovn-sbctl dump-flows foo | grep ls_in_l2_lkup | \
+ovn-sbctl dump-flows foo > sbflows
+AT_CAPTURE_FILE([sbflows])
+OVS_WAIT_UNTIL([test 1 = `grep ls_in_l2_lkup sbflows | \
 grep rp-foo | grep is_chassis_resident | grep priority=50 -c`])
 
-echo "---------NB dump-----"
-ovn-nbctl show
-echo "---------------------"
-ovn-nbctl list logical_router
-echo "---------------------"
-ovn-nbctl list nat
-echo "---------------------"
-ovn-nbctl list logical_router_port
-echo "---------------------"
-
-echo "---------SB dump-----"
-ovn-sbctl list datapath_binding
-echo "---------------------"
-ovn-sbctl list port_binding
-echo "---------------------"
-ovn-sbctl dump-flows
-echo "---------------------"
-ovn-sbctl list chassis
-echo "---------------------"
+(echo "---------NB dump-----"
+ ovn-nbctl show
+ echo "---------------------"
+ ovn-nbctl list logical_router
+ echo "---------------------"
+ ovn-nbctl list nat
+ echo "---------------------"
+ ovn-nbctl list logical_router_port
+ echo "---------------------") > nbdump
+AT_CAPTURE_FILE([nbdump])
+
+(echo "---------SB dump-----"
+ ovn-sbctl list datapath_binding
+ echo "---------------------"
+ ovn-sbctl list port_binding
+ echo "---------------------"
+ ovn-sbctl list chassis
+ echo "---------------------") > sbdump
+AT_CAPTURE_FILE([sbdump])
 
 for chassis in hv1 hv2 hv3; do
-    as $chassis
-    echo "------ $chassis dump ----------"
-    ovs-vsctl show br-int
-    ovs-ofctl show br-int
-    ovs-ofctl dump-flows br-int
-    echo "--------------------------"
+    (as $chassis
+     echo "------ $chassis dump ----------"
+     ovs-vsctl show
+     ovs-ofctl show br-int
+     ovs-ofctl dump-flows br-int
+     echo "--------------------------") > ${chassis}dump
 done
+AT_CAPTURE_FILE([hv1dump])
+AT_CAPTURE_FILE([hv2dump])
+AT_CAPTURE_FILE([hv3dump])
 
 foo1_ip=$(ip_to_hex 192 168 1 2)
 gw_ip=$(ip_to_hex 172 16 1 6)
@@ -10940,8 +10938,8 @@ as hv3 reset_pcap_file hv3-vif1 hv3/vif1
 as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
 sleep 2
 
-# On hv1, table 32 check that no packet goes via the tunnel port
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=32 \
+# On hv1, table 37 check that no packet goes via the tunnel port
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=37 \
 | grep "NXM_NX_TUN_ID" | grep -v n_packets=0 | wc -l], [0], [[0
 ]])
 
@@ -11083,8 +11081,8 @@ ovs-vsctl -- add-port br-int hv1-vif3 -- \
     ofport-request=3
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 reset_pcap_file() {
     local iface=$1
@@ -11337,8 +11335,11 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+ovn-sbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
 
 # Send ip packets between foo1 and alice1
 src_mac="f00000010203"
@@ -11402,6 +11403,7 @@ for i in 1 2; do
         OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up lp${i}1` = xup])
 done
 
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 ovn-sbctl dump-flows
 
@@ -11553,6 +11555,7 @@ ovn-nbctl lsp-set-type ln-outside localnet
 ovn-nbctl lsp-set-options ln-outside network_name=phys
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 echo "---------NB dump-----"
@@ -11626,20 +11629,20 @@ echo $hv2_gw1_ofport
 echo $hv2_gw2_ofport
 
 echo "--- hv1 ---"
-as hv1 ovs-ofctl dump-flows br-int table=32
+as hv1 ovs-ofctl dump-flows br-int table=37
 
 echo "--- hv2 ---"
-as hv2 ovs-ofctl dump-flows br-int table=32
+as hv2 ovs-ofctl dump-flows br-int table=37
 
 gw1_chassis=$(fetch_column Chassis _uuid name=gw1)
 gw2_chassis=$(fetch_column Chassis _uuid name=gw2)
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv1_gw1_ofport,$hv1_gw2_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv2_gw1_ofport,$hv2_gw2_ofport \
 | wc -l], [0], [1
 ])
@@ -11676,15 +11679,16 @@ ovn-nbctl --id=@gc0 create Gateway_Chassis \
           set Logical_Router_Port outside 'gateway_chassis=[@gc0,@gc1]'
 
 
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # we make sure that the hypervisors noticed, and inverted the slave ports
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv1_gw2_ofport,$hv1_gw1_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv2_gw2_ofport,$hv2_gw1_ofport \
 | wc -l], [0], [1
 ])
@@ -11837,12 +11841,12 @@ ovn-nbctl set Logical_Router_Port outside ha_chassis_group=$hagrp1_uuid
 wait_row_count HA_Chassis_Group 1
 wait_row_count HA_Chassis 2
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv1_gw1_ofport,$hv1_gw2_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv2_gw1_ofport,$hv2_gw2_ofport \
 | wc -l], [0], [1
 ])
@@ -11894,12 +11898,12 @@ wait_column "$exp_ref_ch_list" HA_Chassis_Group ref_chassis
 # Increase the priority of gw2
 ovn-nbctl --wait=sb ha-chassis-group-add-chassis hagrp1 gw2 40
 
-OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv1 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv1_gw2_ofport,$hv1_gw1_ofport \
 | wc -l], [0], [1
 ])
 
-OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=32 | \
+OVS_WAIT_UNTIL([as hv2 ovs-ofctl dump-flows br-int table=37 | \
 grep active_backup | grep slaves:$hv2_gw2_ofport,$hv2_gw1_ofport \
 | wc -l], [0], [1
 ])
@@ -12041,6 +12045,7 @@ AT_CHECK([ovn-nbctl lsp-set-type ln_port localnet])
 AT_CHECK([ovn-nbctl lsp-set-options ln_port network_name=physnet1])
 
 # wait for earlier changes to take effect
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 reset_pcap_file() {
@@ -12241,6 +12246,7 @@ ovn-nbctl lsp-set-type ln-outside localnet
 ovn-nbctl lsp-set-options ln-outside network_name=phys
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # currently when ovn-controller is restarted, the old entry is deleted
@@ -12878,6 +12884,45 @@ test_tcp_syn_packet() {
     check as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
 
+# test_sctp_init_packet INPORT HV ETH_SRC ETH_DST IPV4_SRC IPV4_DST IP_CHKSUM SCTP_SPORT SCTP_DPORT SCTP_INIT_TAG SCTP_CHKSUM EXP_IP_CHKSUM EXP_SCTP_ABORT_CHKSUM
+#
+# Causes a packet to be received on INPORT of the hypervisor HV. The packet is an SCTP INIT chunk with
+# ETH_SRC, ETH_DST, IPV4_SRC, IPV4_DST, IP_CHKSUM, SCTP_SPORT, SCTP_DPORT, and SCTP_CHKSUM as specified.
+# The INIT "initiate_tag" will be set to SCTP_INIT_TAG.
+# EXP_IP_CHKSUM and EXP_SCTP_ABORT_CHKSUM are the ip and sctp checksums of the SCTP ABORT chunk generated from the ACL rule hit
+#
+# INPORT is an lport number, e.g. 11 for vif11.
+# HV is a hypervisor number.
+# ETH_SRC and ETH_DST are each 12 hex digits.
+# IPV4_SRC and IPV4_DST are each 8 hex digits.
+# SCTP_SPORT and SCTP_DPORT are 4 hex digits.
+# IP_CHKSUM and EXP_IP_CHKSUM are 4 hex digits.
+# SCTP_CHKSUM and EXP_SCTP_ABORT_CHKSUM are 8 hex digits.
+test_sctp_init_packet() {
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_chksum=$7
+    local sctp_sport=$8 sctp_dport=$9 sctp_init_tag=${10} sctp_chksum=${11}
+    local exp_ip_chksum=${12} exp_sctp_abort_chksum=${13}
+
+    local ip_ttl=ff
+    local eth_hdr=${eth_dst}${eth_src}0800
+    local ip_hdr=4500002500004000${ip_ttl}84${ip_chksum}${ipv4_src}${ipv4_dst}
+    local sctp_hdr=${sctp_sport}${sctp_dport}00000000${sctp_chksum}
+    local sctp_init=01000014${sctp_init_tag}0000000000010001${sctp_init_tag}
+
+    local packet=${eth_hdr}${ip_hdr}${sctp_hdr}${sctp_init}
+
+    local sctp_abort_ttl=3f
+    local reply_eth_hdr=${eth_src}${eth_dst}0800
+    local reply_ip_hdr=4500002400004000${sctp_abort_ttl}84${exp_ip_chksum}${ipv4_dst}${ipv4_src}
+    local reply_sctp_hdr=${sctp_dport}${sctp_sport}${sctp_init_tag}${exp_sctp_abort_chksum}
+    local reply_sctp_abort=06000004
+
+    local reply=${reply_eth_hdr}${reply_ip_hdr}${reply_sctp_hdr}${reply_sctp_abort}
+    echo $reply >> vif$inport.expected
+
+    check as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
+}
+
 # Create hypervisors hv[123].
 # Add vif1[123] to hv1, vif2[123] to hv2, vif3[123] to hv3.
 # Add all of the vifs to a single logical switch sw0.
@@ -12904,8 +12949,6 @@ for i in 1 2 3; do
 done
 
 OVN_POPULATE_ARP
-# allow some time for ovn-northd and ovn-controller to catch up.
-sleep 1
 
 for i in 1 2 3; do
     : > vif${i}1.expected
@@ -12916,6 +12959,7 @@ check ovn-nbctl --log acl-add sw0 from-lport 1000 "inport == \"sw0-p11\"" reject
 check ovn-nbctl --log acl-add sw0 from-lport 1000 "inport == \"sw0-p21\"" reject
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 ovn-sbctl dump-flows > sbflows
@@ -12931,6 +12975,10 @@ test_tcp_syn_packet 11 1 000000000011 000000000021 $(ip_to_hex 192 168 1 11) $(i
 test_tcp_syn_packet 21 2 000000000021 000000000011 $(ip_to_hex 192 168 1 21) $(ip_to_hex 192 168 1 11) 0000 8b40 3039 0000 b85f 70e4
 test_tcp_syn_packet 31 3 000000000031 000000000012 $(ip_to_hex 192 168 1 31) $(ip_to_hex 192 168 1 12) 0000 8b40 3039 0000 b854 70d9
 
+test_sctp_init_packet 11 1 000000000011 000000000021 $(ip_to_hex 192 168 1 11) $(ip_to_hex 192 168 1 21) 0000 8b40 3039 00000001 82112601 b7e5 10fe95b6
+test_sctp_init_packet 21 2 000000000021 000000000011 $(ip_to_hex 192 168 1 21) $(ip_to_hex 192 168 1 11) 0000 8b40 3039 00000002 C0379D5A b7e5 39f23aaf
+test_sctp_init_packet 31 3 000000000031 000000000012 $(ip_to_hex 192 168 1 31) $(ip_to_hex 192 168 1 12) 0000 8b40 3039 00000003 028E263C b7da 7124045b
+
 for i in 1 2 3; do
     OVN_CHECK_PACKETS([hv$i/vif${i}1-tx.pcap], [vif${i}1.expected])
 done
@@ -13062,8 +13110,8 @@ done
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # test_ip INPORT SRC_MAC DST_MAC SRC_IP DST_IP OUTPORT...
 #
@@ -13142,10 +13190,6 @@ for is in 1 2 3; do
   done
 done
 
-# Allow some time for packet forwarding.
-# XXX This can be improved.
-sleep 1
-
 # Now check the packets actually received against the ones expected.
 for i in 1 2 3; do
     for j in 1 2 3; do
@@ -13284,8 +13328,8 @@ done
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 lsp_to_mac() {
     echo f0:00:00:00:0${1:0:1}:${1:1:2}
@@ -13391,10 +13435,6 @@ for is in 1 2 3; do
   done
 done
 
-# Allow some time for packet forwarding.
-# XXX This can be improved.
-sleep 1
-
 # Now check the packets actually received against the ones expected.
 for i in 1 2 3; do
     for j in 1 2 3; do
@@ -13704,6 +13744,7 @@ grep conjunction.*conjunction.*conjunction | wc -l`])
 ovn-nbctl acl-del ls1 to-lport 1001 \
 'ip4 && ip4.src == $set1 && ip4.dst == $set1'
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 # priority=2001,ip,metadata=0x1,nw_dst=10.0.0.10 actions=conjunction(10,1/2)
 # priority=2001,ip,metadata=0x1,nw_dst=10.0.0.8 actions=conjunction(11,1/2)
@@ -13725,27 +13766,30 @@ AT_CLEANUP
 AT_SETUP([ovn -- Superseding ACLs with conjunction])
 ovn_start
 
-ovn-nbctl ls-add ls1
+check ovn-nbctl set nb_global . options:svc_monitor_mac=66:66:66:66:66:66
+check ovn-nbctl ls-add ls1
 
-ovn-nbctl lsp-add ls1 ls1-lp1 \
--- lsp-set-addresses ls1-lp1 "f0:00:00:00:00:01"
+check ovn-nbctl lsp-add ls1 ls1-lp1 \
+-- lsp-set-addresses ls1-lp1 "f0:00:00:00:00:01" \
+-- set logical_switch_port ls1-lp1 options:requested-tnl-key=1
 
-ovn-nbctl lsp-add ls1 ls1-lp2 \
--- lsp-set-addresses ls1-lp2 "f0:00:00:00:00:02"
+check ovn-nbctl lsp-add ls1 ls1-lp2 \
+-- lsp-set-addresses ls1-lp2 "f0:00:00:00:00:02" \
+-- set logical_switch_port ls1-lp2 options:requested-tnl-key=2
 
 net_add n1
 sim_add hv1
 
 as hv1
-ovs-vsctl add-br br-phys
+check ovs-vsctl add-br br-phys
 ovn_attach n1 br-phys 192.168.0.1
-ovs-vsctl -- add-port br-int hv1-vif1 -- \
+check ovs-vsctl -- add-port br-int hv1-vif1 -- \
     set interface hv1-vif1 external-ids:iface-id=ls1-lp1 \
     options:tx_pcap=hv1/vif1-tx.pcap \
     options:rxq_pcap=hv1/vif1-rx.pcap \
     ofport-request=1
 
-ovs-vsctl -- add-port br-int hv1-vif2 -- \
+check ovs-vsctl -- add-port br-int hv1-vif2 -- \
     set interface hv1-vif2 external-ids:iface-id=ls1-lp2 \
     options:tx_pcap=hv1/vif2-tx.pcap \
     options:rxq_pcap=hv1/vif2-rx.pcap \
@@ -13765,7 +13809,8 @@ test_ip() {
     local packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}\
 ${dst_ip}0035111100080000
     shift; shift; shift; shift; shift
-    as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
+    check as hv1 ovs-appctl netdev-dummy/receive hv1-vif1 $packet
+    ovs-appctl ofproto/trace br-int in_port=hv1-vif1 "$packet" > trace
     for outport; do
         echo $packet >> $outport.expected
     done
@@ -13774,19 +13819,51 @@ ${dst_ip}0035111100080000
 reset_pcap_file() {
     local iface=$1
     local pcap_file=$2
-    ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+    check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
 options:rxq_pcap=dummy-rx.pcap
     rm -f ${pcap_file}*.pcap
-    ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+    check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
 options:rxq_pcap=${pcap_file}-rx.pcap
 }
 
 # Add a default deny ACL and an allow ACL for specific IP traffic.
-ovn-nbctl acl-add ls1 to-lport 2 'arp' allow
-ovn-nbctl acl-add ls1 to-lport 1 'ip4' drop
-ovn-nbctl acl-add ls1 to-lport 3 '(ip4.src==10.0.0.1 || ip4.src==10.0.0.2) && (ip4.dst == 10.0.0.3 || ip4.dst == 10.0.0.4)' allow
-ovn-nbctl acl-add ls1 to-lport 3 '(ip4.src==10.0.0.1 || ip4.src==10.0.0.42) && (ip4.dst == 10.0.0.3 || ip4.dst == 10.0.0.4)' allow
-ovn-nbctl --wait=hv sync
+check ovn-nbctl acl-add ls1 to-lport 2 'arp' allow
+check ovn-nbctl acl-add ls1 to-lport 1 'ip4' drop
+check ovn-nbctl acl-add ls1 to-lport 3 '(ip4.src==10.0.0.1 || ip4.src==10.0.0.2) && (ip4.dst == 10.0.0.3 || ip4.dst == 10.0.0.4)' allow
+check ovn-nbctl acl-add ls1 to-lport 3 '(ip4.src==10.0.0.1 || ip4.src==10.0.0.42) && (ip4.dst == 10.0.0.3 || ip4.dst == 10.0.0.4)' allow
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
+
+ovn-sbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
+
+# Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.3, 10.0.0.4 should be allowed.
+for src in `seq 1 2`; do
+    for dst in `seq 3 4`; do
+        sip=`ip_to_hex 10 0 0 $src`
+        dip=`ip_to_hex 10 0 0 $dst`
+
+        test_ip 1 f00000000001 f00000000002 $sip $dip 2
+    done
+done
+
+# Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.5 should be dropped.
+dip=`ip_to_hex 10 0 0 5`
+for src in `seq 1 2`; do
+    sip=`ip_to_hex 10 0 0 $src`
+
+    test_ip 1 f00000000001 f00000000002 $sip $dip
+done
+
+cat 2.expected > expout
+$PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv1/vif2-tx.pcap > 2.packets
+AT_CHECK([cat 2.packets], [0], [expout])
+reset_pcap_file hv1-vif2 hv1/vif2
+rm -f 2.packets
+> 2.expected
+
+# Trigger recompute and make sure that the traffic still works as expected.
+as hv1 ovn-appctl -t ovn-controller recompute
 
 # Traffic 10.0.0.1, 10.0.0.2 -> 10.0.0.3, 10.0.0.4 should be allowed.
 for src in `seq 1 2`; do
@@ -13814,9 +13891,9 @@ rm -f 2.packets
 > 2.expected
 
 # Add two less restrictive allow ACLs for src IP 10.0.0.1.
-ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1 || ip4.src==10.0.0.1' allow
-ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow
-ovn-nbctl --wait=hv sync
+check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1 || ip4.src==10.0.0.1' allow
+check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow
+check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the less restrictive flows should have been installed.
 AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
@@ -13858,11 +13935,9 @@ reset_pcap_file hv1-vif2 hv1/vif2
 rm -f 2.packets
 > 2.expected
 
-#sleep infinity
-
 # Remove the first less restrictive allow ACL.
-ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1 || ip4.src==10.0.0.1'
-ovn-nbctl --wait=hv sync
+check ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1 || ip4.src==10.0.0.1'
+check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the second less restrictive allow ACL should have been installed.
 AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
@@ -13878,8 +13953,8 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
 ])
 
 # Remove the less restrictive allow ACL.
-ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1'
-ovn-nbctl --wait=hv sync
+check ovn-nbctl acl-del ls1 to-lport 3 'ip4.src==10.0.0.1'
+check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the 10.0.0.1 conjunction should have been reinstalled.
 AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
@@ -13917,8 +13992,8 @@ $PYTHON "$ovs_srcdir/utilities/ovs-pcap.in" hv1/vif2-tx.pcap > 2.packets
 AT_CHECK([cat 2.packets], [0], [expout])
 
 # Re-add the less restrictive allow ACL for src IP 10.0.0.1
-ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow
-ovn-nbctl --wait=hv sync
+check ovn-nbctl acl-add ls1 to-lport 3 'ip4.src==10.0.0.1' allow
+check ovn-nbctl --wait=hv sync
 
 # Check OVS flows, the less restrictive flows should have been installed.
 AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
@@ -13933,6 +14008,29 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
  table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
 ])
 
+# Add another ACL that overlaps with the existing less restrictive ones.
+check ovn-nbctl acl-add ls1 to-lport 3 'udp || ((ip4.src==10.0.0.1 || ip4.src==10.0.0.2) && (ip4.dst == 10.0.0.3 || ip4.dst == 10.0.0.4))' allow
+check ovn-nbctl --wait=hv sync
+
+# Check OVS flows, the same conjunctive flows as above should still be there,
+# with an additional conjunction action.
+#
+# New non-conjunctive flows should be added to match on 'udp'.
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
+   grep "priority=1003" | \
+   sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl
+ table=45, priority=1003,conj_id=2,ip,metadata=0x1 actions=resubmit(,46)
+ table=45, priority=1003,conj_id=3,ip,metadata=0x1 actions=resubmit(,46)
+ table=45, priority=1003,conj_id=4,ip,metadata=0x1 actions=resubmit(,46)
+ table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.3 actions=conjunction(),conjunction(),conjunction()
+ table=45, priority=1003,ip,metadata=0x1,nw_dst=10.0.0.4 actions=conjunction(),conjunction(),conjunction()
+ table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.1 actions=resubmit(,46)
+ table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.2 actions=conjunction(),conjunction()
+ table=45, priority=1003,ip,metadata=0x1,nw_src=10.0.0.42 actions=conjunction()
+ table=45, priority=1003,udp,metadata=0x1 actions=resubmit(,46)
+ table=45, priority=1003,udp6,metadata=0x1 actions=resubmit(,46)
+])
+
 OVN_CLEANUP([hv1])
 AT_CLEANUP
 
@@ -13983,8 +14081,8 @@ ovn-nbctl create Address_Set name=set1 addresses=\"f0:00:00:00:00:11\",\"f0:00:0
 OVN_POPULATE_ARP
 
 # Allow some time for ovn-northd and ovn-controller to catch up.
-# XXX This should be more systematic.
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 # Make sure there is no attempt to adding duplicated flows by ovn-controller
 AT_FAIL_IF([test -n "`grep duplicate hv1/ovn-controller.log`"])
@@ -14224,6 +14322,7 @@ done
 
 OVN_POPULATE_ARP
 # allow some time for ovn-northd and ovn-controller to catch up.
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 1 254) 0000 f87c ea96
@@ -14294,6 +14393,45 @@ test_tcp_syn_packet() {
     as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
 
+# test_sctp_init_packet INPORT HV ETH_SRC ETH_DST IPV4_SRC IPV4_DST IP_CHKSUM SCTP_SPORT SCTP_DPORT SCTP_INIT_TAG SCTP_CHKSUM EXP_IP_CHKSUM EXP_SCTP_ABORT_CHKSUM
+#
+# Causes a packet to be received on INPORT of the hypervisor HV. The packet is an SCTP INIT chunk with
+# ETH_SRC, ETH_DST, IPV4_SRC, IPV4_DST, IP_CHKSUM, SCTP_SPORT, SCTP_DPORT, and SCTP_CHKSUM as specified.
+# The INIT "initiate_tag" will be set to SCTP_INIT_TAG.
+# EXP_IP_CHKSUM and EXP_SCTP_ABORT_CHKSUM are the ip and sctp checksums of the SCTP ABORT chunk generated by OVN logical router
+#
+# INPORT is an lport number, e.g. 1 for vif1.
+# HV is a hypervisor number.
+# ETH_SRC and ETH_DST are each 12 hex digits.
+# IPV4_SRC and IPV4_DST are each 8 hex digits.
+# SCTP_SPORT and SCTP_DPORT are 4 hex digits.
+# IP_CHKSUM and EXP_IP_CHKSUM are 4 hex digits.
+# SCTP_CHKSUM and EXP_SCTP_ABORT_CHKSUM are 8 hex digits.
+test_sctp_init_packet() {
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_chksum=$7
+    local sctp_sport=$8 sctp_dport=$9 sctp_init_tag=${10} sctp_chksum=${11}
+    local exp_ip_chksum=${12} exp_sctp_abort_chksum=${13}
+
+    local ip_ttl=ff
+    local eth_hdr=${eth_dst}${eth_src}0800
+    local ip_hdr=4500002500004000${ip_ttl}84${ip_chksum}${ipv4_src}${ipv4_dst}
+    local sctp_hdr=${sctp_sport}${sctp_dport}00000000${sctp_chksum}
+    local sctp_init=01000014${sctp_init_tag}0000000000010001${sctp_init_tag}
+
+    local packet=${eth_hdr}${ip_hdr}${sctp_hdr}${sctp_init}
+
+    local sctp_abort_ttl=3e
+    local reply_eth_hdr=${eth_src}${eth_dst}0800
+    local reply_ip_hdr=4500002400004000${sctp_abort_ttl}84${exp_ip_chksum}${ipv4_dst}${ipv4_src}
+    local reply_sctp_hdr=${sctp_dport}${sctp_sport}${sctp_init_tag}${exp_sctp_abort_chksum}
+    local reply_sctp_abort=06000004
+
+    local reply=${reply_eth_hdr}${reply_ip_hdr}${reply_sctp_hdr}${reply_sctp_abort}
+    echo $reply >> vif$inport.expected
+
+    check as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
+}
+
 # test_tcp6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_ROUTER TCP_SPORT TCP_DPORT TCP_CHKSUM EXP_TCP_RST_CHKSUM
 #
 # Causes a packet to be received on INPORT of the hypervisor HV. The packet is a TCP syn segment with
@@ -14314,6 +14452,36 @@ test_tcp6_packet() {
     as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
 }
 
+# test_sctp6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_ROUTER SCTP_SPORT SCTP_DPORT SCTP_INIT_TAG SCTP_CHKSUM EXP_SCTP_ABORT_CHKSUM
+#
+# Causes a packet to be received on INPORT of the hypervisor HV. The packet is an SCTP INIT chunk with
+# ETH_SRC, ETH_DST, IPV6_SRC, IPV6_ROUTER, SCTP_SPORT, SCTP_DPORT and SCTP_CHKSUM as specified.
+# The INIT "initiate_tag" will be set to SCTP_INIT_TAG.
+# EXP_SCTP_ABORT_CHKSUM is the sctp checksum of the SCTP ABORT chunk generated by OVN logical router
+test_sctp6_packet() {
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv6_src=$5 ipv6_router=$6
+    local sctp_sport=$7 sctp_dport=$8 sctp_init_tag=$9 sctp_chksum=${10}
+    local exp_sctp_abort_chksum=${11}
+    shift 11
+
+    local eth_hdr=${eth_dst}${eth_src}86dd
+    local ip_hdr=60000000002084ff${ipv6_src}${ipv6_router}
+    local sctp_hdr=${sctp_sport}${sctp_dport}00000000${sctp_chksum}
+    local sctp_init=01000014${sctp_init_tag}0000000000010001${sctp_init_tag}
+
+    local packet=${eth_hdr}${ip_hdr}${sctp_hdr}${sctp_init}
+
+    local reply_eth_hdr=${eth_src}${eth_dst}86dd
+    local reply_ip_hdr=600000000010843e${ipv6_router}${ipv6_src}
+    local reply_sctp_hdr=${sctp_dport}${sctp_sport}${sctp_init_tag}${exp_sctp_abort_chksum}
+    local reply_sctp_abort=06000004
+
+    local reply=${reply_eth_hdr}${reply_ip_hdr}${reply_sctp_hdr}${reply_sctp_abort}
+    echo $reply >> vif$inport.expected
+
+    check as hv$hv ovs-appctl netdev-dummy/receive vif$inport $packet
+}
+
 # test_ip6_packet INPORT HV ETH_SRC ETH_DST IPV6_SRC IPV6_DST IPV6_PROTO IPV6_LEN DATA EXP_ICMP_CODE EXP_ICMP_CHKSUM
 #
 # Causes a packet to be received on INPORT of the hypervisor HV. The packet is an IPv6
@@ -14365,16 +14533,17 @@ done
 
 OVN_POPULATE_ARP
 # allow some time for ovn-northd and ovn-controller to catch up.
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 1 254) 11 0000 f87c f485 0303
-test_ip_packet 1 1 000000000001 00000000ff01 $(ip_to_hex 192 168 1 1) $(ip_to_hex 192 168 1 254) 84 0000 f87c f413 0302
 test_ip6_packet 1 1 000000000001 00000000ff01 20010db8000100000000000000000011 20010db8000100000000000000000001 11 0015 dbb8303900155bac6b646f65206676676e6d66720a 0104 1d31
 OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [vif1.expected])
 
 test_tcp_syn_packet 2 2 000000000002 00000000ff02 $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 2 254) 0000 8b40 3039 0000 b680 6e05
-test_ip6_packet 2 2 000000000002 00000000ff02 20010db8000200000000000000000011 20010db8000200000000000000000001 84 0004 01020304 0103 5e74
+test_sctp_init_packet 2 2 000000000002 00000000ff02 $(ip_to_hex 192 168 2 1) $(ip_to_hex 192 168 2 254) 0000 8b40 3039 00000001 82112601 b606 10fe95b6
 test_tcp6_packet 2 2 000000000002 00000000ff02 20010db8000200000000000000000011 20010db8000200000000000000000001 8b40 3039 0000 98cd
+test_sctp6_packet 2 2 000000000002 00000000ff02 20010db8000200000000000000000011 20010db8000200000000000000000001 8b40 3039 00000002 C0379D5A 39f23aaf
 OVN_CHECK_PACKETS([hv2/vif2-tx.pcap], [vif2.expected])
 
 OVN_CLEANUP([hv1], [hv2])
@@ -14439,7 +14608,8 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 
 OVN_POPULATE_ARP
 
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 packet="inport==\"sw1-p1\" && eth.src==$sw1_p1_mac && eth.dst==$sw1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$sw1_p1_ip && ip4.dst==$sw2_p1_ip &&
@@ -14632,6 +14802,8 @@ OVS_WAIT_UNTIL(
 logical_port=ls1-lp_ext1`
     test "$chassis" = "$hv1_uuid"])
 
+wait_for_ports_up ls1-lp_ext1
+
 # There should be DHCPv4/v6 OF flows for the ls1-lp_ext1 port in hv1
 (ovn-sbctl dump-flows lr0; ovn-sbctl dump-flows ls1) > sbflows
 as hv1 ovs-ofctl dump-flows br-int > brintflows
@@ -14912,6 +15084,7 @@ OVS_WAIT_UNTIL(
     [chassis=`ovn-sbctl --bare --columns chassis find port_binding \
 logical_port=ls1-lp_ext1`
     test "$chassis" = "$hv2_uuid"])
+wait_for_ports_up ls1-lp_ext1
 
 # There should be OF flows for DHCP4/v6 for the ls1-lp_ext1 port in hv2
 AT_CHECK([as hv2 ovs-ofctl dump-flows br-int | \
@@ -15026,6 +15199,7 @@ OVS_WAIT_UNTIL(
     [chassis=`ovn-sbctl --bare --columns chassis find port_binding \
 logical_port=ls1-lp_ext1`
     test "$chassis" = "$hv1_uuid"])
+wait_for_ports_up ls1-lp_ext1
 
 as hv1
 ovs-vsctl show
@@ -15106,6 +15280,7 @@ OVS_WAIT_UNTIL(
     [chassis=`ovn-sbctl --bare --columns chassis find port_binding \
 logical_port=ls1-lp_ext1`
     test "$chassis" = "$hv3_uuid"])
+wait_for_ports_up ls1-lp_ext1
 
 as hv1
 ovs-vsctl show
@@ -15190,11 +15365,12 @@ OVS_WAIT_UNTIL(
     [chassis=`ovn-sbctl --bare --columns chassis find port_binding \
 logical_port=ls1-lp_ext1`
     test "$chassis" = "$hv1_uuid"])
+wait_for_ports_up ls1-lp_ext1
 
 # There should be a flow in hv2 to drop traffic from ls1-lp_ext1 destined
 # to router mac.
 AT_CHECK([as hv2 ovs-ofctl dump-flows br-int \
-table=28,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
+table=30,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
 grep -c "actions=drop"], [0], [1
 ])
 
@@ -15207,6 +15383,7 @@ OVS_WAIT_UNTIL(
     [chassis=`ovn-sbctl --bare --columns chassis find port_binding \
 logical_port=ls1-lp_ext1`
     test "$chassis" = "$hv2_uuid"])
+wait_for_ports_up ls1-lp_ext1
 
 as hv1
 OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
@@ -15357,7 +15534,8 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 
 OVN_POPULATE_ARP
 
-sleep 1
+wait_for_ports_up
+check ovn-nbctl --wait=hv sync
 
 packet="inport==\"sw1-p1\" && eth.src==$sw1_p1_mac && eth.dst==$sw1_ro_mac &&
        ip4 && ip.ttl==64 && ip4.src==$sw1_p1_ip && ip4.dst==$sw2_p1_ip &&
@@ -15640,6 +15818,7 @@ test_ip6_packet_larger() {
     fi
 }
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 ovn-nbctl show > nbdump
@@ -15789,6 +15968,7 @@ ovn-nbctl lsp-add sw1 rp-sw1 -- set Logical_Switch_Port rp-sw1 \
 
 ovn-nbctl lsp-add sw0 sw0-p0 \
     -- lsp-set-addresses sw0-p0 "f0:00:00:01:02:03 192.168.1.2 2001::2"
+
 ovn-nbctl lsp-add sw0 sw0-p1 \
     -- lsp-set-addresses sw0-p1 "f0:00:00:11:02:03 192.168.1.3 2001::3"
 
@@ -15799,6 +15979,7 @@ ovn-nbctl lr-nat-add lr0 snat 172.16.1.1 192.168.1.0/24
 ovn-nbctl lr-nat-add lr0 snat 2002::1 2001::/64
 
 OVN_POPULATE_ARP
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 ovn-sbctl dump-flows > sbflows
@@ -15847,6 +16028,14 @@ ovn-nbctl --wait=hv sync
 ovn-sbctl dump-flows > sbflows2
 AT_CAPTURE_FILE([sbflows2])
 
+# create a route policy for pkt marking
+check ovn-nbctl lr-policy-add lr0 2000 "ip4.src == 192.168.1.3" allow
+policy=$(fetch_column nb:Logical_Router_Policy _uuid priority=2000)
+check ovn-nbctl set logical_router_policy $policy options:pkt_mark=100
+as hv2
+# add a flow in egress pipeline to check pkt marking
+ovs-ofctl --protocols=OpenFlow13 add-flow br-int "table=37,priority=200,ip,nw_src=172.16.1.2,pkt_mark=0x64 actions=resubmit(,38)"
+
 dst_ip=$(ip_to_hex 172 16 2 10)
 fip_ip=$(ip_to_hex 172 16 1 2)
 src_ip=$(ip_to_hex 192 168 1 3)
@@ -15857,6 +16046,8 @@ echo $(get_arp_req f00000010204 $fip_ip $gw_router_ip) >> expected
 send_arp_reply 2 1 $gw_router_mac f00000010204 $gw_router_ip $fip_ip
 echo "${gw_router_mac}f0000001020408004500001c00004000fe0121b4${fip_ip}${dst_ip}${data}" >> expected
 
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=37 | grep pkt_mark=0x64 | grep -q n_packets=1],[0])
+
 OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
 
 OVN_CLEANUP([hv1],[hv2])
@@ -16045,6 +16236,7 @@ for i in 1 2 3 4 5; do
 done
 
 dnl Wait for the changes to be propagated
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 dnl Assert that each Chassis has a tunnel formed to every other Chassis
@@ -16324,6 +16516,7 @@ ovn-nbctl lrp-add router router-to-ls2 00:00:01:01:02:05 192.168.2.3/24
 ovn-nbctl lsp-add ls1 ls1-to-router -- set Logical_Switch_Port ls1-to-router type=router options:router-port=router-to-ls1 -- lsp-set-addresses ls1-to-router router
 ovn-nbctl lsp-add ls2 ls2-to-router -- set Logical_Switch_Port ls2-to-router type=router options:router-port=router-to-ls2 -- lsp-set-addresses ls2-to-router router
 
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 #ovn-sbctl dump-flows
 
@@ -16500,6 +16693,7 @@ ovn-nbctl lsp-set-type sw0-vir virtual
 ovn-nbctl set logical_switch_port sw0-vir options:virtual-ip=10.0.0.10
 ovn-nbctl set logical_switch_port sw0-vir options:virtual-parents=sw0-p1,sw0-p2,sw0-p3
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 # Check that logical flows are added for sw0-vir in lsp_in_arp_rsp pipeline
@@ -16555,12 +16749,10 @@ spa=$(ip_to_hex 10 0 0 10)
 tpa=$(ip_to_hex 10 0 0 10)
 send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
-OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
-logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
-
-AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
-logical_port=sw0-vir) = xsw0-p1])
-
+wait_row_count Port_Binding 1 logical_port=sw0-vir chassis=$hv1_ch_uuid
+check_row_count Port_Binding 1 logical_port=sw0-vir virtual_parent=sw0-p1
+wait_for_ports_up sw0-vir
+check ovn-nbctl --wait=hv sync
 
 # There should be an arp resolve flow to resolve the virtual_ip with the
 # sw0-p1's MAC.
@@ -16578,6 +16770,8 @@ ovn-sbctl clear port_binding $pb_uuid virtual_parent
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns chassis find port_binding \
 logical_port=sw0-vir) = x])
 
+wait_row_count nb:Logical_Switch_Port 1 up=false name=sw0-vir
+
 # From sw0-p0 resend GARP for 10.0.0.10. hv1 should reclaim sw0-vir
 # and sw0-p1 should be its virtual_parent.
 send_garp 1 1 $eth_src $eth_dst $spa $tpa
@@ -16588,6 +16782,8 @@ logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p1])
 
+wait_for_ports_up sw0-vir
+
 # From sw0-p3 send GARP for 10.0.0.10. hv1 should claim sw0-vir
 # and sw0-p3 should be its virtual_parent.
 eth_src=505400000005
@@ -16602,6 +16798,7 @@ logical_port=sw0-vir) = x$hv1_ch_uuid], [0], [])
 OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p3])
 
+wait_for_ports_up sw0-vir
 
 # There should be an arp resolve flow to resolve the virtual_ip with the
 # sw0-p2's MAC.
@@ -16627,6 +16824,7 @@ logical_port=sw0-vir) = x$hv2_ch_uuid], [0], [])
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p2])
 
+wait_for_ports_up sw0-vir
 
 # There should be an arp resolve flow to resolve the virtual_ip with the
 # sw0-p3's MAC.
@@ -16652,6 +16850,8 @@ sleep 1
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p1])
 
+wait_for_ports_up sw0-vir
+
 ovn-sbctl dump-flows lr0 > lr0-flows5
 AT_CAPTURE_FILE([lr0-flows5])
 AT_CHECK([grep lr_in_arp_resolve lr0-flows5 | grep "reg0 == 10.0.0.10" | sed 's/table=../table=??/'], [0], [dnl
@@ -16668,6 +16868,8 @@ sleep 1
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = x])
 
+wait_row_count nb:Logical_Switch_Port 1 up=false name=sw0-vir
+
 # Since the sw0-vir is not claimed by any chassis, eth.dst should be set to
 # zero if the ip4.dst is the virtual ip.
 ovn-sbctl dump-flows lr0 > lr0-flows6
@@ -16691,6 +16893,8 @@ sleep 1
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = xsw0-p2])
 
+wait_for_ports_up sw0-vir
+
 ovn-sbctl dump-flows lr0 > lr0-flows7
 AT_CAPTURE_FILE([lr0-flows7])
 AT_CHECK([grep lr_in_arp_resolve lr0-flows7 | grep "reg0 == 10.0.0.10" | sed 's/table=../table=??/'], [0], [dnl
@@ -16705,6 +16909,8 @@ logical_port=sw0-vir) = x], [0], [])
 AT_CHECK([test x$(ovn-sbctl --bare --columns virtual_parent find port_binding \
 logical_port=sw0-vir) = x])
 
+wait_row_count nb:Logical_Switch_Port 1 up=false name=sw0-vir
+
 # Clear virtual_ip column of sw0-vir. There should be no bind_vport flows.
 ovn-nbctl --wait=hv remove logical_switch_port sw0-vir options virtual-ip
 
@@ -16807,22 +17013,22 @@ ovs-vsctl -- add-port br-int vif33 -- \
         options:rxq_pcap=hv$i/vif33-rx.pcap \
         ofport-request=33
 
-ovn-nbctl --wait=hv set NB_Global . options:controller_event=true
-ovn-nbctl lb-add lb0 192.168.1.100:80 ""
+ovn-nbctl --event lb-add lb0 192.168.1.100:80 ""
 ovn-nbctl ls-lb-add sw0 lb0
 uuid_lb0=$(ovn-nbctl --bare --columns=_uuid find load_balancer name=lb0)
 
-ovn-nbctl lb-add lb1 192.168.2.100:80 ""
+ovn-nbctl --event lb-add lb1 192.168.2.100:80 ""
 ovn-nbctl lr-lb-add lr0 lb1
 uuid_lb1=$(ovn-nbctl --bare --columns=_uuid find load_balancer name=lb1)
 
-ovn-nbctl lb-add lb2 [[2001::10]]:50051 ""
+ovn-nbctl --event lb-add lb2 [[2001::10]]:50051 ""
 ovn-nbctl ls-lb-add sw0 lb2
 uuid_lb2=$(ovn-nbctl --bare --columns=_uuid find load_balancer name=lb2)
 
 ovn-nbctl --wait=hv meter-add event-elb drop 100 pktps 10
 
 OVN_POPULATE_ARP
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 ovn-sbctl lflow-list > sbflows
 AT_CAPTURE_FILE([sbflows])
@@ -16889,6 +17095,8 @@ AT_CHECK_UNQUOTED([ovn-sbctl get controller_event $uuid event_info:load_balancer
 "$uuid_lb2"
 ])
 
+AT_CHECK_UNQUOTED([ovn-trace sw0 'inport == "sw0-p11" && eth.src == 00:00:00:00:00:11 && ip4.dst == 192.168.1.100 && tcp && tcp.dst == 80' | grep -q 'event = "empty_lb_backends"'], [0])
+
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
 
@@ -17159,6 +17367,7 @@ AT_CAPTURE_FILE([sbflows3])
 cp ovn-sb/ovn-sb.db ovn-sb3.db
 ovn-sbctl dump-flows > sbflows3
 
+AS_BOX([IGMP traffic test 1])
 # Send traffic and make sure it gets forwarded only on the two ports that
 # joined.
 > expected
@@ -17207,6 +17416,7 @@ send_igmp_v3_report hv1-vif1 hv1 \
 wait_row_count IGMP_Group 1 address=239.0.1.68
 check ovn-nbctl --wait=hv sync
 
+AS_BOX([IGMP traffic test 2])
 # Send traffic and make sure it gets forwarded only on the port that joined.
 as hv1 reset_pcap_file hv1-vif1 hv1/vif1
 as hv2 reset_pcap_file hv2-vif1 hv2/vif1
@@ -17246,6 +17456,7 @@ send_igmp_v3_report hv1-vif1 hv1 \
 # Check that the IGMP Group is learned.
 wait_row_count IGMP_Group 1 address=224.0.0.42
 
+AS_BOX([IGMP traffic test 3])
 # Send traffic and make sure it gets flooded to all ports.
 as hv1 reset_pcap_file hv1-vif1 hv1/vif1
 as hv1 reset_pcap_file hv1-vif2 hv1/vif2
@@ -17275,6 +17486,7 @@ check ovn-nbctl set Logical_Switch sw2 \
     other_config:mcast_eth_src="00:00:00:00:02:fe" \
     other_config:mcast_ip4_src="20.0.0.254"
 
+AS_BOX([IGMP traffic test 4])
 # Wait for 1 query interval (1 sec) and check that two queries are generated.
 > expected
 store_igmp_v3_query 0000000002fe $(ip_to_hex 20 0 0 254) 84dd expected
@@ -17296,6 +17508,7 @@ check ovn-nbctl set Logical_Switch sw3       \
 
 check ovn-nbctl --wait=hv sync
 
+AS_BOX([IGMP traffic test 5])
 # Send traffic from sw3 and make sure rtr doesn't relay it.
 > expected_empty
 
@@ -17345,6 +17558,7 @@ send_igmp_v3_report hv2-vif3 hv2 \
 wait_row_count IGMP_Group 2 address=239.0.1.68
 check ovn-nbctl --wait=hv sync
 
+AS_BOX([IGMP traffic test 6])
 # Send traffic from sw3 and make sure it is relayed by rtr.
 # to ports that joined.
 > expected_routed_sw1
@@ -17394,6 +17608,7 @@ send_igmp_v3_report hv1-vif4 hv1 \
 wait_row_count IGMP_Group 3 address=239.0.1.68
 check ovn-nbctl --wait=hv sync
 
+AS_BOX([IGMP traffic test 7])
 # Send traffic from sw3 and make sure it is relayed by rtr
 # to ports that joined.
 > expected_routed_sw1
@@ -17493,6 +17708,7 @@ send_igmp_v3_report hv1-vif2 hv1 \
 wait_row_count IGMP_Group 1 address=239.0.1.68
 check ovn-nbctl --wait=hv sync
 
+AS_BOX([IGMP traffic test 8])
 # Send traffic from sw1-p21
 send_ip_multicast_pkt hv2-vif1 hv2 \
     000000000001 01005e000144 \
@@ -17790,6 +18006,7 @@ check ovs-vsctl -- add-port br-int hv2-vif4 -- \
     ofport-request=1
 check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
 
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 AT_CAPTURE_FILE([sbflows])
@@ -18470,6 +18687,7 @@ m4_define([DVR_N_S_ARP_HANDLING],
 
    # Set a hypervisor as gateway chassis, for router port 172.31.0.1
    ovn-nbctl lrp-set-gateway-chassis router-to-underlay hv3
+   wait_for_ports_up
    ovn-nbctl --wait=sb sync
 
    wait_row_count Port_Binding 1 logical_port=cr-router-to-underlay
@@ -18689,6 +18907,7 @@ m4_define([DVR_N_S_PING],
    ovn-nbctl lrp-set-gateway-chassis router-to-underlay hv3
    ovn-nbctl lrp-set-redirect-type router-to-underlay bridged
 
+   wait_for_ports_up
    ovn-nbctl --wait=sb sync
 
 
@@ -18816,7 +19035,7 @@ m4_define([DVR_N_S_PING],
    OVN_CHECK_PACKETS_REMOVE_BROADCAST([hv4/vif-north-tx.pcap], [vif-north.expected])
 
    # Confirm that packets did not go out via tunnel port.
-   AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=33 | grep NXM_NX_TUN_METADATA0 | grep n_packets=0 | wc -l], [0], [[0
+   AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep table=38 | grep NXM_NX_TUN_METADATA0 | grep n_packets=0 | wc -l], [0], [[0
 ]])
 
    # Confirm that packet went out via localnet port
@@ -18919,6 +19138,7 @@ ovn-nbctl lsp-set-addresses sw1-lr0 00:00:00:00:ff:02
 ovn-nbctl lsp-set-options sw1-lr0 router-port=lr0-sw1
 
 OVN_POPULATE_ARP
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 as hv1 ovs-appctl -t ovn-controller vlog/set dbg
@@ -18945,7 +19165,8 @@ list mac_binding], [0], [lr0-sw0
 50:54:00:00:00:03
 ])
 
-AT_CHECK([test 1 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+AT_CHECK([test 1 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 AT_CHECK([test 1 = `as hv1 ovs-ofctl dump-flows br-int table=10 | grep arp | \
 grep controller | grep -v n_packets=0 | wc -l`])
 
@@ -18962,7 +19183,8 @@ OVS_WAIT_UNTIL([test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep n_p
 
 # The packet should not be sent to ovn-controller. The packet
 # count should be 1 only.
-AT_CHECK([test 1 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+AT_CHECK([test 1 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 AT_CHECK([test 1 = `as hv1 ovs-ofctl dump-flows br-int table=10 | grep arp | \
 grep controller | grep -v n_packets=0 | wc -l`])
 
@@ -18975,7 +19197,8 @@ send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
 # The garp packet should be sent to ovn-controller and the mac_binding entry
 # should be updated.
-OVS_WAIT_UNTIL([test 2 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+OVS_WAIT_UNTIL([test 2 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 
 check_row_count MAC_Binding 1
 
@@ -19000,7 +19223,8 @@ send_garp 1 1 $eth_src $eth_dst $spa $tpa
 
 # The garp packet should be sent to ovn-controller and the mac_binding entry
 # should be updated.
-OVS_WAIT_UNTIL([test 3 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+OVS_WAIT_UNTIL([test 3 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 
 OVS_WAIT_UNTIL(
     [test 1 = `as hv1 ovs-ofctl dump-flows br-int table=67 | grep dl_src=50:54:00:00:00:33 \
@@ -19021,7 +19245,8 @@ OVS_WAIT_UNTIL(
 | grep n_packets=1 | wc -l`]
 )
 
-AT_CHECK([test 3 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+AT_CHECK([test 3 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 
 # Now send ARP reply packet with IP - 10.0.0.40 and mac 505400000023
 eth_src=505400000023
@@ -19038,7 +19263,8 @@ send_arp_reply 1 1 $eth_src $eth_dst $spa $tpa
 
 # The garp packet should be sent to ovn-controller and the mac_binding entry
 # should be updated.
-OVS_WAIT_UNTIL([test 4 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+OVS_WAIT_UNTIL([test 4 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 
 # Wait for an entry in table=67 for the learnt mac_binding entry.
 
@@ -19054,7 +19280,8 @@ OVS_WAIT_UNTIL(
 | grep n_packets=1 | wc -l`]
 )
 
-AT_CHECK([test 4 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+AT_CHECK([test 4 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 
 send_arp_reply 1 1 $eth_src $eth_dst $spa $tpa
 OVS_WAIT_UNTIL(
@@ -19062,7 +19289,8 @@ OVS_WAIT_UNTIL(
 | grep n_packets=2 | wc -l`]
 )
 
-AT_CHECK([test 4 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | wc -l`])
+AT_CHECK([test 4 = `cat hv1/ovn-controller.log | grep NXT_PACKET_IN2 | \
+grep table_id=10 | wc -l`])
 
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
@@ -19100,8 +19328,7 @@ ovn-nbctl lsp-add ls1 lp11
 ovn-nbctl lsp-set-addresses lp11 "f0:00:00:00:00:11"
 ovn-nbctl lsp-set-port-security lp11 f0:00:00:00:00:11
 
-OVS_WAIT_UNTIL([test x`ovn-nbctl lsp-get-up lp11` = xup])
-
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 
 ovn-nbctl show
@@ -19270,6 +19497,7 @@ ovn-nbctl lrp-set-gateway-chassis router-to-underlay hv3
 ovn-nbctl --stateless lr-nat-add router dnat_and_snat 172.31.0.100 192.168.1.1
 ovn-nbctl lrp-set-redirect-type router-to-underlay bridged
 
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 
 
@@ -19534,6 +19762,7 @@ check ovn-nbctl lsp-set-options ln-public network_name=public
 check ovn-nbctl --wait=hv lrp-set-gateway-chassis lr0-public hv1 20
 
 OVN_POPULATE_ARP
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 wait_row_count Service_Monitor 2
@@ -19542,7 +19771,7 @@ AT_CAPTURE_FILE([sbflows])
 OVS_WAIT_FOR_OUTPUT(
   [ovn-sbctl dump-flows > sbflows
    ovn-sbctl dump-flows sw0 | grep ct_lb | grep priority=120 | sed 's/table=..//'], 0,
-  [  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
+  [  (ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.3:80,20.0.0.3:80; hash_fields="ip_dst,ip_src,tcp_dst,tcp_src");)
 ])
 
 AT_CAPTURE_FILE([sbflows2])
@@ -19722,6 +19951,7 @@ ovn-nbctl lsp-set-options ln-public network_name=public
 ovn-nbctl --wait=hv lrp-set-gateway-chassis lr0-public hv1 20
 
 OVN_POPULATE_ARP
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 # And now for the anticlimax. We need to ensure that there is no
@@ -19861,6 +20091,7 @@ check ovs-vsctl -- add-port br-int hv1-vif2 -- \
     ofport-request=3
 
 OVN_POPULATE_ARP
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 ovn-sbctl dump-flows > sbflows
@@ -20216,6 +20447,7 @@ ovn-nbctl lsp-add lsw0 lp1
 ovn-nbctl lsp-set-addresses lp1 "f0:00:00:00:00:01 10.0.0.1"
 ovn-nbctl acl-add lsw0 from-lport 1000 'eth.type == 0x1234' drop
 
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # Trace with --ovs should see ovs flow related to the ACL
@@ -20310,6 +20542,7 @@ for az in `seq 1 $n_az`; do
     done
     check ovn-nbctl --wait=hv sync
     ovn-sbctl list Port_Binding > az$az.ports
+    wait_for_ports_up
 done
 
 # Pre-populate the hypervisors' ARP tables so that we don't lose any
@@ -20485,6 +20718,7 @@ ovs-vsctl -- add-port br-int hv1-vif3 -- \
 
 # wait for earlier changes to take effect
 check ovn-nbctl --wait=hv sync
+wait_for_ports_up
 
 ovn-sbctl dump-flows > sbflows
 AT_CAPTURE_FILE([sbflows])
@@ -20672,8 +20906,9 @@ build_tcp_syn() {
 
 send_ipv4_pkt() {
     local hv=$1 inport=$2 eth_src=$3 eth_dst=$4
-    local ip_src=$5 ip_dst=$6 ip_proto=$7 ip_len=$8 ip_chksum=$9
-    local l4_payload=${10}
+    local ip_src=$5 ip_dst=$6 ip_proto=$7 ip_len=$8
+    local l4_payload=$9
+    local hp_ip_src=${10}
     local hp_l4_payload=${11}
     local outfile=${12}
 
@@ -20681,8 +20916,10 @@ send_ipv4_pkt() {
 
     local eth=${eth_dst}${eth_src}0800
     local hp_eth=${eth_src}${eth_dst}0800
-    local ip=4500${ip_len}00004000${ip_ttl}${ip_proto}${ip_chksum}${ip_src}${ip_dst}
-    local hp_ip=4500${ip_len}00004000${ip_ttl}${ip_proto}${ip_chksum}${ip_dst}${ip_src}
+    local ip=4500${ip_len}00004000${ip_ttl}${ip_proto}0000${ip_src}${ip_dst}
+    ip=$(ip4_csum_inplace $ip)
+    local hp_ip=4500${ip_len}00004000${ip_ttl}${ip_proto}0000${hp_ip_src}${ip_src}
+    hp_ip=$(ip4_csum_inplace ${hp_ip})
     local packet=${eth}${ip}${l4_payload}
     local hp_packet=${hp_eth}${hp_ip}${hp_l4_payload}
 
@@ -20694,15 +20931,16 @@ send_ipv6_pkt() {
     local hv=$1 inport=$2 eth_src=$3 eth_dst=$4
     local ip_src=$5 ip_dst=$6 ip_proto=$7 ip_len=$8
     local l4_payload=$9
-    local hp_l4_payload=${10}
-    local outfile=${11}
+    local hp_ip_src=${10}
+    local hp_l4_payload=${11}
+    local outfile=${12}
 
     local ip_ttl=40
 
     local eth=${eth_dst}${eth_src}86dd
     local hp_eth=${eth_src}${eth_dst}86dd
     local ip=60000000${ip_len}${ip_proto}${ip_ttl}${ip_src}${ip_dst}
-    local hp_ip=60000000${ip_len}${ip_proto}${ip_ttl}${ip_dst}${ip_src}
+    local hp_ip=60000000${ip_len}${ip_proto}${ip_ttl}${hp_ip_src}${ip_src}
     local packet=${eth}${ip}${l4_payload}
     local hp_packet=${hp_eth}${hp_ip}${hp_l4_payload}
 
@@ -20724,16 +20962,26 @@ ovs-vsctl -- add-port br-int hv1-vif1 -- \
 
 # One logical switch with IPv4 and IPv6 load balancers that hairpin the
 # traffic.
+# Also create "duplicate" load balancers, i.e., different VIPs using the same
+# backends.
 ovn-nbctl ls-add sw
 ovn-nbctl lsp-add sw lsp -- lsp-set-addresses lsp 00:00:00:00:00:01
-ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp
-ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
-ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
-ovn-nbctl lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
+ovn-nbctl lb-add lb-ipv4-tcp     88.88.88.88:8080 42.42.42.1:4041 tcp
+ovn-nbctl lb-add lb-ipv4-tcp-dup 88.88.88.89:8080 42.42.42.1:4041 tcp
+ovn-nbctl lb-add lb-ipv4-udp     88.88.88.88:4040 42.42.42.1:2021 udp
+ovn-nbctl lb-add lb-ipv4-udp-dup 88.88.88.89:4040 42.42.42.1:2021 udp
+ovn-nbctl lb-add lb-ipv6-tcp     [[8800::0088]]:8080 [[4200::1]]:4041 tcp
+ovn-nbctl lb-add lb-ipv6-tcp-dup [[8800::0089]]:8080 [[4200::1]]:4041 tcp
+ovn-nbctl lb-add lb-ipv6-udp     [[8800::0088]]:4040 [[4200::1]]:2021 udp
+ovn-nbctl lb-add lb-ipv6-udp-dup [[8800::0089]]:4040 [[4200::1]]:2021 udp
 ovn-nbctl ls-lb-add sw lb-ipv4-tcp
+ovn-nbctl ls-lb-add sw lb-ipv4-tcp-dup
 ovn-nbctl ls-lb-add sw lb-ipv4-udp
+ovn-nbctl ls-lb-add sw lb-ipv4-udp-dup
 ovn-nbctl ls-lb-add sw lb-ipv6-tcp
+ovn-nbctl ls-lb-add sw lb-ipv6-tcp-dup
 ovn-nbctl ls-lb-add sw lb-ipv6-udp
+ovn-nbctl ls-lb-add sw lb-ipv6-udp-dup
 
 ovn-nbctl lr-add rtr
 ovn-nbctl lrp-add rtr rtr-sw 00:00:00:00:01:00 42.42.42.254/24 4200::00ff/64
@@ -20743,67 +20991,332 @@ ovn-nbctl lsp-add sw sw-rtr                       \
     -- lsp-set-options sw-rtr router-port=rtr-sw
 
 ovn-nbctl --wait=hv sync
+wait_for_ports_up
 
-# Inject IPv4 TCP packet from lsp.
+ovn-sbctl dump-flows > sbflows
+AT_CAPTURE_FILE([sbflows])
 > expected
+
+AS_BOX([IPv4 TCP Hairpin])
+
+# Inject IPv4 TCP packets from lsp.
 tcp_payload=$(build_tcp_syn 84d0 1f90 05a7)
 hp_tcp_payload=$(build_tcp_syn 84d0 0fc9 156e)
 send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
     $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 88) \
-    06 0028 35f5 \
-    ${tcp_payload} ${hp_tcp_payload} \
+    06 0028 \
+    ${tcp_payload} \
+    $(ip_to_hex 88 88 88 88) ${hp_tcp_payload} \
     expected
 
-ovn-sbctl dump-flows > sbflows
-AT_CAPTURE_FILE([sbflows])
+tcp_payload=$(build_tcp_syn 84d1 1f90 05a5)
+hp_tcp_payload=$(build_tcp_syn 84d1 0fc9 156c)
+send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 89) \
+    06 0028 \
+    ${tcp_payload} \
+    $(ip_to_hex 88 88 88 89) ${hp_tcp_payload} \
+    expected
+
+# Check that traffic is hairpinned.
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+# Change LB Hairpin SNAT IP.
+# Also flush conntrack to avoid reusing an existing entry.
+as hv1 ovs-appctl dpctl/flush-conntrack
+
+ovn-nbctl --wait=hv set load_balancer lb-ipv4-tcp options:hairpin_snat_ip="88.88.88.87"
+# Inject IPv4 TCP packets from lsp.
+tcp_payload=$(build_tcp_syn 84d0 1f90 05a7)
+hp_tcp_payload=$(build_tcp_syn 84d0 0fc9 156f)
+send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 88) \
+    06 0028 \
+    ${tcp_payload} \
+    $(ip_to_hex 88 88 88 87) ${hp_tcp_payload} \
+    expected
+
+tcp_payload=$(build_tcp_syn 84d1 1f90 05a5)
+hp_tcp_payload=$(build_tcp_syn 84d1 0fc9 156c)
+send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 89) \
+    06 0028 \
+    ${tcp_payload} \
+    $(ip_to_hex 88 88 88 89) ${hp_tcp_payload} \
+    expected
 
 # Check that traffic is hairpinned.
 OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
 
-# Inject IPv4 UDP packet from lsp.
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AS_BOX([IPv4 UDP Hairpin])
+
+# Inject IPv4 UDP packets from lsp.
 udp_payload=$(build_udp 84d0 0fc8 6666)
 hp_udp_payload=$(build_udp 84d0 07e5 6e49)
 send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
     $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 88) \
-    11 001e 35f4 \
-    ${udp_payload} ${hp_udp_payload} \
+    11 001e \
+    ${udp_payload} \
+    $(ip_to_hex 88 88 88 88) ${hp_udp_payload} \
+    expected
+
+udp_payload=$(build_udp 84d1 0fc8 6664)
+hp_udp_payload=$(build_udp 84d1 07e5 6e47)
+send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 89) \
+    11 001e \
+    ${udp_payload} \
+    $(ip_to_hex 88 88 88 89) ${hp_udp_payload} \
+    expected
+
+# Check that traffic is hairpinned.
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+# Change LB Hairpin SNAT IP.
+# Also flush conntrack to avoid reusing an existing entry.
+as hv1 ovs-appctl dpctl/flush-conntrack
+ovn-nbctl --wait=hv set load_balancer lb-ipv4-udp options:hairpin_snat_ip="88.88.88.87"
+# Inject IPv4 UDP packets from lsp.
+udp_payload=$(build_udp 84d0 0fc8 6666)
+hp_udp_payload=$(build_udp 84d0 07e5 6e4a)
+send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 88) \
+    11 001e \
+    ${udp_payload} \
+    $(ip_to_hex 88 88 88 87) ${hp_udp_payload} \
+    expected
+
+udp_payload=$(build_udp 84d1 0fc8 6664)
+hp_udp_payload=$(build_udp 84d1 07e5 6e47)
+send_ipv4_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    $(ip_to_hex 42 42 42 1) $(ip_to_hex 88 88 88 89) \
+    11 001e \
+    ${udp_payload} \
+    $(ip_to_hex 88 88 88 89) ${hp_udp_payload} \
     expected
 
 # Check that traffic is hairpinned.
 OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
 
-# Inject IPv6 TCP packet from lsp.
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AS_BOX([IPv6 TCP Hairpin])
+
+# Inject IPv6 TCP packets from lsp.
 tcp_payload=$(build_tcp_syn 84d0 1f90 3ff9)
 hp_tcp_payload=$(build_tcp_syn 84d0 0fc9 4fc0)
 send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
     42000000000000000000000000000001 88000000000000000000000000000088 \
     06 0014 \
-    ${tcp_payload} ${hp_tcp_payload} \
+    ${tcp_payload} \
+    88000000000000000000000000000088 ${hp_tcp_payload} \
     expected
 
-# Check that traffic is hairpinned.
-OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
-
-# Inject IPv6 UDP packet from lsp.
-udp_payload=$(build_udp 84d0 0fc8 a0b8)
-hp_udp_payload=$(build_udp 84d0 07e5 a89b)
+tcp_payload=$(build_tcp_syn 84d1 1f90 3ff7)
+hp_tcp_payload=$(build_tcp_syn 84d1 0fc9 4fbe)
 send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
-    42000000000000000000000000000001 88000000000000000000000000000088 \
-    11 000a \
-    ${udp_payload} ${hp_udp_payload} \
+    42000000000000000000000000000001 88000000000000000000000000000089 \
+    06 0014 \
+    ${tcp_payload} \
+    88000000000000000000000000000089 ${hp_tcp_payload} \
     expected
 
 # Check that traffic is hairpinned.
 OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
 
-OVN_CLEANUP([hv1])
-AT_CLEANUP
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
 
-AT_SETUP([ovn -- Big Load Balancer])
-ovn_start
+# Change LB Hairpin SNAT IP.
+# Also flush conntrack to avoid reusing an existing entry.
+as hv1 ovs-appctl dpctl/flush-conntrack
+ovn-nbctl --wait=hv set load_balancer lb-ipv6-tcp options:hairpin_snat_ip="8800::0087"
 
-ovn-nbctl ls-add ls1
-ovn-nbctl lsp-add ls1 lsp1
+# Inject IPv6 TCP packets from lsp.
+tcp_payload=$(build_tcp_syn 84d0 1f90 3ff9)
+hp_tcp_payload=$(build_tcp_syn 84d0 0fc9 4fc1)
+send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    42000000000000000000000000000001 88000000000000000000000000000088 \
+    06 0014 \
+    ${tcp_payload} \
+    88000000000000000000000000000087 ${hp_tcp_payload} \
+    expected
+
+tcp_payload=$(build_tcp_syn 84d1 1f90 3ff7)
+hp_tcp_payload=$(build_tcp_syn 84d1 0fc9 4fbe)
+send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    42000000000000000000000000000001 88000000000000000000000000000089 \
+    06 0014 \
+    ${tcp_payload} \
+    88000000000000000000000000000089 ${hp_tcp_payload} \
+    expected
+
+# Check that traffic is hairpinned.
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AS_BOX([IPv6 UDP Hairpin])
+
+# Inject IPv6 UDP packets from lsp.
+udp_payload=$(build_udp 84d0 0fc8 a0b8)
+hp_udp_payload=$(build_udp 84d0 07e5 a89b)
+send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    42000000000000000000000000000001 88000000000000000000000000000088 \
+    11 000a \
+    ${udp_payload} \
+    88000000000000000000000000000088 ${hp_udp_payload} \
+    expected
+
+udp_payload=$(build_udp 84d1 0fc8 a0b6)
+hp_udp_payload=$(build_udp 84d1 07e5 a899)
+send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    42000000000000000000000000000001 88000000000000000000000000000089 \
+    11 000a \
+    ${udp_payload} \
+    88000000000000000000000000000089 ${hp_udp_payload} \
+    expected
+
+# Check that traffic is hairpinned.
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+# Change LB Hairpin SNAT IP.
+# Also flush conntrack to avoid reusing an existing entry.
+as hv1 ovs-appctl dpctl/flush-conntrack
+ovn-nbctl --wait=hv set load_balancer lb-ipv6-udp options:hairpin_snat_ip="8800::0087"
+
+# Inject IPv6 UDP packets from lsp.
+udp_payload=$(build_udp 84d0 0fc8 a0b8)
+hp_udp_payload=$(build_udp 84d0 07e5 a89b)
+send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    42000000000000000000000000000001 88000000000000000000000000000088 \
+    11 000a \
+    ${udp_payload} \
+    88000000000000000000000000000087 ${hp_udp_payload} \
+    expected
+
+udp_payload=$(build_udp 84d1 0fc8 a0b6)
+hp_udp_payload=$(build_udp 84d1 07e5 a899)
+send_ipv6_pkt hv1 hv1-vif1 000000000001 000000000100 \
+    42000000000000000000000000000001 88000000000000000000000000000089 \
+    11 000a \
+    ${udp_payload} \
+    88000000000000000000000000000089 ${hp_udp_payload} \
+    expected
+
+# Check learned hairpin reply flows.
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AS_BOX([Delete VIP])
+check ovn-nbctl --wait=hv set Load_Balancer lb-ipv4-tcp vips='"88.88.88.88:8080"=""'
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+AS_BOX([Delete LB])
+check ovn-nbctl --wait=hv     \
+    -- lb-del lb-ipv4-tcp     \
+    -- lb-del lb-ipv4-tcp-dup \
+    -- lb-del lb-ipv4-udp     \
+    -- lb-del lb-ipv4-udp-dup
+
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+check ovn-nbctl --wait=hv     \
+    -- lb-del lb-ipv6-tcp     \
+    -- lb-del lb-ipv6-tcp-dup
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::87,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+ table=69, udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::89,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+])
+
+check ovn-nbctl --wait=hv     \
+    -- lb-del lb-ipv6-udp     \
+    -- lb-del lb-ipv6-udp-dup
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=69 | ofctl_strip_all | grep -v NXST], [1], [dnl
+])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
+AT_SETUP([ovn -- Big Load Balancer])
+ovn_start
+
+ovn-nbctl ls-add ls1
+ovn-nbctl lsp-add ls1 lsp1
 
 net_add n1
 sim_add hv1
@@ -20936,6 +21449,7 @@ check ovn-nbctl lsp-set-options ln-public network_name=public
 check ovn-nbctl lrp-set-gateway-chassis lr0-public hv1 20
 check ovn-nbctl lr-nat-add lr0 snat 172.168.0.100 10.0.0.0/24
 check ovn-nbctl --wait=hv sync
+wait_for_ports_up
 
 wait_row_count datapath_binding 1 external-ids:name=lr0
 lr0_dp_uuid=$(ovn-sbctl --bare --columns _uuid list datapath_binding lr0)
@@ -21156,31 +21670,31 @@ AT_CHECK([
 
 AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_policy.*priority=1001" | sort], [0], [dnl
   table=12(lr_in_policy       ), priority=1001 , dnl
-match=(ip6), action=(pkt.mark = 4294967295; next;)
+match=(ip6), action=(pkt.mark = 4294967295; reg8[[0..15]] = 0; next;)
 ])
 
 ovn-nbctl --wait=hv set logical_router_policy $pol5 options:pkt_mark=-1
 AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_policy.*priority=1001" | sort], [0], [dnl
   table=12(lr_in_policy       ), priority=1001 , dnl
-match=(ip6), action=(next;)
+match=(ip6), action=(reg8[[0..15]] = 0; next;)
 ])
 
 ovn-nbctl --wait=hv set logical_router_policy $pol5 options:pkt_mark=2147483648
 AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_policy.*priority=1001" | sort], [0], [dnl
   table=12(lr_in_policy       ), priority=1001 , dnl
-match=(ip6), action=(pkt.mark = 2147483648; next;)
+match=(ip6), action=(pkt.mark = 2147483648; reg8[[0..15]] = 0; next;)
 ])
 
 ovn-nbctl --wait=hv set logical_router_policy $pol5 options:pkt_mark=foo
 AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_policy.*priority=1001" | sort], [0], [dnl
   table=12(lr_in_policy       ), priority=1001 , dnl
-match=(ip6), action=(next;)
+match=(ip6), action=(reg8[[0..15]] = 0; next;)
 ])
 
 ovn-nbctl --wait=hv set logical_router_policy $pol5 options:pkt_mark=4294967296
 AT_CHECK([ovn-sbctl lflow-list | grep -E "lr_in_policy.*priority=1001" | sort], [0], [dnl
   table=12(lr_in_policy       ), priority=1001 , dnl
-match=(ip6), action=(next;)
+match=(ip6), action=(reg8[[0..15]] = 0; next;)
 ])
 
 OVN_CLEANUP([hv1])
@@ -21602,22 +22116,22 @@ AT_CHECK([test ! -z $p1_zoneid])
 p2_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p2 | sed 's/"//g')
 AT_CHECK([test ! -z $p2_zoneid])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep "load:0x${p1_zoneid}->NXM_NX_REG13" | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw1_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw1_dpkey},\
 reg15=0x${p2_dpkey} | grep REG13 | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw1_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw1_dpkey},\
 reg15=0x${p2_dpkey} | grep "load:0x${p2_zoneid}->NXM_NX_REG13" | wc -l) -eq 1])
 
 ovs-vsctl set interface hv1-vif1 external_ids:iface-id=foo
 OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xdown])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 0])
 
 p1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p1 | sed 's/"//g')
@@ -21629,16 +22143,16 @@ OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xup])
 p1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p1 | sed 's/"//g')
 AT_CHECK([test ! -z $p1_zoneid])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 1])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep "load:0x${p1_zoneid}->NXM_NX_REG13" | wc -l) -eq 1])
 
 ovs-vsctl del-port hv1-vif2
 OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xdown])
 
-AT_CHECK([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+AT_CHECK([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p2_dpkey} | grep REG13 | wc -l) -eq 0])
 
 p2_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p2 | sed 's/"//g')
@@ -21646,7 +22160,7 @@ AT_CHECK([test -z $p2_zoneid])
 
 ovn-nbctl lsp-del sw0-p1
 
-OVS_WAIT_UNTIL([test $(ovs-ofctl dump-flows br-int table=33,metadata=${sw0_dpkey},\
+OVS_WAIT_UNTIL([test $(ovs-ofctl dump-flows br-int table=38,metadata=${sw0_dpkey},\
 reg15=0x${p1_dpkey} | grep REG13 | wc -l) -eq 0])
 
 p1_zoneid=$(as hv1 ovs-vsctl get bridge br-int external_ids:ct-zone-sw0-p1 | sed 's/"//g')
@@ -21723,6 +22237,7 @@ check ovn-nbctl --policy="src-ip" lr-route-add DR 10.0.0.0/24 20.0.0.2
 check ovn-nbctl --ecmp-symmetric-reply --policy="src-ip" lr-route-add GW 10.0.0.0/24 172.16.0.2
 check ovn-nbctl --ecmp-symmetric-reply --policy="src-ip" lr-route-add GW 10.0.0.0/24 172.16.0.3
 
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 # Ensure ECMP symmetric reply flows are not present on any hypervisor.
@@ -21753,26 +22268,25 @@ ovn-nbctl set Logical_Router $gw_uuid options:chassis=hv1
 ovn-nbctl --wait=hv sync
 
 # And ensure that ECMP symmetric reply flows are present only on hv1
-AT_CHECK([
-    test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=15 | \
-    grep "priority=100" | \
-    grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c)
-])
-AT_CHECK([
-    test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \
-    grep "priority=200" | \
-    grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c)
-])
+as hv1 ovs-ofctl dump-flows br-int > hv1flows
+AT_CAPTURE_FILE([hv1flows])
+as hv2 ovs-ofctl dump-flows br-int > hv2flows
+AT_CAPTURE_FILE([hv2flows])
 
 AT_CHECK([
-    test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=15 | \
-    grep "priority=100" | \
-    grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c)
-])
-AT_CHECK([
-    test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=21 | \
-    grep "priority=200" | \
-    grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c)
+    for hv in 1 2; do
+        grep table=15 hv${hv}flows | \
+        grep "priority=100" | \
+        grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))"
+
+        grep table=22 hv${hv}flows | \
+        grep "priority=200" | \
+        grep -c "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]"
+    done; :], [0], [dnl
+1
+1
+0
+0
 ])
 
 OVN_CLEANUP([hv1], [hv2])
@@ -21856,6 +22370,7 @@ ovs-vsctl -- add-port br-int hv2-vif1 -- \
 # for ARP resolution).
 OVN_POPULATE_ARP
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 AT_CHECK([ovn-sbctl lflow-list | grep lr_in_arp_resolve | grep 10.0.0.1], [1], [])
@@ -21895,22 +22410,22 @@ as hv1
 ovs-vsctl add-br br-phys
 ovn_attach n1 br-phys 192.168.0.1
 
-ovn-nbctl ls-add sw0
-ovn-nbctl lsp-add sw0 sw0-p1
-ovn-nbctl lsp-set-addresses sw0-p1 "10:14:00:00:00:03 10.0.0.3"
-ovn-nbctl lsp-set-port-security sw0-p1 "10:14:00:00:00:03 10.0.0.3"
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-p1
+check ovn-nbctl lsp-set-addresses sw0-p1 "10:14:00:00:00:03 10.0.0.3"
+check ovn-nbctl lsp-set-port-security sw0-p1 "10:14:00:00:00:03 10.0.0.3"
 
-ovn-nbctl lsp-add sw0 sw0-p2
-ovn-nbctl lsp-set-addresses sw0-p2 "10:14:00:00:00:04 10.0.0.4"
-ovn-nbctl lsp-set-port-security sw0-p2 "10:14:00:00:00:04 10.0.0.4"
+check ovn-nbctl lsp-add sw0 sw0-p2
+check ovn-nbctl lsp-set-addresses sw0-p2 "10:14:00:00:00:04 10.0.0.4"
+check ovn-nbctl lsp-set-port-security sw0-p2 "10:14:00:00:00:04 10.0.0.4"
 
-ovn-nbctl lsp-add sw0 sw0-p3
-ovn-nbctl lsp-set-addresses sw0-p3 "10:14:00:00:00:05 10.0.0.5"
-ovn-nbctl lsp-set-port-security sw0-p3 "10:14:00:00:00:05 10.0.0.5"
+check ovn-nbctl lsp-add sw0 sw0-p3
+check ovn-nbctl lsp-set-addresses sw0-p3 "10:14:00:00:00:05 10.0.0.5"
+check ovn-nbctl lsp-set-port-security sw0-p3 "10:14:00:00:00:05 10.0.0.5"
 
-ovn-nbctl lsp-add sw0 sw0-p4
-ovn-nbctl lsp-set-addresses sw0-p4 "10:14:00:00:00:06 10.0.0.6"
-ovn-nbctl lsp-set-port-security sw0-p4 "10:14:00:00:00:06 10.0.0.6"
+check ovn-nbctl lsp-add sw0 sw0-p4
+check ovn-nbctl lsp-set-addresses sw0-p4 "10:14:00:00:00:06 10.0.0.6"
+check ovn-nbctl lsp-set-port-security sw0-p4 "10:14:00:00:00:06 10.0.0.6"
 
 as hv1
 ovs-vsctl -- add-port br-int hv1-vif1 -- \
@@ -21934,88 +22449,101 @@ ovs-vsctl -- add-port br-int hv1-vif4 -- \
     options:rxq_pcap=hv1/vif4-rx.pcap \
     ofport-request=4
 
-OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p1) = xup])
-OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xup])
-OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p3) = xup])
-OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p4) = xup])
+wait_for_ports_up
 
-ovn-nbctl pg-add pg0 sw0-p1 sw0-p2
-ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow
-ovn-nbctl --wait=hv sync
+check ovn-nbctl pg-add pg0 sw0-p1 sw0-p2
+check ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow
+check ovn-nbctl --wait=hv sync
 
-OVS_WAIT_UNTIL([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=2")])
+# wait_conj_id_count COUNT ["ID COUNT [MATCH]"]...
+#
+# Waits until COUNT flows matching against conj_id appear in the
+# table 45 on hv1's br-int bridge.  Makes the flows available in
+# "hv1flows", which will be logged on error.
+#
+# In addition, for each quoted "ID COUNT" or "ID COUNT MATCH",
+# verifies that there are COUNT flows in table 45 that match
+# against conj_id=ID and, if MATCH is nonempty, match MATCH.
+wait_conj_id_count() {
+  AT_CAPTURE_FILE([hv1flows])
+  local retval
+  case $1 in
+      (0) retval=1 ;;
+      (*) retval=0 ;;
+  esac
+
+  echo "waiting for $1 conj_id flows..."
+  OVS_WAIT_FOR_OUTPUT_UNQUOTED(
+    [ovs-ofctl dump-flows br-int > hv1flows
+     grep table=45 hv1flows | grep -c conj_id],
+    [$retval], [$1
+])
+
+  shift
+  for arg; do
+    set -- $arg; id=$1 count=$2 match=$3
+    echo "checking that there are $count ${match:+$match }flows with conj_id=$id..."
+    AT_CHECK_UNQUOTED(
+      [grep table=45 hv1flows | grep "$match" | grep -c conj_id=$id],
+      [0], [$count
+])
+  done
+}
 
-# Add sw0-p3 to the port group pg0. The conj_id should be 2.
-ovn-nbctl pg-set-ports pg0 sw0-p1 sw0-p2 sw0-p3
-OVS_WAIT_UNTIL([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=2")])
+AS_BOX([Add sw0-p3 to the port group pg0. The conj_id should be 2.])
+check ovn-nbctl --wait=hv pg-set-ports pg0 sw0-p1 sw0-p2 sw0-p3
+wait_conj_id_count 1 "2 1"
 
-# Add sw0p4 to the port group pg0. The conj_id should be 2.
-ovn-nbctl pg-set-ports pg0 sw0-p1 sw0-p2 sw0-p3 sw0-p4
-OVS_WAIT_UNTIL([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=2")])
+AS_BOX([Add sw0-p4 to the port group pg0. The conj_id should be 2.])
+check ovn-nbctl --wait=hv pg-set-ports pg0 sw0-p1 sw0-p2 sw0-p3 sw0-p4
+wait_conj_id_count 1 "2 1"
 
-# Add another ACL with conjunction.
-ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && udp.dst >= 80 && udp.dst <= 82" allow
-OVS_WAIT_UNTIL([test 2 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep tcp | grep -c "conj_id=2")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep udp | grep -c "conj_id=3")])
+AS_BOX([Add another ACL with conjunction.])
+check ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && udp.dst >= 80 && udp.dst <= 82" allow
+wait_conj_id_count 2 "2 1 tcp" "3 1 udp"
 
-# Delete tcp ACL.
-ovn-nbctl acl-del pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82"
-OVS_WAIT_UNTIL([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep udp | grep -c "conj_id=3")])
+AS_BOX([Delete tcp ACL.])
+check ovn-nbctl --wait=hv acl-del pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82"
+wait_conj_id_count 1 "3 1 udp"
 
-# Add back the tcp ACL.
-ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow
-OVS_WAIT_UNTIL([test 2 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
+AS_BOX([Add back the tcp ACL.])
+check ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow
+wait_conj_id_count 2 "3 1 udp" "4 1 tcp"
 AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep udp | grep -c "conj_id=3")])
 AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep tcp | grep -c "conj_id=4")])
 
-ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && inport == @pg0 && ip4 && tcp.dst >= 84 && tcp.dst <= 86" allow
-OVS_WAIT_UNTIL([test 3 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep udp | grep -c "conj_id=3")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep tcp | grep -c "conj_id=4")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep tcp | grep -c "conj_id=5")])
+AS_BOX([Add another tcp ACL.])
+check ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && inport == @pg0 && ip4 && tcp.dst >= 84 && tcp.dst <= 86" allow
+wait_conj_id_count 3 "3 1 udp" "4 1 tcp" "5 1 tcp"
 
-ovn-nbctl clear port_group pg0 acls
-OVS_WAIT_UNTIL([test 0 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
+AS_BOX([Clear ACLs.])
+check ovn-nbctl --wait=hv clear port_group pg0 acls
+wait_conj_id_count 0
 
-ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow
-ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && udp.dst >= 80 && udp.dst <= 82" allow
-OVS_WAIT_UNTIL([test 2 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep tcp | grep -c "conj_id=6")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep udp | grep -c "conj_id=7")])
+AS_BOX([Add TCP ACL.])
+check ovn-nbctl --wait=hv acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && tcp.dst >= 80 && tcp.dst <= 82" allow
+check ovn-nbctl acl-add pg0 to-lport 1002 "outport == @pg0 && ip4 && udp.dst >= 80 && udp.dst <= 82" allow
+wait_conj_id_count 2 "6 1 tcp" "7 1 udp"
 
-# Flush the lflow cache.
+AS_BOX([Flush lflow cache.])
 as hv1 ovn-appctl -t ovn-controller flush-lflow-cache
-OVS_WAIT_UNTIL([test 2 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=2")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=3")])
-
-# Disable lflow caching.
+wait_conj_id_count 2 "2 1" "3 1"
 
+AS_BOX([Disable lflow caching.])
 as hv1 ovs-vsctl set open . external_ids:ovn-enable-lflow-cache=false
 
-# Wait until ovn-enble-lflow-cache is processed by ovn-controller.
-OVS_WAIT_UNTIL([
-    test $(ovn-sbctl get chassis hv1 other_config:ovn-enable-lflow-cache) = '"false"'
-])
-
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=2")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=3")])
+AS_BOX([Wait until ovn-enable-lflow-cache is processed by ovn-controller.])
+wait_row_count Chassis 1 name=hv1 other_config:ovn-enable-lflow-cache=false
+wait_conj_id_count 2 "2 1" "3 1"
 
-# Remove port sw0-p4 from port group.
-ovn-nbctl pg-set-ports pg0 sw0-p1 sw0-p2 sw0-p3
-OVS_WAIT_UNTIL([test 2 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=4")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=5")])
+AS_BOX([Remove port sw0-p4 from port group.])
+check ovn-nbctl --wait=hv pg-set-ports pg0 sw0-p1 sw0-p2 sw0-p3
+wait_conj_id_count 2 "4 1" "5 1"
 
+AS_BOX([Recompute.])
 as hv1 ovn-appctl -t ovn-controller recompute
 
-OVS_WAIT_UNTIL([test 2 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id")])
-OVS_WAIT_UNTIL([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=2")])
-AT_CHECK([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=45 | grep -c "conj_id=3")])
+wait_conj_id_count 2 "2 1" "3 1"
 
 OVN_CLEANUP([hv1])
 
@@ -22131,6 +22659,77 @@ AT_CHECK_UNQUOTED([grep -c "output:4" offlows_table65_2.txt], [0], [dnl
 OVN_CLEANUP([hv1])
 AT_CLEANUP
 
+AT_SETUP([ovn -- Container port Incremental Processing])
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.10
+
+as hv1
+ovs-vsctl \
+    -- add-port br-int vif1 \
+    -- set Interface vif1 external_ids:iface-id=lsp1 \
+    ofport-request=1
+
+check ovn-nbctl ls-add ls1 \
+    -- ls-add ls2 \
+    -- lsp-add ls1 lsp1 \
+    -- lsp-add ls2 lsp-cont1 lsp1 1
+check ovn-nbctl --wait=hv sync
+
+# Wait for ports to be bound.
+wait_row_count Chassis 1 name=hv1
+ch=$(fetch_column Chassis _uuid name=hv1)
+wait_row_count Port_Binding 1 logical_port=lsp1 chassis=$ch
+wait_row_count Port_Binding 1 logical_port=lsp-cont1 chassis=$ch
+
+AS_BOX([delete OVS VIF and OVN container port])
+as hv1 ovn-appctl -t ovn-controller debug/pause
+as hv1 ovs-vsctl del-port vif1
+
+check ovn-nbctl --wait=sb lsp-del lsp-cont1
+as hv1 ovn-appctl -t ovn-controller debug/resume
+
+check ovn-nbctl --wait=hv sync
+check_row_count Port_Binding 1 logical_port=lsp1 chassis="[[]]"
+
+AS_BOX([readd OVS VIF])
+as hv1
+ovs-vsctl \
+    -- add-port br-int vif1 \
+    -- set Interface vif1 external_ids:iface-id=lsp1 \
+    ofport-request=1
+wait_row_count Port_Binding 1 logical_port=lsp1 chassis=$ch
+
+AS_BOX([readd OVN container port])
+check ovn-nbctl lsp-add ls2 lsp-cont1 lsp1 1
+check ovn-nbctl --wait=hv sync
+check_row_count Port_Binding 1 logical_port=lsp-cont1 chassis=$ch
+
+AS_BOX([delete both OVN VIF and OVN container port])
+as hv1 ovn-appctl -t ovn-controller debug/pause
+check ovn-nbctl lsp-del lsp1 \
+    -- lsp-del lsp-cont1
+check ovn-nbctl --wait=sb sync
+as hv1 ovn-appctl -t ovn-controller debug/resume
+
+AS_BOX([readd both OVN VIF and OVN container port])
+as hv1 ovn-appctl -t ovn-controller debug/pause
+check ovn-nbctl lsp-add ls1 lsp1 \
+    -- lsp-add ls2 lsp-cont1 lsp1 1
+check ovn-nbctl --wait=sb sync
+as hv1 ovn-appctl -t ovn-controller debug/resume
+
+check ovn-nbctl --wait=hv sync
+wait_row_count Port_Binding 1 logical_port=lsp1 chassis=$ch
+wait_row_count Port_Binding 1 logical_port=lsp-cont1 chassis=$ch
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
 # Test dropping traffic destined to router owned IPs.
 AT_SETUP([ovn -- gateway router drop traffic for own IPs])
 ovn_start
@@ -22145,7 +22744,8 @@ ovn-nbctl lsp-add s1 lsp-s1-r1 -- set Logical_Switch_Port lsp-s1-r1 type=router
 
 # Create logical port p1 in s1
 ovn-nbctl lsp-add s1 p1 \
--- lsp-set-addresses p1 "f0:00:00:00:01:02 10.0.1.2"
+-- lsp-set-addresses p1 "f0:00:00:00:01:02 10.0.1.2" \
+-- lsp-set-port-security p1 "f0:00:00:00:01:02 10.0.1.2"
 
 # Create two hypervisor and create OVS ports corresponding to logical ports.
 net_add n1
@@ -22165,6 +22765,7 @@ ovs-vsctl -- add-port br-int hv1-vif1 -- \
 # for ARP resolution).
 OVN_POPULATE_ARP
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 sw_key=$(ovn-sbctl --bare --columns tunnel_key list datapath_binding r1)
@@ -22208,7 +22809,7 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep "actions=controller" | grep
 ])
 
 # The packet should've been dropped in the lr_in_arp_resolve stage.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=21, n_packets=1,.* priority=1,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=22, n_packets=1,.* priority=1,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl
 1
 ])
 
@@ -22281,6 +22882,7 @@ check test "$hvt2" -gt 0
 # Then wait for 9 out of 10
 sleep 1
 check as hv3 ovn-appctl -t ovn-controller exit --restart
+wait_for_ports_up
 ovn-nbctl --wait=sb sync
 wait_row_count Chassis_Private 9 name!=hv3 nb_cfg=2
 check_row_count Chassis_Private 1 name=hv3 nb_cfg=1
@@ -22454,6 +23056,7 @@ ovn-nbctl set logical_router gw_router options:chassis=hv3
 ovn-nbctl lr-nat-add gw_router snat 172.16.0.200 30.0.0.0/24
 ovn-nbctl lr-nat-add gw_router snat 172.16.0.201 30.0.0.3
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 # Create an interface in br-phys in hv2 and send ARP request for 172.16.0.100
@@ -22643,6 +23246,7 @@ check ovn-nbctl acl-add ls1 to-lport 1001 \
 check ovn-nbctl acl-add ls1 to-lport 1001 \
     'outport == "lsp1" && ip4 && ip4.src == {10.0.0.2, 10.0.0.3}' allow
 
+wait_for_ports_up
 check ovn-nbctl --wait=hv sync
 
 sip=`ip_to_hex 10 0 0 2`
@@ -22811,6 +23415,7 @@ ovs-vsctl -- add-port br-int hv1-vif1 -- \
     options:rxq_pcap=hv1/vif1-rx.pcap \
     ofport-request=1
 
+wait_for_ports_up
 ovn-nbctl --wait=hv sync
 
 # Expected conjunction flows:
@@ -22869,6 +23474,7 @@ as hv1 ovs-vsctl \
 ovn-nbctl --wait=hv sync
 
 # hv1 ovn-controller should not bind sw0-p2.
+wait_for_ports_up sw0-p1
 check_row_count Port_Binding 0 logical_port=sw0-p2 chassis=$c
 
 # Trigger recompute and sw0-p2 should not be claimed.
@@ -22976,93 +23582,79 @@ check ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
 check ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
 check ovn-nbctl --wait=hv lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST], [1], [dnl
 ])
 
 check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4-tcp
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 1]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 1]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST], [1], [dnl
 ])
 
 check ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.90:8080 42.42.42.42:4041,52.52.52.52:4042 tcp
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 3]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70], [0], [dnl
-NXST_FLOW reply (xid=0x8):
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST], [1], [dnl
 ])
 
 check ovn-nbctl lsp-add sw0 sw0-p2
@@ -23070,184 +23662,159 @@ check ovn-nbctl lsp-add sw0 sw0-p2
 OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xup])
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 3]
 )
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8-], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
 ])
 
 check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv4-udp
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 4]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 4]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 4]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
 ])
 
 check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6-tcp
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 5]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 5]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 5]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 5]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
 ])
 
 check ovn-nbctl --wait=hv ls-lb-add sw0 lb-ipv6-udp
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 6]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 6]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
 ])
 
 check ovn-nbctl --wait=hv ls-lb-add sw1 lb-ipv6-udp
@@ -23255,65 +23822,115 @@ check ovn-nbctl --wait=hv ls-lb-add sw1 lb-ipv6-udp
 # Number of hairpin flows shouldn't change as it doesn't depend on how many
 # datapaths the LB is applied.
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 6]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 6]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp,reg1=0x58585858,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,tcp,reg1=0x5858585a,reg2=0x1f90/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+])
+
+# Check backwards compatibility with ovn-northd versions that don't store the
+# original destination tuple.
+#
+# ovn-controller should fall back to matching on ct_nw_dst()/ct_tp_dst().
+as northd-backup ovn-appctl -t ovn-northd pause
+as northd ovn-appctl -t ovn-northd pause
+
+check ovn-sbctl \
+    -- remove load_balancer lb-ipv4-tcp options hairpin_orig_tuple \
+    -- remove load_balancer lb-ipv6-tcp options hairpin_orig_tuple \
+    -- remove load_balancer lb-ipv4-udp options hairpin_orig_tuple \
+    -- remove load_balancer lb-ipv6-udp options hairpin_orig_tuple
+
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_ipv6_dst=8800::88,ct_nw_proto=17,ct_tp_dst=4040,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_ipv6_dst=8800::88,ct_nw_proto=6,ct_tp_dst=8080,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.88,ct_nw_proto=17,ct_tp_dst=4040,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.88,ct_nw_proto=6,ct_tp_dst=8080,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.90,ct_nw_proto=6,ct_tp_dst=8080,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.90,ct_nw_proto=6,ct_tp_dst=8080,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+OVS_WAIT_FOR_OUTPUT([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ct_nw_proto=17,ct_tp_dst=4040,udp6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ct_nw_proto=17,ct_tp_dst=4040,udp6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ct_nw_proto=6,ct_tp_dst=8080,tcp6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ct_nw_proto=17,ct_tp_dst=4040,udp,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ct_nw_proto=6,ct_tp_dst=8080,tcp,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ct_nw_proto=6,ct_tp_dst=8080,tcp,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_ipv6_dst=8800::88,ct_nw_proto=17,ct_tp_dst=4040,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_ipv6_dst=8800::88,ct_nw_proto=6,ct_tp_dst=8080,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.88,ct_nw_proto=17,ct_tp_dst=4040,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.88,ct_nw_proto=6,ct_tp_dst=8080,tcp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.90,ct_nw_proto=6,ct_tp_dst=8080,tcp,nw_src=42.42.42.42,nw_dst=42.42.42.42,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_state=+trk+dnat,ct_label=0x2/0x2,ct_nw_dst=88.88.88.90,ct_nw_proto=6,ct_tp_dst=8080,tcp,nw_src=52.52.52.52,nw_dst=52.52.52.52,tp_dst=4042 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.90,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=42.42.42.42,nw_dst=88.88.88.90,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp,metadata=0x1,nw_src=52.52.52.52,nw_dst=88.88.88.90,tp_src=4042 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST], [1], [dnl
 ])
 
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
+OVS_WAIT_FOR_OUTPUT([as hv2 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ct_nw_proto=17,ct_tp_dst=4040,udp6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ct_nw_proto=17,ct_tp_dst=4040,udp6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ct_nw_proto=6,ct_tp_dst=8080,tcp6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ct_nw_proto=17,ct_tp_dst=4040,udp,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ct_nw_proto=6,ct_tp_dst=8080,tcp,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.90,ct_nw_proto=6,ct_tp_dst=8080,tcp,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.90))
 ])
 
+# Resume ovn-northd.
+as northd ovn-appctl -t ovn-northd resume
+as northd-backup ovn-appctl -t ovn-northd resume
+check ovn-nbctl --wait=hv sync
+
 as hv2 ovs-vsctl del-port hv2-vif1
 OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xdown])
 
@@ -23321,75 +23938,73 @@ OVS_WAIT_UNTIL([test x$(ovn-nbctl lsp-get-up sw0-p2) = xdown])
 as hv2 ovn-appctl -t ovn-controller recompute
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 6]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 6]
 )
 
 check ovn-nbctl --wait=hv lb-del lb-ipv4-tcp
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 3]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 3]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 0]
 )
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_label=0x2/0x2,tcp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,ct_label=0x2/0x2,udp6,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=68 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=68, priority=100,ct_label=0x2/0x2,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=4041 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=6,NXM_OF_TCP_SRC[[]]=NXM_OF_TCP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp,reg1=0x58585858,reg2=0xfc8/0xffff,nw_src=42.42.42.1,nw_dst=42.42.42.1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x800,NXM_OF_IP_SRC[[]],ip_dst=88.88.88.88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
+ table=68, priority=100,ct_label=0x2/0x2,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,ipv6_src=4200::1,ipv6_dst=4200::1,tp_dst=2021 actions=load:0x1->NXM_NX_REG10[[7]],learn(table=69,delete_learned,OXM_OF_METADATA[[]],eth_type=0x86dd,NXM_NX_IPV6_SRC[[]],ipv6_dst=8800::88,nw_proto=17,NXM_OF_UDP_SRC[[]]=NXM_OF_UDP_DST[[]],load:0x1->NXM_NX_REG10[[7]])
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,tcp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=4041 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp,metadata=0x1,nw_src=42.42.42.1,nw_dst=88.88.88.88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x1,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
-priority=100,udp6,metadata=0x2,ipv6_src=4200::1,ipv6_dst=8800::88,tp_src=2021 actions=load:0x1->NXM_NX_REG10[[7]]
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=69], [0], [dnl
+NXST_FLOW reply (xid=0x8):
 ])
 
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_ipv6_dst=8800::88,ipv6,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
-priority=100,ct_state=+trk+dnat,ct_nw_dst=88.88.88.88,ip,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=70 | ofctl_strip_all | grep -v NXST], [0], [dnl
+ table=70, priority=100,tcp6,reg2=0x1f90/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp,reg1=0x58585858,reg2=0xfc8/0xffff,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=88.88.88.88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x1 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
+ table=70, priority=100,udp6,reg2=0xfc8/0xffff,reg4=0x88000000,reg5=0,reg6=0,reg7=0x88,metadata=0x2 actions=ct(commit,zone=NXM_NX_REG12[[0..15]],nat(src=8800::88))
 ])
 
 check ovn-nbctl --wait=hv ls-del sw0
 check ovn-nbctl --wait=hv ls-del sw1
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=69 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv1 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv1 ovs-ofctl dump-flows br-int table=70 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=68 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=69 | grep -c -v NXST) -eq 0]
 )
 
 OVS_WAIT_UNTIL(
-    [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -v NXST | wc -l) -eq 0]
+    [test $(as hv2 ovs-ofctl dump-flows br-int table=70 | grep -c -v NXST) -eq 0]
 )
 
 OVN_CLEANUP([hv1], [hv2])
@@ -23541,3 +24156,680 @@ as ovn-nb
 OVS_APP_EXIT_AND_WAIT([ovsdb-server])
 
 AT_CLEANUP
+
+AT_SETUP([ovn -- propagate Port_Binding.up to NB and OVS])
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+check ovn-nbctl ls-add ls
+
+AS_BOX([add OVS port for existing LSP])
+check ovn-nbctl lsp-add ls lsp1
+check ovn-nbctl --wait=hv sync
+check_column "false" Port_Binding up logical_port=lsp1
+
+check ovs-vsctl add-port br-int lsp1 -- set Interface lsp1 external-ids:iface-id=lsp1
+wait_column "true" Port_Binding up logical_port=lsp1
+wait_column "true" nb:Logical_Switch_Port up name=lsp1
+OVS_WAIT_UNTIL([test `ovs-vsctl get Interface lsp1 external_ids:ovn-installed` = '"true"'])
+
+AS_BOX([add LSP for existing OVS port])
+check ovs-vsctl add-port br-int lsp2 -- set Interface lsp2 external-ids:iface-id=lsp2
+check ovn-nbctl lsp-add ls lsp2
+check ovn-nbctl --wait=hv sync
+check_column "true" Port_Binding up logical_port=lsp2
+wait_column "true" nb:Logical_Switch_Port up name=lsp2
+OVS_WAIT_UNTIL([test `ovs-vsctl get Interface lsp2 external_ids:ovn-installed` = '"true"'])
+
+AS_BOX([ovn-controller should not reset Port_Binding.up without northd])
+# Pause northd and clear the "up" field to simulate older ovn-northd
+# versions writing to the Southbound DB.
+as northd ovn-appctl -t ovn-northd pause
+as northd-backup ovn-appctl -t ovn-northd pause
+
+as hv1 ovn-appctl -t ovn-controller debug/pause
+check ovn-sbctl clear Port_Binding lsp1 up
+as hv1 ovn-appctl -t ovn-controller debug/resume
+
+# Forcefully release the Port_Binding so ovn-controller reclaims it.
+# Make sure the Port_Binding.up field is not updated though.
+check ovn-sbctl clear Port_Binding lsp1 chassis
+hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=lsp1
+check_column "" Port_Binding up logical_port=lsp1
+
+# Once resumed, northd should explicitly set the Port_Binding.up field to 'false' and
+# ovn-controller sets it to 'true' as soon as the update is processed.
+as northd ovn-appctl -t ovn-northd resume
+as northd-backup ovn-appctl -t ovn-northd resume
+wait_column "true" Port_Binding up logical_port=lsp1
+wait_column "true" nb:Logical_Switch_Port up name=lsp1
+
+AS_BOX([ovn-controller should reset Port_Binding.up - from NULL])
+# If Port_Binding.up is cleared externally, ovn-northd resets it to 'false'
+# and ovn-controller finally sets it to 'true' once the update is processed.
+as hv1 ovn-appctl -t ovn-controller debug/pause
+check ovn-sbctl clear Port_Binding lsp1 up
+check ovn-nbctl --wait=sb sync
+wait_column "false" nb:Logical_Switch_Port up name=lsp1
+as hv1 ovn-appctl -t ovn-controller debug/resume
+wait_column "true" Port_Binding up logical_port=lsp1
+wait_column "true" nb:Logical_Switch_Port up name=lsp1
+
+AS_BOX([ovn-controller should reset Port_Binding.up - from false])
+# If Port_Binding.up is externally set to 'false', ovn-controller should set
+# it to 'true' once the update is processed.
+as hv1 ovn-appctl -t ovn-controller debug/pause
+check ovn-sbctl set Port_Binding lsp1 up=false
+check ovn-nbctl --wait=sb sync
+wait_column "false" nb:Logical_Switch_Port up name=lsp1
+as hv1 ovn-appctl -t ovn-controller debug/resume
+wait_column "true" Port_Binding up logical_port=lsp1
+wait_column "true" nb:Logical_Switch_Port up name=lsp1
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
+# Test case to check that ovn-controller doesn't assert when
+# handling port group updates.
+AT_SETUP([ovn -- No ovn-controller assert for port group updates])
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.10
+
+as hv1
+ovs-vsctl \
+    -- add-port br-int vif1 \
+    -- set Interface vif1 external_ids:iface-id=sw0-port1 \
+    ofport-request=1
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-port1
+check ovn-nbctl lsp-set-addresses sw0-port1 "10:14:00:00:00:01 192.168.0.2"
+
+check ovn-nbctl lsp-add sw0 sw0-port2
+check ovn-nbctl lsp-add sw0 sw0-port3
+check ovn-nbctl lsp-add sw0 sw0-port4
+check ovn-nbctl lsp-add sw0 sw0-port5
+check ovn-nbctl lsp-add sw0 sw0-port6
+check ovn-nbctl lsp-add sw0 sw0-port7
+
+ovn-nbctl create address_set name=as1
+ovn-nbctl set address_set . addresses="10.0.0.10,10.0.0.11,10.0.0.12"
+
+ovn-nbctl pg-add pg1 sw0-port1 sw0-port2 sw0-port3
+ovn-nbctl acl-add pg1 to-lport 1002 "outport == @pg1 && ip4.dst == \$as1 && icmp4" drop
+ovn-nbctl acl-add pg1 to-lport 1002 "outport == @pg1 && ip4.dst == \$as1 && tcp && tcp.dst >=10000 && tcp.dst <= 20000" drop
+ovn-nbctl acl-add pg1 to-lport 1002 "outport == @pg1 && ip4.dst == \$as1 && udp && udp.dst >=10000 && udp.dst <= 20000" drop
+
+ovn-nbctl pg-add pg2 sw0-port2 sw0-port3 sw0-port4 sw0-port5
+ovn-nbctl acl-add pg2 to-lport 1002 "outport == @pg2 && ip4.dst == \$as1 && icmp4" allow-related
+ovn-nbctl acl-add pg2 to-lport 1002 "outport == @pg2 && ip4.dst == \$as1 && tcp && tcp.dst >=30000 && tcp.dst <= 40000" drop
+ovn-nbctl acl-add pg2 to-lport 1002 "outport == @pg2 && ip4.dst == \$as1 && udp && udp.dst >=30000 && udp.dst <= 40000" drop
+
+ovn-nbctl pg-add pg3 sw0-port1 sw0-port5
+ovn-nbctl acl-add pg3 to-lport 1002 "outport == @pg3 && ip4.dst == \$as1 && icmp4" allow-related
+ovn-nbctl acl-add pg3 to-lport 1002 "outport == @pg3 && ip4.dst == \$as1 && tcp && tcp.dst >=20000 && tcp.dst <= 30000" allow-related
+ovn-nbctl acl-add pg3 to-lport 1002 "outport == @pg3 && ip4.dst == \$as1 && udp && udp.dst >=20000 && udp.dst <= 30000" allow-related
+
+AS_BOX([Delete and add the port groups multiple times])
+
+for i in $(seq 1 10)
+do
+    check ovn-nbctl --wait=hv clear port_Group pg1 ports
+    check ovn-nbctl --wait=hv clear port_Group pg2 ports
+    check ovn-nbctl --wait=hv clear port_Group pg3 ports
+    check ovn-nbctl --wait=hv pg-set-ports pg1 sw0-port1
+    check ovn-nbctl --wait=hv pg-set-ports pg1 sw0-port1 sw0-port4
+    check ovn-nbctl --wait=hv pg-set-ports pg1 sw0-port1 sw0-port4 sw0-port5
+
+    check ovn-nbctl --wait=hv pg-set-ports pg2 sw0-port2
+    check ovn-nbctl --wait=hv pg-set-ports pg2 sw0-port2 sw0-port6
+    check ovn-nbctl --wait=hv pg-set-ports pg2 sw0-port2 sw0-port6 sw0-port7
+
+    check ovn-nbctl --wait=hv pg-set-ports pg3 sw0-port1
+    check ovn-nbctl --wait=hv pg-set-ports pg3 sw0-port1 sw0-port3
+    check ovn-nbctl --wait=hv pg-set-ports pg3 sw0-port1 sw0-port3 sw0-port6
+
+    # Make sure that ovn-controller has not asserted.
+    AT_CHECK([kill -0 $(cat hv1/ovn-controller.pid)])
+done
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
+# Test case to check that ovn-controller doesn't assert when
+# handling conjunction flows.  When ovn-controller claims
+# the first port of a logical switch datapath, it programs the flows
+# for this datapath incrementally (without full recompute).  If,
+# in the same SB update from ovsdb-server, a logical flow is added
+# that results in a conjunction action, then this logical flow is also
+# handled incrementally.  The newly added logical flow is processed
+# twice, which results in wrong oflows and triggers an assertion
+# in ovn-controller.  Test that ovn-controller handles this scenario
+# properly and doesn't assert.
+AT_SETUP([ovn -- No ovn-controller assert when generating conjunction flows])
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.10
+
+as hv1
+ovs-vsctl \
+    -- add-port br-int vif1 \
+    -- set Interface vif1 external_ids:iface-id=sw0-p1 \
+    ofport-request=1
+
+check as hv1
+ovs-vsctl set open . external_ids:ovn-monitor-all=true
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl pg-add pg1
+check ovn-nbctl pg-add pg2
+check ovn-nbctl lsp-add sw0 sw0-p2
+check ovn-nbctl lsp-set-addresses sw0-p2 "00:00:00:00:00:02 192.168.47.2"
+check ovn-nbctl lsp-add sw0 sw0-p3
+check ovn-nbctl lsp-set-addresses sw0-p3 "00:00:00:00:00:03 192.168.47.3"
+
+# Pause ovn-northd. When it is resumed, all the below NB updates
+# will be sent in one transaction.
+
+check as northd ovn-appctl -t ovn-northd pause
+check as northd-backup ovn-appctl -t ovn-northd pause
+
+check ovn-nbctl lsp-add sw0 sw0-p1
+check ovn-nbctl lsp-set-addresses sw0-p1 "00:00:00:00:00:01 192.168.47.1"
+check ovn-nbctl pg-set-ports pg1 sw0-p1 sw0-p2
+check ovn-nbctl pg-set-ports pg2 sw0-p3
+check ovn-nbctl acl-add pg1 to-lport 1002 "outport == @pg1 && ip4 && ip4.src == \$pg2_ip4 && udp && udp.dst >= 1 && udp.dst <= 65535" allow-related
+
+# resume ovn-northd now. This should result in a single update message
+# from SB ovsdb-server to ovn-controller for all the above NB updates.
+check as northd ovn-appctl -t ovn-northd resume
+
+AS_BOX([Wait for sw0-p1 to be up])
+wait_for_ports_up sw0-p1
+
+# When the port group pg1 is updated, it should not result in
+# any assert in ovn-controller.
+ovn-nbctl --wait=hv pg-set-ports pg1 sw0-p1 sw0-p2 sw0-p3
+AT_CHECK([kill -0 $(cat hv1/ovn-controller.pid)])
+check ovn-nbctl --wait=hv sync
+
+# Check OVS flows are installed properly.
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=45 | ofctl_strip_all | \
+    grep "priority=2002" | grep conjunction | \
+    sed 's/conjunction([[^)]]*)/conjunction()/g' | sort], [0], [dnl
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x10/0xfff0 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x100/0xff00 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x1000/0xf000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2/0xfffe actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x20/0xffe0 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x200/0xfe00 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2000/0xe000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4/0xfffc actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x40/0xffc0 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x400/0xfc00 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4000/0xc000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8/0xfff8 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x80/0xff80 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x800/0xf800 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8000/0x8000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,metadata=0x1,nw_src=192.168.47.3,tp_dst=1 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x100/0x100,reg15=0x3,metadata=0x1,nw_src=192.168.47.3 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x10/0xfff0 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x100/0xff00 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x1000/0xf000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2/0xfffe actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x20/0xffe0 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x200/0xfe00 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x2000/0xe000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4/0xfffc actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x40/0xffc0 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x400/0xfc00 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x4000/0xc000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8/0xfff8 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x80/0xff80 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x800/0xf800 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=0x8000/0x8000 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,metadata=0x1,nw_src=192.168.47.3,tp_dst=1 actions=conjunction()
+ table=45, priority=2002,udp,reg0=0x80/0x80,reg15=0x3,metadata=0x1,nw_src=192.168.47.3 actions=conjunction()
+])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+
+AT_SETUP([ovn -- OVN FDB (MAC learning) - 2 HVs, 2 LS, 1 LR ])
+ovn_start
+
+# Create the first logical switch with one port
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-p1
+check ovn-nbctl lsp-set-addresses sw0-p1 "50:54:00:00:00:03 10.0.0.3" unknown
+
+check ovn-nbctl lsp-add sw0 sw0-p2
+check ovn-nbctl lsp-set-addresses sw0-p2 "50:54:00:00:00:04 10.0.0.4"
+# Port security is set for sw0-p2
+check ovn-nbctl lsp-set-port-security sw0-p2 "50:54:00:00:00:04 10.0.0.4"
+
+# sw0-p1 and sw0-p3 have unknown address and no port security.
+# FDB should be enabled for these lports.
+check ovn-nbctl lsp-add sw0 sw0-p3
+check ovn-nbctl lsp-set-addresses sw0-p3 unknown
+
+# Create the second logical switch with one port
+check ovn-nbctl ls-add sw1
+check ovn-nbctl lsp-add sw1 sw1-p1
+check ovn-nbctl lsp-set-addresses sw1-p1 "40:54:00:00:00:03 11.0.0.3" unknown
+
+check ovn-nbctl lsp-add sw1 sw1-p2
+check ovn-nbctl lsp-set-addresses sw1-p2 "40:54:00:00:00:04 11.0.0.4"
+check ovn-nbctl lsp-set-port-security sw1-p2 "40:54:00:00:00:04 11.0.0.4"
+
+# Create a logical router and attach both logical switches
+check ovn-nbctl lr-add lr0
+check ovn-nbctl lrp-add lr0 lr0-sw0 00:00:00:00:ff:01 10.0.0.1/24
+check ovn-nbctl lsp-add sw0 sw0-lr0
+check ovn-nbctl lsp-set-type sw0-lr0 router
+check ovn-nbctl lsp-set-addresses sw0-lr0 router
+check ovn-nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0
+
+check ovn-nbctl lrp-add lr0 lr0-sw1 00:00:00:00:ff:02 11.0.0.1/24
+check ovn-nbctl lsp-add sw1 sw1-lr0
+check ovn-nbctl lsp-set-type sw1-lr0 router
+check ovn-nbctl lsp-set-addresses sw1-lr0 router
+check ovn-nbctl lsp-set-options sw1-lr0 router-port=lr0-sw1
+ovn-nbctl --wait=hv sync
+
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+ovs-vsctl -- add-port br-int hv1-vif1 -- \
+    set interface hv1-vif1 external-ids:iface-id=sw0-p1 \
+    options:tx_pcap=hv1/vif1-tx.pcap \
+    options:rxq_pcap=hv1/vif1-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv1-vif2 -- \
+    set interface hv1-vif2 external-ids:iface-id=sw1-p2 \
+    options:tx_pcap=hv1/vif2-tx.pcap \
+    options:rxq_pcap=hv1/vif2-rx.pcap \
+    ofport-request=2
+ovs-vsctl -- add-port br-int hv1-vif3 -- \
+    set interface hv1-vif3 external-ids:iface-id=sw0-p3 \
+    options:tx_pcap=hv1/vif3-tx.pcap \
+    options:rxq_pcap=hv1/vif3-rx.pcap \
+    ofport-request=3
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.2
+ovs-vsctl -- add-port br-int hv2-vif1 -- \
+    set interface hv2-vif1 external-ids:iface-id=sw0-p2 \
+    options:tx_pcap=hv2/vif1-tx.pcap \
+    options:rxq_pcap=hv2/vif1-rx.pcap \
+    ofport-request=1
+ovs-vsctl -- add-port br-int hv2-vif2 -- \
+    set interface hv2-vif2 external-ids:iface-id=sw1-p1 \
+    options:tx_pcap=hv2/vif2-tx.pcap \
+    options:rxq_pcap=hv2/vif2-rx.pcap \
+    ofport-request=2
+
+OVN_POPULATE_ARP
+
+ip_to_hex() {
+    printf "%02x%02x%02x%02x" "$@"
+}
+
+send_icmp_packet() {
+    local inport=$1 hv=$2 eth_src=$3 eth_dst=$4 ipv4_src=$5 ipv4_dst=$6 ip_chksum=$7 data=$8
+    shift 8
+
+    local ip_ttl=ff
+    local ip_len=001c
+    local packet=${eth_dst}${eth_src}08004500${ip_len}00004000${ip_ttl}01${ip_chksum}${ipv4_src}${ipv4_dst}${data}
+    echo $packet > expected
+    as hv$hv ovs-appctl netdev-dummy/receive hv$hv-vif$inport $packet
+}
+
+reset_pcap_file() {
+    local iface=$1
+    local pcap_file=$2
+    ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+options:rxq_pcap=dummy-rx.pcap
+    rm -f ${pcap_file}*.pcap
+    ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+trim_zeros() {
+    sed 's/\(00\)\{1,\}$//'
+}
+
+AS_BOX([Wait for all ports to be up])
+wait_for_ports_up
+
+# Check that there is put_fdb() flow added by ovn-northd for sw0-p1
+ovn-sbctl dump-flows sw0 > sw0flows
+AT_CAPTURE_FILE([sw0flows])
+
+AT_CHECK([grep "ls_in_lookup_fdb" sw0flows | sort], [0], [dnl
+  table=3 (ls_in_lookup_fdb   ), priority=0    , dnl
+match=(1), action=(next;)
+  table=3 (ls_in_lookup_fdb   ), priority=100  , dnl
+match=(inport == "sw0-p1"), action=(reg0[[11]] = lookup_fdb(inport, eth.src); next;)
+  table=3 (ls_in_lookup_fdb   ), priority=100  , dnl
+match=(inport == "sw0-p3"), action=(reg0[[11]] = lookup_fdb(inport, eth.src); next;)
+])
+
+AT_CHECK([grep "ls_in_put_fdb" sw0flows | sort], [0], [dnl
+  table=4 (ls_in_put_fdb      ), priority=0    , dnl
+match=(1), action=(next;)
+  table=4 (ls_in_put_fdb      ), priority=100  , dnl
+match=(inport == "sw0-p1" && reg0[[11]] == 0), action=(put_fdb(inport, eth.src); next;)
+  table=4 (ls_in_put_fdb      ), priority=100  , dnl
+match=(inport == "sw0-p3" && reg0[[11]] == 0), action=(put_fdb(inport, eth.src); next;)
+])
+
+# Send a packet from sw0-p1 with a different mac not present
+# in it's addresses.
+AS_BOX([Send a pkt from sw0-p1 with a different mac address])
+
+# Use the src mac 50:54:00:00:00:13 instead of 50:54:00:00:00:03
+src_mac=505400000013
+src_ip=$(ip_to_hex 10 0 0 13)
+
+# send the packet to sw0-p2
+dst_mac=505400000004
+dst_ip=$(ip_to_hex 10 0 0 4)
+
+data=0800bee4391a0001
+send_icmp_packet 1 1 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
+
+# There should be one row in fdb
+AS_BOX([Check that the FDB entry is created])
+wait_row_count FDB 1
+
+sw0_dpkey=$(fetch_column datapath_binding tunnel_key external_ids:name=sw0)
+sw0p1_dpkey=$(fetch_column port_binding tunnel_key logical_port=sw0-p1)
+sw0p3_dpkey=$(fetch_column port_binding tunnel_key logical_port=sw0-p3)
+
+check_column '50:54:00:00:00:13' fdb mac
+check_column $sw0_dpkey fdb dp_key
+check_column $sw0p1_dpkey fdb port_key
+
+# Make sure that OVS tables 71 and 72 are populated on both hv1 and hv2.
+AS_BOX([Check that ovn-controller programs the flows for FDB])
+as hv1 ovs-ofctl dump-flows br-int table=71 > hv1_offlows_table71.txt
+as hv2 ovs-ofctl dump-flows br-int table=71 > hv2_offlows_table71.txt
+
+AT_CAPTURE_FILE([hv1_offlows_table71.txt])
+AT_CAPTURE_FILE([hv2_offlows_table71.txt])
+AT_CHECK([cat hv1_offlows_table71.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,metadata=0x1,dl_dst=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG15[[]]
+])
+
+AT_CHECK([cat hv2_offlows_table71.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,metadata=0x1,dl_dst=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG15[[]]
+])
+
+as hv1 ovs-ofctl dump-flows br-int table=72 > hv1_offlows_table72.txt
+as hv2 ovs-ofctl dump-flows br-int table=72 > hv2_offlows_table72.txt
+
+AT_CAPTURE_FILE([hv1_offlows_table72.txt])
+AT_CAPTURE_FILE([hv2_offlows_table72.txt])
+AT_CHECK([cat hv1_offlows_table72.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,reg14=0x1,metadata=0x1,dl_src=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[8]]
+])
+
+AT_CHECK([cat hv2_offlows_table72.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,reg14=0x1,metadata=0x1,dl_src=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[8]]
+])
+
+# Use the src mac 50:54:00:00:00:14 instead of 50:54:00:00:00:03
+src_mac=505400000014
+src_ip=$(ip_to_hex 10 0 0 14)
+
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+
+send_icmp_packet 1 1 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
+
+# There should be two rows in fdb
+wait_row_count FDB 2
+
+check_column "50:54:00:00:00:13 50:54:00:00:00:14" fdb mac
+check_column "$sw0_dpkey $sw0_dpkey" fdb dp_key
+check_column "$sw0p1_dpkey $sw0p1_dpkey" fdb port_key
+
+# Make sure that OVS tables 71 and 72 are populated on both hv1 and hv2.
+as hv1 ovs-ofctl dump-flows br-int table=71 > hv1_offlows_table71.txt
+as hv2 ovs-ofctl dump-flows br-int table=71 > hv2_offlows_table71.txt
+
+AT_CAPTURE_FILE([hv1_offlows_table71.txt])
+AT_CAPTURE_FILE([hv2_offlows_table71.txt])
+AT_CHECK([cat hv1_offlows_table71.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,metadata=0x1,dl_dst=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG15[[]]
+priority=100,metadata=0x1,dl_dst=50:54:00:00:00:14 actions=load:0x1->NXM_NX_REG15[[]]
+])
+
+AT_CHECK([cat hv2_offlows_table71.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,metadata=0x1,dl_dst=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG15[[]]
+priority=100,metadata=0x1,dl_dst=50:54:00:00:00:14 actions=load:0x1->NXM_NX_REG15[[]]
+])
+
+as hv1 ovs-ofctl dump-flows br-int table=72 > hv1_offlows_table72.txt
+as hv2 ovs-ofctl dump-flows br-int table=72 > hv2_offlows_table72.txt
+
+AT_CAPTURE_FILE([hv1_offlows_table72.txt])
+AT_CAPTURE_FILE([hv2_offlows_table72.txt])
+AT_CHECK([cat hv1_offlows_table72.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,reg14=0x1,metadata=0x1,dl_src=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[8]]
+priority=100,reg14=0x1,metadata=0x1,dl_src=50:54:00:00:00:14 actions=load:0x1->NXM_NX_REG10[[8]]
+])
+
+AT_CHECK([cat hv2_offlows_table72.txt | grep -v NXST | cut -d ' ' -f8- | sort], [0], [dnl
+priority=100,reg14=0x1,metadata=0x1,dl_src=50:54:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[8]]
+priority=100,reg14=0x1,metadata=0x1,dl_src=50:54:00:00:00:14 actions=load:0x1->NXM_NX_REG10[[8]]
+])
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+
+# Send the packet from sw0-p2 to sw0-p1 with the dst mac 50:54:00:00:00:13
+src_mac=505400000004
+src_ip=$(ip_to_hex 10 0 0 4)
+
+dst_mac=505400000013
+dst_ip=$(ip_to_hex 10 0 0 13)
+
+send_icmp_packet 1 2 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+dst_mac=505400000014
+dst_ip=$(ip_to_hex 10 0 0 14)
+
+send_icmp_packet 1 2 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+
+# Send a packet from sw0-p2 to an unknown mac. Should be received
+# by both sw0-p1 and sw0-p3 (as unknown is set).
+AS_BOX([Send pkt from sw0-p2 to an unknown mac])
+
+src_mac=505400000004
+src_ip=$(ip_to_hex 10 0 0 4)
+
+dst_mac=505400000023
+dst_ip=$(ip_to_hex 10 0 0 23)
+
+send_icmp_packet 1 2 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected])
+
+AS_BOX([Flip the mac - 50:54:00:00:00:13 from sw0-p1 to sw0-p3])
+
+# Use the src mac 50:54:00:00:00:13
+src_mac=505400000013
+src_ip=$(ip_to_hex 10 0 0 23)
+
+# send the packet to sw0-p2
+dst_mac=505400000004
+dst_ip=$(ip_to_hex 10 0 0 4)
+
+data=0800bee4391a0001
+
+as hv2 reset_pcap_file hv2-vif1 hv2/vif1
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+
+# Send the pkt from sw0-p3 to sw0-p2.
+send_icmp_packet 3 1 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+OVN_CHECK_PACKETS([hv2/vif1-tx.pcap], [expected])
+
+# fdb row count should be still 2. But the mac 50:54:00:00:00:13
+# should be learnt on sw0-p3.
+
+wait_row_count FDB 2
+
+check_column "50:54:00:00:00:13 50:54:00:00:00:14" fdb mac
+check_column "$sw0_dpkey $sw0_dpkey" fdb dp_key
+check_column "$sw0p1_dpkey $sw0p3_dpkey" fdb port_key
+
+check_column "$sw0p3_dpkey" fdb port_key mac="50\:54\:00\:00\:00\:13"
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+
+# Send the packet from sw0-p2 to sw0-p3 with the dst mac 50:54:00:00:00:13
+src_mac=505400000004
+src_ip=$(ip_to_hex 10 0 0 4)
+
+dst_mac=505400000013
+dst_ip=$(ip_to_hex 10 0 0 13)
+
+send_icmp_packet 1 2 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected])
+
+# sw0-p1 should not receive the packet.
+: > expected
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+AS_BOX([Test routing])
+
+# Test the routing.
+# Send the packet from sw1-p2 (hv1) to sw0-p1 (hv1) with dst ip 10.0.0.14
+# The packet should be delivered to sw0-p1 with dst mac 50:54:00:00:00:14
+# Before sending add mac_binding entry for 10.0.0.14
+
+lr0_dp_uuid=$(fetch_column datapath_binding _uuid external_ids:name=lr0)
+
+ovn-sbctl create mac_binding ip=10.0.0.14 logical_port=lr0-sw0 \
+mac="50\:54\:00\:00\:00\:14" datapath=$lr0_dp_uuid
+
+# Wait till the mac_binding flows appear in hv1
+OVS_WAIT_UNTIL([test 1 = $(as hv1 ovs-ofctl dump-flows br-int table=66 \
+| grep -c  reg0=0xa00000e)])
+
+src_mac=405400000004
+src_ip=$(ip_to_hex 11 0 0 4)
+
+dst_mac=00000000ff02 # lr0-sw1 mac
+dst_ip=$(ip_to_hex 10 0 0 14)
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+
+send_icmp_packet 2 1 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+
+exp_packet=50540000001400000000ff0108004500001c00004000fe010100${src_ip}${dst_ip}${data}
+echo $exp_packet > expected
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+# sw0-p3 should not receive the packet.
+: > expected
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected])
+
+# Now send the packet from sw1-p1 (hv2) to sw0-p1 (hv1) with dst ip 10.0.0.14
+# The packet should be delivered to sw0-p1 with dst mac 50:54:00:00:00:14
+
+src_mac=405400000003
+src_ip=$(ip_to_hex 11 0 0 3)
+
+dst_mac=00000000ff02 # lr0-sw1 mac
+dst_ip=$(ip_to_hex 10 0 0 14)
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+send_icmp_packet 2 2 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+
+exp_packet=50540000001400000000ff0108004500001c00004000fe010100${src_ip}${dst_ip}${data}
+echo $exp_packet > expected
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+
+AS_BOX([Clear the FDB rows])
+
+# Clear the fdb rows.
+check ovn-sbctl --all destroy fdb
+ovn-sbctl list fdb
+
+as hv1 reset_pcap_file hv1-vif1 hv1/vif1
+as hv1 reset_pcap_file hv1-vif3 hv1/vif3
+
+# Send the packet from sw0-p2 to sw0-p1 with the dst mac 50:54:00:00:00:14
+# It should be delivered to both sw0-p1 and sw0-p3 since we have cleared the
+# FDB table.
+src_mac=505400000004
+src_ip=$(ip_to_hex 10 0 0 4)
+
+dst_mac=505400000014
+dst_ip=$(ip_to_hex 10 0 0 13)
+
+send_icmp_packet 1 2 $src_mac $dst_mac $src_ip $dst_ip 0000 $data
+
+OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected])
+OVN_CHECK_PACKETS([hv1/vif3-tx.pcap], [expected])
+
+# Make sure that OVS tables 71 and 72 are empty.
+as hv1 ovs-ofctl dump-flows br-int table=71 > hv1_offlows_table71.txt
+as hv2 ovs-ofctl dump-flows br-int table=71 > hv2_offlows_table71.txt
+
+AT_CAPTURE_FILE([hv1_offlows_table71.txt])
+AT_CAPTURE_FILE([hv2_offlows_table71.txt])
+AT_CHECK([cat hv1_offlows_table71.txt | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([cat hv2_offlows_table71.txt | grep -v NXST], [1], [dnl
+])
+
+as hv1 ovs-ofctl dump-flows br-int table=72 > hv1_offlows_table72.txt
+as hv2 ovs-ofctl dump-flows br-int table=72 > hv2_offlows_table72.txt
+
+AT_CAPTURE_FILE([hv1_offlows_table72.txt])
+AT_CAPTURE_FILE([hv2_offlows_table72.txt])
+AT_CHECK([cat hv1_offlows_table72.txt | grep -v NXST], [1], [dnl
+])
+
+AT_CHECK([cat hv2_offlows_table72.txt | grep -v NXST], [1], [dnl
+])
+
+OVN_CLEANUP([hv1], [hv2])
+AT_CLEANUP
diff --git a/tests/ovs-macros.at b/tests/ovs-macros.at
index 05b17ebce..8b1b03e24 100644
--- a/tests/ovs-macros.at
+++ b/tests/ovs-macros.at
@@ -266,14 +266,16 @@ m4_define([OVS_WAIT_UNTIL],
   [OVS_WAIT([$1], [$2], [AT_LINE], [until $1])])
 
 dnl OVS_WAIT_FOR_OUTPUT(COMMAND, EXIT-STATUS, STDOUT, STDERR)
+dnl OVS_WAIT_FOR_OUTPUT_UNQUOTED(COMMAND, EXIT-STATUS, STDOUT, STDERR)
 dnl
 dnl Executes shell COMMAND in a loop until it exits with status EXIT-STATUS,
 dnl prints STDOUT on stdout, and prints STDERR on stderr.  If this doesn't
 dnl happen within a reasonable time limit, then the test fails.
-m4_define([OVS_WAIT_FOR_OUTPUT], [dnl
+dnl
+dnl The UNQUOTED version expands shell $variables, $(command)s, and so on.
+dnl The plain version does not.
+m4_define([OVS_WAIT_FOR_OUTPUT__], [dnl
 wait_expected_status=m4_if([$2], [], [0], [$2])
-AT_DATA([wait-expected-stdout], [$3])
-AT_DATA([wait-expected-stderr], [$4])
 ovs_wait_command() {
     $1
 }
@@ -293,6 +295,18 @@ ovs_wait_failed () {
 }
 ovs_wait "AS_ESCAPE([AT_LINE])" "for output from AS_ESCAPE([$1])"
 ])
+m4_define([OVS_WAIT_FOR_OUTPUT], [dnl
+AT_DATA([wait-expected-stdout], [$3])
+AT_DATA([wait-expected-stderr], [$4])
+OVS_WAIT_FOR_OUTPUT__([$1], [$2])
+])
+m4_define([OVS_WAIT_FOR_OUTPUT_UNQUOTED], [dnl
+cat > wait-expected-stdout <<EOF
+$3[]EOF
+cat > wait-expected-stderr <<EOF
+$4[]EOF
+OVS_WAIT_FOR_OUTPUT__([$1], [$2])
+])
     
 dnl OVS_WAIT_WHILE(COMMAND[, IF-FAILED])
 dnl
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index d59f7c97e..9819573bb 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -1574,6 +1574,18 @@ OVS_WAIT_UNTIL([
     grep "selection_method=hash,fields(ip_src,ip_dst,sctp_src,sctp_dst)" -c) -eq 2
 ])
 
+ovn-nbctl --reject lb-add lb3 30.0.0.10:80 ""
+ovn-nbctl ls-lb-add foo lb3
+# Filter reset segments
+NS_CHECK_EXEC([foo1], [tcpdump -c 1 -neei foo1 ip[[33:1]]=0x14 > rst.pcap &])
+sleep 1
+NS_CHECK_EXEC([foo1], [wget -q 30.0.0.10],[4])
+
+OVS_WAIT_UNTIL([
+    n_reset=$(cat rst.pcap | wc -l)
+    test "${n_reset}" = "1"
+])
+
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
 
 as ovn-sb
@@ -2212,6 +2224,144 @@ tcp,orig=(src=172.16.1.2,dst=192.168.2.2,sport=<cleared>,dport=<cleared>),reply=
 
 OVS_WAIT_UNTIL([check_est_flows], [check established flows])
 
+ovn-nbctl set logical_router R2 options:lb_force_snat_ip=router_ip
+
+# Destroy the load balancer and create again. ovn-controller will
+# clear the OF flows and re-add them, which clears the n_packets
+# for these flows.
+ovn-nbctl destroy load_balancer $uuid
+uuid=`ovn-nbctl  create load_balancer vips:30.0.0.1="192.168.1.2,192.168.2.2"`
+ovn-nbctl set logical_router R2 load_balancer=$uuid
+
+# Config OVN load-balancer with another VIP (this time with ports).
+ovn-nbctl set load_balancer $uuid vips:'"30.0.0.2:8000"'='"192.168.1.2:80,192.168.2.2:80"'
+
+ovn-nbctl list load_balancer
+ovn-sbctl dump-flows R2
+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=41 | \
+grep 'nat(src=20.0.0.2)'])
+
+rm -f wget*.log
+
+dnl Test load-balancing that includes L4 ports in NAT.
+for i in `seq 1 20`; do
+    echo Request $i
+    NS_CHECK_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log])
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,labels=0x2,protoinfo=(state=<cleared>)
+tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,labels=0x2,protoinfo=(state=<cleared>)
+])
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0.2) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=172.16.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=20.0.0.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=172.16.1.2,dst=192.168.2.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=20.0.0.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
+])
+
+OVS_WAIT_UNTIL([check_est_flows], [check established flows])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([ovn-northd])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+
+AT_SETUP([ovn -- load balancing in gateway router hairpin scenario])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+check ovs-vsctl add-br br-ext
+
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add R1
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl ls-add public
+
+check ovn-nbctl lrp-add R1 rp-sw0 00:00:01:01:02:03 192.168.1.1/24
+check ovn-nbctl lrp-add R1 rp-public 00:00:02:01:02:03 172.16.1.1/24
+
+check ovn-nbctl set logical_router R1 options:chassis=hv1
+
+check ovn-nbctl lsp-add sw0 sw0-rp -- set Logical_Switch_Port sw0-rp \
+    type=router options:router-port=rp-sw0 \
+    -- lsp-set-addresses sw0-rp router
+
+check ovn-nbctl lsp-add public public-rp -- set Logical_Switch_Port public-rp \
+    type=router options:router-port=rp-public \
+    -- lsp-set-addresses public-rp router
+
+check ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext
+
+check ovn-nbctl lsp-add public public1 \
+        -- lsp-set-addresses public1 unknown \
+        -- lsp-set-type public1 localnet \
+        -- lsp-set-options public1 network_name=phynet
+
+ADD_NAMESPACES(server)
+ADD_VETH(s1, server, br-ext, "172.16.1.100/24", "1a:00:00:00:00:01", \
+         "172.16.1.1")
+
+OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep fe80 | grep tentative)" = ""])
+
+ADD_NAMESPACES(client)
+ADD_VETH(c1, client, br-ext, "172.16.1.110/24", "1a:00:00:00:00:02", \
+         "172.16.1.1")
+
+OVS_WAIT_UNTIL([test "$(ip netns exec client ip a | grep fe80 | grep tentative)" = ""])
+
+# Start webservers in 'server'.
+OVS_START_L7([server], [http])
+
+# Create a load balancer and associate to R1
+check ovn-nbctl lb-add lb1 172.16.1.150:80 172.16.1.100:80
+check ovn-nbctl lr-lb-add R1 lb1
+
+check ovn-nbctl --wait=hv sync
+
+for i in $(seq 1 5); do
+    echo Request $i
+    NS_CHECK_EXEC([client], [wget 172.16.1.100 -t 5 -T 1 --retry-connrefused -v -o wget$i.log])
+done
+
+# Now send the traffic from client to the VIP - 172.16.1.150
+check ovn-nbctl set logical_router R1 options:lb_force_snat_ip=router_ip
+check ovn-nbctl --wait=hv sync
+
+for i in $(seq 1 5); do
+    echo Request $i
+    NS_CHECK_EXEC([client], [wget 172.16.1.150 -t 5 -T 1 --retry-connrefused -v -o wget$i.log])
+done
+
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
 
 as ovn-sb
@@ -2225,6 +2375,7 @@ OVS_APP_EXIT_AND_WAIT([ovn-northd])
 
 as
 OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/Failed to acquire.*/d
 /connection dropped.*/d"])
 AT_CLEANUP
 
@@ -4151,7 +4302,7 @@ ovn-nbctl lsp-set-type sw1-lr0 router
 ovn-nbctl lsp-set-addresses sw1-lr0 router
 ovn-nbctl lsp-set-options sw1-lr0 router-port=lr0-sw1
 
-ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80
+ovn-nbctl --reject lb-add lb1 10.0.0.10:80 10.0.0.3:80,20.0.0.3:80
 
 ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:10.0.0.3=sw0-p1:10.0.0.2
 ovn-nbctl --wait=sb set load_balancer . ip_port_mappings:20.0.0.3=sw1-p1:20.0.0.2
@@ -4266,6 +4417,20 @@ ovn-sbctl list service_monitor
 OVS_WAIT_UNTIL([test 2 = `ovn-sbctl --bare --columns status find \
 service_monitor protocol=udp | sed '/^$/d' | grep offline | wc -l`])
 
+# Stop webserver in sw1-p1
+pid_file=$(cat l7_pid_file)
+NS_CHECK_EXEC([sw1-p1], [kill $(cat $pid_file)])
+
+NS_CHECK_EXEC([sw0-p2], [tcpdump -c 1 -neei sw0-p2 ip[[33:1]]=0x14 > rst.pcap &])
+OVS_WAIT_UNTIL([test 2 = `ovn-sbctl --bare --columns status find \
+service_monitor protocol=tcp | sed '/^$/d' | grep offline | wc -l`])
+NS_CHECK_EXEC([sw0-p2], [wget 10.0.0.10 -v -o wget$i.log],[4])
+
+OVS_WAIT_UNTIL([
+    n_reset=$(cat rst.pcap | wc -l)
+    test "${n_reset}" = "1"
+])
+
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
 
 as ovn-sb
@@ -4309,10 +4474,14 @@ start_daemon ovn-controller
 # One logical switch with IPv4 load balancers that hairpin the traffic.
 ovn-nbctl ls-add sw
 ovn-nbctl lsp-add sw lsp -- lsp-set-addresses lsp 00:00:00:00:00:01
-ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp
-ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp
+ovn-nbctl lb-add lb-ipv4-tcp     88.88.88.88:8080 42.42.42.1:4041 tcp
+ovn-nbctl lb-add lb-ipv4-tcp-dup 88.88.88.89:8080 42.42.42.1:4041 tcp
+ovn-nbctl lb-add lb-ipv4-udp     88.88.88.88:4040 42.42.42.1:2021 udp
+ovn-nbctl lb-add lb-ipv4-udp-dup 88.88.88.89:4040 42.42.42.1:2021 udp
 ovn-nbctl ls-lb-add sw lb-ipv4-tcp
+ovn-nbctl ls-lb-add sw lb-ipv4-tcp-dup
 ovn-nbctl ls-lb-add sw lb-ipv4-udp
+ovn-nbctl ls-lb-add sw lb-ipv4-udp-dup
 
 ovn-nbctl lr-add rtr
 ovn-nbctl lrp-add rtr rtr-sw 00:00:00:00:01:00 42.42.42.254/24
@@ -4328,24 +4497,26 @@ ADD_VETH(lsp, lsp, br-int, "42.42.42.1/24", "00:00:00:00:00:01", \
 ovn-nbctl --wait=hv -t 3 sync
 
 # Start IPv4 TCP server on lsp.
-NS_CHECK_EXEC([lsp], [timeout 2s nc -l 42.42.42.1 4041 &], [0])
+NS_CHECK_EXEC([lsp], [timeout 2s nc -k -l 42.42.42.1 4041 &], [0])
 
-# Check that IPv4 TCP hairpin connection succeeds.
+# Check that IPv4 TCP hairpin connection succeeds on both VIPs.
 NS_CHECK_EXEC([lsp], [nc 88.88.88.88 8080 -z], [0])
+NS_CHECK_EXEC([lsp], [nc 88.88.88.89 8080 -z], [0])
 
 # Capture IPv4 UDP hairpinned packets.
-filter="src 88.88.88.88 and dst 42.42.42.1 and dst port 2021 and udp"
-NS_CHECK_EXEC([lsp], [tcpdump -n -c 1 -i lsp ${filter} > lsp.pcap &])
+filter="dst 42.42.42.1 and dst port 2021 and udp"
+NS_CHECK_EXEC([lsp], [tcpdump -n -c 2 -i lsp ${filter} > lsp.pcap &])
 
 sleep 1
 
 # Generate IPv4 UDP hairpin traffic.
 NS_CHECK_EXEC([lsp], [nc -u 88.88.88.88 4040 -z &], [0])
+NS_CHECK_EXEC([lsp], [nc -u 88.88.88.89 4040 -z &], [0])
 
 # Check hairpin traffic.
 OVS_WAIT_UNTIL([
     total_pkts=$(cat lsp.pcap | wc -l)
-    test "${total_pkts}" = "1"
+    test "${total_pkts}" = "2"
 ])
 
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
@@ -4388,10 +4559,14 @@ start_daemon ovn-controller
 # One logical switch with IPv6 load balancers that hairpin the traffic.
 ovn-nbctl ls-add sw
 ovn-nbctl lsp-add sw lsp -- lsp-set-addresses lsp 00:00:00:00:00:01
-ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp
-ovn-nbctl lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp
+ovn-nbctl lb-add lb-ipv6-tcp     [[8800::0088]]:8080 [[4200::1]]:4041 tcp
+ovn-nbctl lb-add lb-ipv6-tcp-dup [[8800::0089]]:8080 [[4200::1]]:4041 tcp
+ovn-nbctl lb-add lb-ipv6-udp     [[8800::0088]]:4040 [[4200::1]]:2021 udp
+ovn-nbctl lb-add lb-ipv6-udp-dup [[8800::0089]]:4040 [[4200::1]]:2021 udp
 ovn-nbctl ls-lb-add sw lb-ipv6-tcp
+ovn-nbctl ls-lb-add sw lb-ipv6-tcp-dup
 ovn-nbctl ls-lb-add sw lb-ipv6-udp
+ovn-nbctl ls-lb-add sw lb-ipv6-udp-dup
 
 ovn-nbctl lr-add rtr
 ovn-nbctl lrp-add rtr rtr-sw 00:00:00:00:01:00 4200::00ff/64
@@ -4406,24 +4581,26 @@ OVS_WAIT_UNTIL([test "$(ip netns exec lsp ip a | grep 4200::1 | grep tentative)"
 ovn-nbctl --wait=hv -t 3 sync
 
 # Start IPv6 TCP server on lsp.
-NS_CHECK_EXEC([lsp], [timeout 2s nc -l 4200::1 4041 &], [0])
+NS_CHECK_EXEC([lsp], [timeout 2s nc -k -l 4200::1 4041 &], [0])
 
-# Check that IPv6 TCP hairpin connection succeeds.
+# Check that IPv6 TCP hairpin connection succeeds on both VIPs.
 NS_CHECK_EXEC([lsp], [nc 8800::0088 8080 -z], [0])
+NS_CHECK_EXEC([lsp], [nc 8800::0089 8080 -z], [0])
 
 # Capture IPv4 UDP hairpinned packets.
-filter="src 8800::0088 and dst 4200::1 and dst port 2021 and udp"
-NS_CHECK_EXEC([lsp], [tcpdump -n -c 1 -i lsp $filter > lsp.pcap &])
+filter="dst 4200::1 and dst port 2021 and udp"
+NS_CHECK_EXEC([lsp], [tcpdump -n -c 2 -i lsp $filter > lsp.pcap &])
 
 sleep 1
 
 # Generate IPv6 UDP hairpin traffic.
 NS_CHECK_EXEC([lsp], [nc -u 8800::0088 4040 -z &], [0])
+NS_CHECK_EXEC([lsp], [nc -u 8800::0089 4040 -z &], [0])
 
 # Check hairpin traffic.
 OVS_WAIT_UNTIL([
     total_pkts=$(cat lsp.pcap | wc -l)
-    test "${total_pkts}" = "1"
+    test "${total_pkts}" = "2"
 ])
 
 OVS_APP_EXIT_AND_WAIT([ovn-controller])
@@ -5505,3 +5682,152 @@ as
 OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
 /.*terminating with signal 15.*/d"])
 AT_CLEANUP
+
+AT_SETUP([ovn -- BFD])
+AT_SKIP_IF([test $HAVE_BFDD_BEACON = no])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
+AT_KEYWORDS([ovn-bfd])
+
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+
+ADD_BR([br-int])
+ADD_BR([br-ext])
+
+check ovs-ofctl add-flow br-ext action=normal
+# Set external-ids in br-int needed for ovn-controller
+check ovs-vsctl \
+        -- set Open_vSwitch . external-ids:system-id=hv1 \
+        -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+        -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+        -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+        -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ovn-nbctl lr-add R1
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl ls-add sw1
+check ovn-nbctl ls-add public
+
+check ovn-nbctl lrp-add R1 rp-sw0 00:00:01:01:02:03 192.168.1.1/24
+check ovn-nbctl lrp-add R1 rp-sw1 00:00:03:01:02:03 192.168.2.1/24
+check ovn-nbctl lrp-add R1 rp-public 00:00:02:01:02:03 172.16.1.1/24 1000::a/64 \
+    -- lrp-set-gateway-chassis rp-public hv1
+
+check ovn-nbctl lsp-add sw0 sw0-rp -- set Logical_Switch_Port sw0-rp \
+    type=router options:router-port=rp-sw0 \
+    -- lsp-set-addresses sw0-rp router
+check ovn-nbctl lsp-add sw1 sw1-rp -- set Logical_Switch_Port sw1-rp \
+    type=router options:router-port=rp-sw1 \
+    -- lsp-set-addresses sw1-rp router
+
+check ovn-nbctl lsp-add public public-rp -- set Logical_Switch_Port public-rp \
+    type=router options:router-port=rp-public \
+    -- lsp-set-addresses public-rp router
+
+ADD_NAMESPACES(sw01)
+ADD_VETH(sw01, sw01, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \
+         "192.168.1.1")
+check ovn-nbctl lsp-add sw0 sw01 \
+    -- lsp-set-addresses sw01 "f0:00:00:01:02:03 192.168.1.2"
+
+ADD_NAMESPACES(sw11)
+ADD_VETH(sw11, sw11, br-int, "192.168.2.2/24", "f0:00:00:02:02:03", \
+         "192.168.2.1")
+check ovn-nbctl lsp-add sw1 sw11 \
+    -- lsp-set-addresses sw11 "f0:00:00:02:02:03 192.168.2.2"
+
+ADD_NAMESPACES(server)
+NS_CHECK_EXEC([server], [ip link set dev lo up])
+ADD_VETH(s1, server, br-ext, "172.16.1.50/24", "f0:00:00:01:02:05", \
+         "172.16.1.1")
+NS_CHECK_EXEC([server], [ip addr add 1000::b/64 dev s1])
+
+AT_CHECK([ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext])
+check ovn-nbctl lsp-add public public1 \
+        -- lsp-set-addresses public1 unknown \
+        -- lsp-set-type public1 localnet \
+        -- lsp-set-options public1 network_name=phynet
+
+NS_CHECK_EXEC([server], [bfdd-beacon --listen=172.16.1.50], [0])
+NS_CHECK_EXEC([server], [bfdd-control allow 172.16.1.1], [0], [dnl
+Allowing connections from 172.16.1.1
+])
+
+check ovn-nbctl --bfd lr-route-add R1 100.0.0.0/8 172.16.1.50 rp-public
+uuid=$(fetch_column nb:bfd _uuid logical_port="rp-public")
+route_uuid=$(fetch_column nb:logical_router_static_route _uuid ip_prefix="100.0.0.0/8")
+check ovn-nbctl --wait=hv sync
+
+wait_column "up" nb:bfd status logical_port=rp-public
+OVS_WAIT_UNTIL([ovn-sbctl dump-flows R1 | grep 'match=(ip4.dst == 100.0.0.0/8)' | grep -q 172.16.1.50])
+
+# un-associate the bfd connection and the static route
+check ovn-nbctl clear logical_router_static_route $route_uuid bfd
+wait_column "admin_down" nb:bfd status logical_port=rp-public
+OVS_WAIT_UNTIL([ip netns exec server bfdd-control status | grep -qi state=Down])
+NS_CHECK_EXEC([server], [tcpdump -nni s1 udp port 3784 -Q in > bfd.pcap &])
+sleep 5
+kill $(pidof tcpdump)
+AT_CHECK([grep -qi bfd bfd.pcap],[1])
+
+# restart the connection
+check ovn-nbctl set logical_router_static_route $route_uuid bfd=$uuid
+wait_column "up" nb:bfd status logical_port=rp-public
+
+# switch to gw router configuration
+check ovn-nbctl clear logical_router_static_route $route_uuid bfd
+wait_column "admin_down" nb:bfd status logical_port=rp-public
+OVS_WAIT_UNTIL([ip netns exec server bfdd-control status | grep -qi state=Down])
+check ovn-nbctl clear logical_router_port rp-public gateway_chassis
+check ovn-nbctl set logical_router R1 options:chassis=hv1
+check ovn-nbctl set logical_router_static_route $route_uuid bfd=$uuid
+wait_column "up" nb:bfd status logical_port=rp-public
+
+# stop bfd endpoint
+NS_CHECK_EXEC([server], [bfdd-control stop], [0], [dnl
+stopping
+])
+
+wait_column "down" nb:bfd status logical_port=rp-public
+OVS_WAIT_UNTIL([test "$(ovn-sbctl dump-flows R1 | grep 'match=(ip4.dst == 100.0.0.0/8)' | grep 172.16.1.50)" = ""])
+
+# remove bfd entry
+ovn-nbctl destroy bfd $uuid
+check_row_count bfd 0
+NS_CHECK_EXEC([server], [tcpdump -nni s1 udp port 3784 -Q in > bfd.pcap &])
+sleep 5
+kill $(pidof tcpdump)
+AT_CHECK([grep -qi bfd bfd.pcap],[1])
+
+uuid_v6=$(ovn-nbctl create bfd logical_port=rp-public dst_ip=\"1000::b\")
+check ovn-nbctl lr-route-add R1 2000::/64 1000::b
+route_uuid_v6=$(fetch_column nb:logical_router_static_route _uuid ip_prefix=\"2000::/64\")
+ovn-nbctl set logical_router_static_route $route_uuid_v6 bfd=$uuid_v6
+check ovn-nbctl --wait=hv sync
+NS_CHECK_EXEC([server], [bfdd-beacon --listen=1000::b], [0])
+NS_CHECK_EXEC([server], [bfdd-control allow 1000::a], [0], [dnl
+Allowing connections from 1000::a
+])
+
+wait_column "up" nb:bfd status logical_port=rp-public
+ovn-nbctl destroy bfd $uuid_v6
+
+kill $(pidof ovn-controller)
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([ovn-northd])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
+/.*terminating with signal 15.*/d"])
+AT_CLEANUP
diff --git a/tests/test-ovn.c b/tests/test-ovn.c
index 49a1947f6..3fbe90b32 100644
--- a/tests/test-ovn.c
+++ b/tests/test-ovn.c
@@ -1346,6 +1346,8 @@ test_parse_actions(struct ovs_cmdl_context *ctx OVS_UNUSED)
                 .lb_hairpin_ptable = OFTABLE_CHK_LB_HAIRPIN,
                 .lb_hairpin_reply_ptable = OFTABLE_CHK_LB_HAIRPIN_REPLY,
                 .ct_snat_vip_ptable = OFTABLE_CT_SNAT_FOR_VIP,
+                .fdb_ptable = OFTABLE_GET_FDB,
+                .fdb_lookup_ptable = OFTABLE_LOOKUP_FDB,
             };
             struct ofpbuf ofpacts;
             ofpbuf_init(&ofpacts, 0);
diff --git a/tests/testsuite.at b/tests/testsuite.at
index 960227dcc..3eba785c6 100644
--- a/tests/testsuite.at
+++ b/tests/testsuite.at
@@ -26,6 +26,7 @@ m4_include([tests/ovn.at])
 m4_include([tests/ovn-performance.at])
 m4_include([tests/ovn-northd.at])
 m4_include([tests/ovn-nbctl.at])
+m4_include([tests/ovn-ofctrl-seqno.at])
 m4_include([tests/ovn-sbctl.at])
 m4_include([tests/ovn-ic-nbctl.at])
 m4_include([tests/ovn-ic-sbctl.at])
diff --git a/utilities/checkpatch.py b/utilities/checkpatch.py
index 981a433be..fb96fd66b 100755
--- a/utilities/checkpatch.py
+++ b/utilities/checkpatch.py
@@ -189,7 +189,8 @@ line_length_blacklist = re.compile(
 # Don't enforce a requirement that leading whitespace be all spaces on
 # files that include these characters in their name, since these kinds
 # of files need lines with leading tabs.
-leading_whitespace_blacklist = re.compile(r'\.(mk|am|at)$|debian/rules')
+leading_whitespace_blacklist = re.compile(
+    r'\.(mk|am|at)$|debian/rules|\.gitmodules$')
 
 
 def is_subtracted_line(line):
diff --git a/utilities/ovn-ctl b/utilities/ovn-ctl
index c44201ccf..211c764a6 100755
--- a/utilities/ovn-ctl
+++ b/utilities/ovn-ctl
@@ -251,6 +251,11 @@ $cluster_remote_port
 
     [ "$OVN_USER" != "" ] && set "$@" --user "$OVN_USER"
 
+    if test X"$OVSDB_DISABLE_FILE_COLUMN_DIFF" = Xyes; then
+        (ovsdb-server --help | grep -q disable-file-column-diff) \
+            && set "$@" --disable-file-column-diff
+    fi
+
     if test X"$detach" != Xno; then
         set "$@" --detach --monitor
     else
@@ -715,6 +720,8 @@ set_defaults () {
     OVSDB_NB_WRAPPER=
     OVSDB_SB_WRAPPER=
 
+    OVSDB_DISABLE_FILE_COLUMN_DIFF=no
+
     OVN_USER=
 
     OVN_CONTROLLER_LOG="-vconsole:emer -vsyslog:err -vfile:info"
@@ -932,6 +939,11 @@ Options:
   --ovs-user="user[:group]"      pass the --user flag to ovs daemons
   --ovsdb-nb-wrapper=WRAPPER     run with a wrapper like valgrind for debugging
   --ovsdb-sb-wrapper=WRAPPER     run with a wrapper like valgrind for debugging
+  --ovsdb-disable-file-column-diff=no|yes
+                                 Specifies whether or not ovsdb-server
+                                 processes should be started with
+                                 --disable-file-column-diff.
+                                 More details in ovsdb(7).  (default: no)
   -h, --help                     display this help message
 
 File location options:
diff --git a/utilities/ovn-nbctl.8.xml b/utilities/ovn-nbctl.8.xml
index 59302296b..2cab592ce 100644
--- a/utilities/ovn-nbctl.8.xml
+++ b/utilities/ovn-nbctl.8.xml
@@ -659,6 +659,7 @@
     <dl>
       <dt>[<code>--may-exist</code>] [<code>--policy</code>=<var>POLICY</var>]
         [<code>--ecmp</code>] [<code>--ecmp-symmetric-reply</code>]
+        [<code>--bfd[=<var>UUID</var></code>]]
         <code>lr-route-add</code> <var>router</var>
         <var>prefix</var> <var>nexthop</var> [<var>port</var>]</dt>
       <dd>
@@ -695,6 +696,16 @@
           it is not necessary to set both.
         </p>
 
+        <p>
+          The <code>--bfd</code> option is used to link a BFD session to the
+          OVN route. If the BFD session UUID is provided, it will be used
+          for the OVN route, otherwise the next-hop will be used to perform
+          a lookup in the OVN BFD table.
+          If the lookup fails and <var>port</var> is specified, a new entry
+          in the BFD table will be created using the <var>nexthop</var> as
+          <var>dst_ip</var> and <var>port</var> as <var>logical_port</var>.
+        </p>
+
         <p>
           It is an error if a route with <var>prefix</var> and
           <var>POLICY</var> already exists, unless <code>--may-exist</code>,
@@ -739,7 +750,7 @@
     <dl>
       <dt>[<code>--may-exist</code>]<code>lr-policy-add</code>
           <var>router</var> <var>priority</var> <var>match</var>
-          <var>action</var> [<var>nexthop</var>]
+          <var>action</var> [<var>nexthop</var>[,<var>nexthop</var>,...]]
           [<var>options key=value]</var>] </dt>
       <dd>
         <p>
@@ -748,10 +759,12 @@
           are similar to OVN ACLs, but exist on the logical-router. Reroute
           policies are needed for service-insertion and service-chaining.
           <var>nexthop</var> is an optional parameter. It needs to be provided
-          only when <var>action</var> is <var>reroute</var>. A policy is
-          uniquely identified by <var>priority</var> and <var>match</var>.
-          Multiple policies can have the same <var>priority</var>.
-          <var>options</var> sets the router policy options as key-value pair.
+          only when <var>action</var> is <var>reroute</var>. Multiple
+          <code>nexthops</code> can be specified for ECMP routing.
+          A policy is uniquely identified by <var>priority</var> and
+          <var>match</var>. Multiple policies can have the same
+          <var>priority</var>. <var>options</var> sets the router policy
+          options as key-value pair.
           The supported option is : <code>pkt_mark</code>.
         </p>
 
@@ -903,7 +916,7 @@
 
     <h1>Load Balancer Commands</h1>
     <dl>
-      <dt>[<code>--may-exist</code> | <code>--add-duplicate</code>] <code>lb-add</code> <var>lb</var> <var>vip</var> <var>ips</var> [<var>protocol</var>]</dt>
+      <dt>[<code>--may-exist</code> | <code>--add-duplicate</code> | <code>--reject</code> | <code>--event</code>] <code>lb-add</code> <var>lb</var> <var>vip</var> <var>ips</var> [<var>protocol</var>]</dt>
       <dd>
         <p>
          Creates a new load balancer named <var>lb</var> with the provided
@@ -936,6 +949,23 @@
          creates a new load balancer with a duplicate name.
         </p>
 
+        <p>
+         If the load balancer is created with <code>--reject</code> option and
+         it has no active backends, a TCP reset segment (for tcp) or an ICMP
+         port unreachable packet (for all other kinds of traffic) will be sent
+         whenever an incoming packet is received for this load-balancer.
+         Please note using <code>--reject</code> option will disable
+         empty_lb SB controller event for this load balancer.
+        </p>
+
+        <p>
+         If the load balancer is created with <code>--event</code> option and
+         it has no active backends, whenever the lb receives traffic, the event
+         is reported in the Controller_Event table in the SB db.
+         Please note <code>--event</code> option can't be specified with
+         <code>--reject</code> one.
+        </p>
+
         <p>
          The following example adds a load balancer.
         </p>
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index d19e1b6c6..dc0c50854 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -125,6 +125,65 @@ static char * OVS_WARN_UNUSED_RESULT main_loop(const char *args,
                                                const struct timer *);
 static void server_loop(struct ovsdb_idl *idl, int argc, char *argv[]);
 
+/* A context for keeping track of which switch/router certain ports are
+ * connected to.
+ *
+ * It is required to track changes that we did within current set of commands
+ * because partial updates of sets in database are not reflected in the idl
+ * until transaction is committed and updates received from the server. */
+struct nbctl_context {
+    struct ctl_context base;
+    struct shash lsp_to_ls_map;
+    struct shash lrp_to_lr_map;
+    bool context_valid;
+};
+
+static void
+nbctl_context_init(struct nbctl_context *nbctx)
+{
+    nbctx->context_valid = false;
+    shash_init(&nbctx->lsp_to_ls_map);
+    shash_init(&nbctx->lrp_to_lr_map);
+}
+
+static void
+nbctl_context_destroy(struct nbctl_context *nbctx)
+{
+    nbctx->context_valid = false;
+    shash_destroy(&nbctx->lsp_to_ls_map);
+    shash_destroy(&nbctx->lrp_to_lr_map);
+}
+
+/* Casts 'base' into 'struct nbctl_context' and initializes it if needed. */
+static struct nbctl_context *
+nbctl_context_get(struct ctl_context *base)
+{
+    struct nbctl_context *nbctx;
+
+    nbctx = CONTAINER_OF(base, struct nbctl_context, base);
+
+    if (nbctx->context_valid) {
+        return nbctx;
+    }
+
+    const struct nbrec_logical_switch *ls;
+    NBREC_LOGICAL_SWITCH_FOR_EACH (ls, base->idl) {
+        for (size_t i = 0; i < ls->n_ports; i++) {
+            shash_add_once(&nbctx->lsp_to_ls_map, ls->ports[i]->name, ls);
+        }
+    }
+
+    const struct nbrec_logical_router *lr;
+    NBREC_LOGICAL_ROUTER_FOR_EACH (lr, base->idl) {
+        for (size_t i = 0; i < lr->n_ports; i++) {
+            shash_add_once(&nbctx->lrp_to_lr_map, lr->ports[i]->name, lr);
+        }
+    }
+
+    nbctx->context_valid = true;
+    return nbctx;
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -707,7 +766,7 @@ Route commands:\n\
   lr-route-list ROUTER      print routes for ROUTER\n\
 \n\
 Policy commands:\n\
-  lr-policy-add ROUTER PRIORITY MATCH ACTION [NEXTHOP] \
+  lr-policy-add ROUTER PRIORITY MATCH ACTION [NEXTHOP,[NEXTHOP,...]] \
 [OPTIONS KEY=VALUE ...] \n\
                             add a policy to router\n\
   lr-policy-del ROUTER [{PRIORITY | UUID} [MATCH]]\n\
@@ -1249,6 +1308,7 @@ static void
 nbctl_ls_del(struct ctl_context *ctx)
 {
     bool must_exist = !shash_find(&ctx->options, "--if-exists");
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
     const char *id = ctx->argv[1];
     const struct nbrec_logical_switch *ls = NULL;
 
@@ -1261,6 +1321,11 @@ nbctl_ls_del(struct ctl_context *ctx)
         return;
     }
 
+    /* Updating runtime cache. */
+    for (size_t i = 0; i < ls->n_ports; i++) {
+        shash_find_and_delete(&nbctx->lsp_to_ls_map, ls->ports[i]->name);
+    }
+
     nbrec_logical_switch_delete(ls);
 }
 
@@ -1317,22 +1382,19 @@ lsp_by_name_or_uuid(struct ctl_context *ctx, const char *id,
 
 /* Returns the logical switch that contains 'lsp'. */
 static char * OVS_WARN_UNUSED_RESULT
-lsp_to_ls(const struct ovsdb_idl *idl,
+lsp_to_ls(struct ctl_context *ctx,
           const struct nbrec_logical_switch_port *lsp,
           const struct nbrec_logical_switch **ls_p)
 {
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
     const struct nbrec_logical_switch *ls;
     *ls_p = NULL;
 
-    NBREC_LOGICAL_SWITCH_FOR_EACH (ls, idl) {
-        for (size_t i = 0; i < ls->n_ports; i++) {
-            if (ls->ports[i] == lsp) {
-                *ls_p = ls;
-                return NULL;
-            }
-        }
+    ls = shash_find_data(&nbctx->lsp_to_ls_map, lsp->name);
+    if (ls) {
+        *ls_p = ls;
+        return NULL;
     }
-
     /* Can't happen because of the database schema */
     return xasprintf("logical port %s is not part of any logical switch",
                      lsp->name);
@@ -1353,6 +1415,7 @@ static void
 nbctl_lsp_add(struct ctl_context *ctx)
 {
     bool may_exist = shash_find(&ctx->options, "--may-exist") != NULL;
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
 
     const struct nbrec_logical_switch *ls = NULL;
     char *error = ls_by_name_or_uuid(ctx, ctx->argv[1], true, &ls);
@@ -1395,7 +1458,7 @@ nbctl_lsp_add(struct ctl_context *ctx)
         }
 
         const struct nbrec_logical_switch *lsw;
-        error = lsp_to_ls(ctx->idl, lsp, &lsw);
+        error = lsp_to_ls(ctx, lsp, &lsw);
         if (error) {
             ctx->error = error;
             return;
@@ -1448,31 +1511,27 @@ nbctl_lsp_add(struct ctl_context *ctx)
     }
 
     /* Insert the logical port into the logical switch. */
-    nbrec_logical_switch_verify_ports(ls);
-    struct nbrec_logical_switch_port **new_ports = xmalloc(sizeof *new_ports *
-                                                    (ls->n_ports + 1));
-    nullable_memcpy(new_ports, ls->ports, sizeof *new_ports * ls->n_ports);
-    new_ports[ls->n_ports] = CONST_CAST(struct nbrec_logical_switch_port *,
-                                             lsp);
-    nbrec_logical_switch_set_ports(ls, new_ports, ls->n_ports + 1);
-    free(new_ports);
+    nbrec_logical_switch_update_ports_addvalue(ls, lsp);
+
+    /* Updating runtime cache. */
+    shash_add(&nbctx->lsp_to_ls_map, lsp_name, ls);
 }
 
-/* Removes logical switch port 'ls->ports[idx]'. */
+/* Removes logical switch port 'lsp' from the logical switch 'ls'. */
 static void
-remove_lsp(const struct nbrec_logical_switch *ls, size_t idx)
+remove_lsp(struct ctl_context *ctx,
+           const struct nbrec_logical_switch *ls,
+           const struct nbrec_logical_switch_port *lsp)
 {
-    const struct nbrec_logical_switch_port *lsp = ls->ports[idx];
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
+
+    /* Updating runtime cache. */
+    shash_find_and_delete(&nbctx->lsp_to_ls_map, lsp->name);
 
     /* First remove 'lsp' from the array of ports.  This is what will
      * actually cause the logical port to be deleted when the transaction is
      * sent to the database server (due to garbage collection). */
-    struct nbrec_logical_switch_port **new_ports
-        = xmemdup(ls->ports, sizeof *new_ports * ls->n_ports);
-    new_ports[idx] = new_ports[ls->n_ports - 1];
-    nbrec_logical_switch_verify_ports(ls);
-    nbrec_logical_switch_set_ports(ls, new_ports, ls->n_ports - 1);
-    free(new_ports);
+    nbrec_logical_switch_update_ports_delvalue(ls, lsp);
 
     /* Delete 'lsp' from the IDL.  This won't have a real effect on the
      * database server (the IDL will suppress it in fact) but it means that it
@@ -1498,18 +1557,13 @@ nbctl_lsp_del(struct ctl_context *ctx)
 
     /* Find the switch that contains 'lsp', then delete it. */
     const struct nbrec_logical_switch *ls;
-    NBREC_LOGICAL_SWITCH_FOR_EACH (ls, ctx->idl) {
-        for (size_t i = 0; i < ls->n_ports; i++) {
-            if (ls->ports[i] == lsp) {
-                remove_lsp(ls, i);
-                return;
-            }
-        }
-    }
 
-    /* Can't happen because of the database schema. */
-    ctl_error(ctx, "logical port %s is not part of any logical switch",
-              ctx->argv[1]);
+    error = lsp_to_ls(ctx, lsp, &ls);
+    if (error) {
+        ctx->error = error;
+        return;
+    }
+    remove_lsp(ctx, ls, lsp);
 }
 
 static void
@@ -1658,7 +1712,7 @@ nbctl_lsp_set_addresses(struct ctl_context *ctx)
     }
 
     const struct nbrec_logical_switch *ls;
-    error = lsp_to_ls(ctx->idl, lsp, &ls);
+    error = lsp_to_ls(ctx, lsp, &ls);
     if (error) {
         ctx->error = error;
         return;
@@ -2299,17 +2353,11 @@ nbctl_acl_add(struct ctl_context *ctx)
     }
 
     /* Insert the acl into the logical switch/port group. */
-    struct nbrec_acl **new_acls = xmalloc(sizeof *new_acls * (n_acls + 1));
-    nullable_memcpy(new_acls, acls, sizeof *new_acls * n_acls);
-    new_acls[n_acls] = acl;
     if (pg) {
-        nbrec_port_group_verify_acls(pg);
-        nbrec_port_group_set_acls(pg, new_acls, n_acls + 1);
+        nbrec_port_group_update_acls_addvalue(pg, acl);
     } else {
-        nbrec_logical_switch_verify_acls(ls);
-        nbrec_logical_switch_set_acls(ls, new_acls, n_acls + 1);
+        nbrec_logical_switch_update_acls_addvalue(ls, acl);
     }
-    free(new_acls);
 }
 
 static void
@@ -2349,23 +2397,15 @@ nbctl_acl_del(struct ctl_context *ctx)
     /* If priority and match are not specified, delete all ACLs with the
      * specified direction. */
     if (ctx->argc == 3) {
-        struct nbrec_acl **new_acls = xmalloc(sizeof *new_acls * n_acls);
-
-        int n_new_acls = 0;
         for (size_t i = 0; i < n_acls; i++) {
-            if (strcmp(direction, acls[i]->direction)) {
-                new_acls[n_new_acls++] = acls[i];
+            if (!strcmp(direction, acls[i]->direction)) {
+                if (pg) {
+                    nbrec_port_group_update_acls_delvalue(pg, acls[i]);
+                } else {
+                    nbrec_logical_switch_update_acls_delvalue(ls, acls[i]);
+                }
             }
         }
-
-        if (pg) {
-            nbrec_port_group_verify_acls(pg);
-            nbrec_port_group_set_acls(pg, new_acls, n_new_acls);
-        } else {
-            nbrec_logical_switch_verify_acls(ls);
-            nbrec_logical_switch_set_acls(ls, new_acls, n_new_acls);
-        }
-        free(new_acls);
         return;
     }
 
@@ -2387,19 +2427,11 @@ nbctl_acl_del(struct ctl_context *ctx)
 
         if (priority == acl->priority && !strcmp(ctx->argv[4], acl->match) &&
              !strcmp(direction, acl->direction)) {
-            struct nbrec_acl **new_acls
-                = xmemdup(acls, sizeof *new_acls * n_acls);
-            new_acls[i] = acls[n_acls - 1];
             if (pg) {
-                nbrec_port_group_verify_acls(pg);
-                nbrec_port_group_set_acls(pg, new_acls,
-                                          n_acls - 1);
+                nbrec_port_group_update_acls_delvalue(pg, acl);
             } else {
-                nbrec_logical_switch_verify_acls(ls);
-                nbrec_logical_switch_set_acls(ls, new_acls,
-                                              n_acls - 1);
+                nbrec_logical_switch_update_acls_delvalue(ls, acl);
             }
-            free(new_acls);
             return;
         }
     }
@@ -2552,15 +2584,7 @@ nbctl_qos_add(struct ctl_context *ctx)
     }
 
     /* Insert the qos rule the logical switch. */
-    nbrec_logical_switch_verify_qos_rules(ls);
-    struct nbrec_qos **new_qos_rules
-        = xmalloc(sizeof *new_qos_rules * (ls->n_qos_rules + 1));
-    nullable_memcpy(new_qos_rules,
-                    ls->qos_rules, sizeof *new_qos_rules * ls->n_qos_rules);
-    new_qos_rules[ls->n_qos_rules] = qos;
-    nbrec_logical_switch_set_qos_rules(ls, new_qos_rules,
-                                       ls->n_qos_rules + 1);
-    free(new_qos_rules);
+    nbrec_logical_switch_update_qos_rules_addvalue(ls, qos);
 }
 
 static void
@@ -2597,34 +2621,31 @@ nbctl_qos_del(struct ctl_context *ctx)
     /* If uuid was specified, delete qos_rule with the
      * specified uuid. */
     if (ctx->argc == 3) {
-        struct nbrec_qos **new_qos_rules
-            = xmalloc(sizeof *new_qos_rules * ls->n_qos_rules);
+        size_t i;
 
-        int n_qos_rules = 0;
         if (qos_rule_uuid) {
-            for (size_t i = 0; i < ls->n_qos_rules; i++) {
-                if (!uuid_equals(qos_rule_uuid,
-                                 &(ls->qos_rules[i]->header_.uuid))) {
-                    new_qos_rules[n_qos_rules++] = ls->qos_rules[i];
+            for (i = 0; i < ls->n_qos_rules; i++) {
+                if (uuid_equals(qos_rule_uuid,
+                                &(ls->qos_rules[i]->header_.uuid))) {
+                    nbrec_logical_switch_update_qos_rules_delvalue(
+                        ls, ls->qos_rules[i]);
+                    break;
                 }
             }
-            if (n_qos_rules == ls->n_qos_rules) {
+            if (i == ls->n_qos_rules) {
                 ctl_error(ctx, "uuid is not found");
             }
 
         /* If priority and match are not specified, delete all qos_rules
          * with the specified direction. */
         } else {
-            for (size_t i = 0; i < ls->n_qos_rules; i++) {
-                if (strcmp(direction, ls->qos_rules[i]->direction)) {
-                    new_qos_rules[n_qos_rules++] = ls->qos_rules[i];
+            for (i = 0; i < ls->n_qos_rules; i++) {
+                if (!strcmp(direction, ls->qos_rules[i]->direction)) {
+                    nbrec_logical_switch_update_qos_rules_delvalue(
+                        ls, ls->qos_rules[i]);
                 }
             }
         }
-
-        nbrec_logical_switch_verify_qos_rules(ls);
-        nbrec_logical_switch_set_qos_rules(ls, new_qos_rules, n_qos_rules);
-        free(new_qos_rules);
         return;
     }
 
@@ -2651,14 +2672,7 @@ nbctl_qos_del(struct ctl_context *ctx)
 
         if (priority == qos->priority && !strcmp(ctx->argv[4], qos->match) &&
              !strcmp(direction, qos->direction)) {
-            struct nbrec_qos **new_qos_rules
-                = xmemdup(ls->qos_rules,
-                          sizeof *new_qos_rules * ls->n_qos_rules);
-            new_qos_rules[i] = ls->qos_rules[ls->n_qos_rules - 1];
-            nbrec_logical_switch_verify_qos_rules(ls);
-            nbrec_logical_switch_set_qos_rules(ls, new_qos_rules,
-                                          ls->n_qos_rules - 1);
-            free(new_qos_rules);
+            nbrec_logical_switch_update_qos_rules_delvalue(ls, qos);
             return;
         }
     }
@@ -2821,6 +2835,14 @@ nbctl_lb_add(struct ctl_context *ctx)
 
     bool may_exist = shash_find(&ctx->options, "--may-exist") != NULL;
     bool add_duplicate = shash_find(&ctx->options, "--add-duplicate") != NULL;
+    bool empty_backend_rej = shash_find(&ctx->options, "--reject") != NULL;
+    bool empty_backend_event = shash_find(&ctx->options, "--event") != NULL;
+
+    if (empty_backend_event && empty_backend_rej) {
+        ctl_error(ctx,
+                  "--reject and --event can't be specified at the same time");
+        return;
+    }
 
     const char *lb_proto;
     bool is_update_proto = false;
@@ -2934,6 +2956,14 @@ nbctl_lb_add(struct ctl_context *ctx)
     smap_add(CONST_CAST(struct smap *, &lb->vips),
             lb_vip_normalized, ds_cstr(&lb_ips_new));
     nbrec_load_balancer_set_vips(lb, &lb->vips);
+    if (empty_backend_rej) {
+        const struct smap options = SMAP_CONST1(&options, "reject", "true");
+        nbrec_load_balancer_set_options(lb, &options);
+    }
+    if (empty_backend_event) {
+        const struct smap options = SMAP_CONST1(&options, "event", "true");
+        nbrec_load_balancer_set_options(lb, &options);
+    }
 out:
     ds_destroy(&lb_ips_new);
 
@@ -3115,17 +3145,7 @@ nbctl_lr_lb_add(struct ctl_context *ctx)
     }
 
     /* Insert the load balancer into the logical router. */
-    nbrec_logical_router_verify_load_balancer(lr);
-    struct nbrec_load_balancer **new_lbs
-        = xmalloc(sizeof *new_lbs * (lr->n_load_balancer + 1));
-
-    nullable_memcpy(new_lbs, lr->load_balancer,
-                    sizeof *new_lbs * lr->n_load_balancer);
-    new_lbs[lr->n_load_balancer] = CONST_CAST(struct nbrec_load_balancer *,
-            new_lb);
-    nbrec_logical_router_set_load_balancer(lr, new_lbs,
-            lr->n_load_balancer + 1);
-    free(new_lbs);
+    nbrec_logical_router_update_load_balancer_addvalue(lr, new_lb);
 }
 
 static void
@@ -3158,15 +3178,7 @@ nbctl_lr_lb_del(struct ctl_context *ctx)
 
         if (uuid_equals(&del_lb->header_.uuid, &lb->header_.uuid)) {
             /* Remove the matching rule. */
-            nbrec_logical_router_verify_load_balancer(lr);
-
-            struct nbrec_load_balancer **new_lbs
-                = xmemdup(lr->load_balancer,
-                    sizeof *new_lbs * lr->n_load_balancer);
-            new_lbs[i] = lr->load_balancer[lr->n_load_balancer - 1];
-            nbrec_logical_router_set_load_balancer(lr, new_lbs,
-                                          lr->n_load_balancer - 1);
-            free(new_lbs);
+            nbrec_logical_router_update_load_balancer_delvalue(lr, lb);
             return;
         }
     }
@@ -3240,17 +3252,7 @@ nbctl_ls_lb_add(struct ctl_context *ctx)
     }
 
     /* Insert the load balancer into the logical switch. */
-    nbrec_logical_switch_verify_load_balancer(ls);
-    struct nbrec_load_balancer **new_lbs
-        = xmalloc(sizeof *new_lbs * (ls->n_load_balancer + 1));
-
-    nullable_memcpy(new_lbs, ls->load_balancer,
-                    sizeof *new_lbs * ls->n_load_balancer);
-    new_lbs[ls->n_load_balancer] = CONST_CAST(struct nbrec_load_balancer *,
-            new_lb);
-    nbrec_logical_switch_set_load_balancer(ls, new_lbs,
-            ls->n_load_balancer + 1);
-    free(new_lbs);
+    nbrec_logical_switch_update_load_balancer_addvalue(ls, new_lb);
 }
 
 static void
@@ -3283,15 +3285,7 @@ nbctl_ls_lb_del(struct ctl_context *ctx)
 
         if (uuid_equals(&del_lb->header_.uuid, &lb->header_.uuid)) {
             /* Remove the matching rule. */
-            nbrec_logical_switch_verify_load_balancer(ls);
-
-            struct nbrec_load_balancer **new_lbs
-                = xmemdup(ls->load_balancer,
-                        sizeof *new_lbs * ls->n_load_balancer);
-            new_lbs[i] = ls->load_balancer[ls->n_load_balancer - 1];
-            nbrec_logical_switch_set_load_balancer(ls, new_lbs,
-                                          ls->n_load_balancer - 1);
-            free(new_lbs);
+            nbrec_logical_switch_update_load_balancer_delvalue(ls, lb);
             return;
         }
     }
@@ -3378,6 +3372,7 @@ static void
 nbctl_lr_del(struct ctl_context *ctx)
 {
     bool must_exist = !shash_find(&ctx->options, "--if-exists");
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
     const char *id = ctx->argv[1];
     const struct nbrec_logical_router *lr = NULL;
 
@@ -3390,6 +3385,11 @@ nbctl_lr_del(struct ctl_context *ctx)
         return;
     }
 
+    /* Updating runtime cache. */
+    for (size_t i = 0; i < lr->n_ports; i++) {
+        shash_find_and_delete(&nbctx->lrp_to_lr_map, lr->ports[i]->name);
+    }
+
     nbrec_logical_router_delete(lr);
 }
 
@@ -3645,7 +3645,8 @@ nbctl_lr_policy_add(struct ctl_context *ctx)
         return;
     }
     const char *action = ctx->argv[4];
-    char *next_hop = NULL;
+    size_t n_nexthops = 0;
+    char **nexthops = NULL;
 
     bool reroute = false;
     /* Validate action. */
@@ -3665,7 +3666,8 @@ nbctl_lr_policy_add(struct ctl_context *ctx)
     /* Check if same routing policy already exists.
      * A policy is uniquely identified by priority and match */
     bool may_exist = !!shash_find(&ctx->options, "--may-exist");
-    for (int i = 0; i < lr->n_policies; i++) {
+    size_t i;
+    for (i = 0; i < lr->n_policies; i++) {
         const struct nbrec_logical_router_policy *policy = lr->policies[i];
         if (policy->priority == priority &&
             !strcmp(policy->match, ctx->argv[3])) {
@@ -3676,12 +3678,53 @@ nbctl_lr_policy_add(struct ctl_context *ctx)
             return;
         }
     }
+
     if (reroute) {
-        next_hop = normalize_prefix_str(ctx->argv[5]);
-        if (!next_hop) {
-            ctl_error(ctx, "bad next hop argument: %s", ctx->argv[5]);
-            return;
+        char *nexthops_arg = xstrdup(ctx->argv[5]);
+        char *save_ptr, *next_hop, *token;
+
+        n_nexthops = 0;
+        size_t n_allocs = 0;
+
+        bool nexthops_is_ipv4 = true;
+        for (token = strtok_r(nexthops_arg, ",", &save_ptr);
+            token != NULL; token = strtok_r(NULL, ",", &save_ptr)) {
+            next_hop = normalize_addr_str(token);
+
+            if (!next_hop) {
+                ctl_error(ctx, "bad next hop argument: %s", ctx->argv[5]);
+                free(nexthops_arg);
+                for (i = 0; i < n_nexthops; i++) {
+                    free(nexthops[i]);
+                }
+                free(nexthops);
+                return;
+            }
+            if (n_nexthops == n_allocs) {
+                nexthops = x2nrealloc(nexthops, &n_allocs, sizeof *nexthops);
+            }
+
+            bool is_ipv4 = strchr(next_hop, '.') ? true : false;
+            if (n_nexthops == 0) {
+                nexthops_is_ipv4 = is_ipv4;
+            }
+
+            if (is_ipv4 != nexthops_is_ipv4) {
+                ctl_error(ctx, "bad next hops argument, not in the same "
+                          "addr family : %s", ctx->argv[5]);
+                free(nexthops_arg);
+                free(next_hop);
+                for (i = 0; i < n_nexthops; i++) {
+                    free(nexthops[i]);
+                }
+                free(nexthops);
+                return;
+            }
+            nexthops[n_nexthops] = next_hop;
+            n_nexthops++;
         }
+
+        free(nexthops_arg);
     }
 
     struct nbrec_logical_router_policy *policy;
@@ -3690,12 +3733,13 @@ nbctl_lr_policy_add(struct ctl_context *ctx)
     nbrec_logical_router_policy_set_match(policy, ctx->argv[3]);
     nbrec_logical_router_policy_set_action(policy, action);
     if (reroute) {
-        nbrec_logical_router_policy_set_nexthop(policy, next_hop);
+        nbrec_logical_router_policy_set_nexthops(
+            policy, (const char **)nexthops, n_nexthops);
     }
 
     /* Parse the options. */
     struct smap options = SMAP_INITIALIZER(&options);
-    for (size_t i = reroute ? 6 : 5; i < ctx->argc; i++) {
+    for (i = reroute ? 6 : 5; i < ctx->argc; i++) {
         char *key, *value;
         value = xstrdup(ctx->argv[i]);
         key = strsep(&value, "=");
@@ -3705,7 +3749,10 @@ nbctl_lr_policy_add(struct ctl_context *ctx)
             ctl_error(ctx, "No value specified for the option : %s", key);
             smap_destroy(&options);
             free(key);
-            free(next_hop);
+            for (i = 0; i < n_nexthops; i++) {
+                free(nexthops[i]);
+            }
+            free(nexthops);
             return;
         }
         free(key);
@@ -3713,18 +3760,12 @@ nbctl_lr_policy_add(struct ctl_context *ctx)
     nbrec_logical_router_policy_set_options(policy, &options);
     smap_destroy(&options);
 
-    nbrec_logical_router_verify_policies(lr);
-    struct nbrec_logical_router_policy **new_policies
-        = xmalloc(sizeof *new_policies * (lr->n_policies + 1));
-    memcpy(new_policies, lr->policies,
-           sizeof *new_policies * lr->n_policies);
-    new_policies[lr->n_policies] = policy;
-    nbrec_logical_router_set_policies(lr, new_policies,
-                                      lr->n_policies + 1);
-    free(new_policies);
-    if (next_hop != NULL) {
-        free(next_hop);
+    nbrec_logical_router_update_policies_addvalue(lr, policy);
+
+    for (i = 0; i < n_nexthops; i++) {
+        free(nexthops[i]);
     }
+    free(nexthops);
 }
 
 static void
@@ -3758,38 +3799,34 @@ nbctl_lr_policy_del(struct ctl_context *ctx)
     /* If uuid was specified, delete routing policy with the
      * specified uuid. */
     if (ctx->argc == 3) {
-        struct nbrec_logical_router_policy **new_policies
-            = xmemdup(lr->policies,
-                      sizeof *new_policies * lr->n_policies);
-        int n_policies = 0;
+        size_t i;
 
         if (lr_policy_uuid) {
-            for (size_t i = 0; i < lr->n_policies; i++) {
-                if (!uuid_equals(lr_policy_uuid,
-                                 &(lr->policies[i]->header_.uuid))) {
-                    new_policies[n_policies++] = lr->policies[i];
+            for (i = 0; i < lr->n_policies; i++) {
+                if (uuid_equals(lr_policy_uuid,
+                                &(lr->policies[i]->header_.uuid))) {
+                    nbrec_logical_router_update_policies_delvalue(
+                        lr, lr->policies[i]);
+                    break;
                 }
             }
-            if (n_policies == lr->n_policies) {
+            if (i == lr->n_policies) {
                 if (!shash_find(&ctx->options, "--if-exists")) {
                     ctl_error(ctx, "Logical router policy uuid is not found.");
                 }
-                free(new_policies);
                 return;
             }
 
-    /* If match is not specified, delete all routing policies with the
-     * specified priority. */
+        /* If match is not specified, delete all routing policies with the
+         * specified priority. */
         } else {
-            for (int i = 0; i < lr->n_policies; i++) {
-                if (priority != lr->policies[i]->priority) {
-                    new_policies[n_policies++] = lr->policies[i];
+            for (i = 0; i < lr->n_policies; i++) {
+                if (priority == lr->policies[i]->priority) {
+                    nbrec_logical_router_update_policies_delvalue(
+                        lr, lr->policies[i]);
                 }
             }
         }
-        nbrec_logical_router_verify_policies(lr);
-        nbrec_logical_router_set_policies(lr, new_policies, n_policies);
-        free(new_policies);
         return;
     }
 
@@ -3798,14 +3835,7 @@ nbctl_lr_policy_del(struct ctl_context *ctx)
         struct nbrec_logical_router_policy *routing_policy = lr->policies[i];
         if (priority == routing_policy->priority &&
             !strcmp(ctx->argv[3], routing_policy->match)) {
-            struct nbrec_logical_router_policy **new_policies
-                = xmemdup(lr->policies,
-                          sizeof *new_policies * lr->n_policies);
-            new_policies[i] = lr->policies[lr->n_policies - 1];
-            nbrec_logical_router_verify_policies(lr);
-            nbrec_logical_router_set_policies(lr, new_policies,
-                                              lr->n_policies - 1);
-            free(new_policies);
+            nbrec_logical_router_update_policies_delvalue(lr, routing_policy);
             return;
         }
     }
@@ -3884,6 +3914,47 @@ nbctl_lr_policy_list(struct ctl_context *ctx)
     }
     free(policies);
 }
+
+static struct nbrec_logical_router_static_route *
+nbctl_lr_get_route(const struct nbrec_logical_router *lr, char *prefix,
+                   char *next_hop, bool is_src_route, bool ecmp)
+{
+    for (int i = 0; i < lr->n_static_routes; i++) {
+        struct nbrec_logical_router_static_route *route = lr->static_routes[i];
+
+        /* Compare route policy. */
+        char *nb_policy = route->policy;
+        bool nb_is_src_route = false;
+        if (nb_policy && !strcmp(nb_policy, "src-ip")) {
+                nb_is_src_route = true;
+        }
+        if (is_src_route != nb_is_src_route) {
+            continue;
+        }
+
+        /* Compare route prefix. */
+        char *rt_prefix = normalize_prefix_str(route->ip_prefix);
+        if (!rt_prefix) {
+            /* Ignore existing prefix we couldn't parse. */
+            continue;
+        }
+
+        if (strcmp(rt_prefix, prefix)) {
+            free(rt_prefix);
+            continue;
+        }
+
+        if (ecmp && strcmp(next_hop, route->nexthop)) {
+            free(rt_prefix);
+            continue;
+        }
+
+        free(rt_prefix);
+        return route;
+    }
+    return NULL;
+}
+
 
 static void
 nbctl_lr_route_add(struct ctl_context *ctx)
@@ -3927,44 +3998,42 @@ nbctl_lr_route_add(struct ctl_context *ctx)
         goto cleanup;
     }
 
+    struct shash_node *bfd = shash_find(&ctx->options, "--bfd");
+    const struct nbrec_bfd *nb_bt = NULL;
+    if (bfd) {
+        if (bfd->data) {
+            struct uuid bfd_uuid;
+            if (uuid_from_string(&bfd_uuid, bfd->data)) {
+                nb_bt = nbrec_bfd_get_for_uuid(ctx->idl, &bfd_uuid);
+            }
+            if (!nb_bt) {
+                ctl_error(ctx, "no entry found in the BFD table");
+                goto cleanup;
+            }
+        } else {
+            const struct nbrec_bfd *iter;
+            NBREC_BFD_FOR_EACH (iter, ctx->idl) {
+                if (!strcmp(iter->dst_ip, next_hop)) {
+                    nb_bt = iter;
+                    break;
+                }
+            }
+        }
+    }
+
     bool may_exist = shash_find(&ctx->options, "--may-exist") != NULL;
     bool ecmp_symmetric_reply = shash_find(&ctx->options,
                                            "--ecmp-symmetric-reply") != NULL;
     bool ecmp = shash_find(&ctx->options, "--ecmp") != NULL ||
                 ecmp_symmetric_reply;
+    struct nbrec_logical_router_static_route *route =
+        nbctl_lr_get_route(lr, prefix, next_hop, is_src_route, ecmp);
     if (!ecmp) {
-        for (int i = 0; i < lr->n_static_routes; i++) {
-            const struct nbrec_logical_router_static_route *route
-                = lr->static_routes[i];
-            char *rt_prefix;
-
-            /* Compare route policy. */
-            char *nb_policy = lr->static_routes[i]->policy;
-            bool nb_is_src_route = false;
-            if (nb_policy && !strcmp(nb_policy, "src-ip")) {
-                    nb_is_src_route = true;
-            }
-            if (is_src_route != nb_is_src_route) {
-                continue;
-            }
-
-            /* Compare route prefix. */
-            rt_prefix = normalize_prefix_str(lr->static_routes[i]->ip_prefix);
-            if (!rt_prefix) {
-                /* Ignore existing prefix we couldn't parse. */
-                continue;
-            }
-
-            if (strcmp(rt_prefix, prefix)) {
-                free(rt_prefix);
-                continue;
-            }
-
+        if (route) {
             if (!may_exist) {
                 ctl_error(ctx, "duplicate prefix: %s (policy: %s). Use option"
                           " --ecmp to allow this for ECMP routing.",
                           prefix, is_src_route ? "src-ip" : "dst-ip");
-                free(rt_prefix);
                 goto cleanup;
             }
 
@@ -3981,12 +4050,25 @@ nbctl_lr_route_add(struct ctl_context *ctx)
             if (policy) {
                  nbrec_logical_router_static_route_set_policy(route, policy);
             }
-            free(rt_prefix);
+            if (bfd) {
+                if (!nb_bt) {
+                    if (ctx->argc != 5) {
+                        ctl_error(ctx, "insert entry in the BFD table failed");
+                        goto cleanup;
+                    }
+                    nb_bt = nbrec_bfd_insert(ctx->txn);
+                    nbrec_bfd_set_dst_ip(nb_bt, next_hop);
+                    nbrec_bfd_set_logical_port(nb_bt, ctx->argv[4]);
+                }
+                nbrec_logical_router_static_route_set_bfd(route, nb_bt);
+            }
             goto cleanup;
         }
+    } else if (route) {
+        ctl_error(ctx, "duplicate nexthop for the same ECMP route");
+        goto cleanup;
     }
 
-    struct nbrec_logical_router_static_route *route;
     route = nbrec_logical_router_static_route_insert(ctx->txn);
     nbrec_logical_router_static_route_set_ip_prefix(route, prefix);
     nbrec_logical_router_static_route_set_nexthop(route, next_hop);
@@ -4004,15 +4086,19 @@ nbctl_lr_route_add(struct ctl_context *ctx)
         nbrec_logical_router_static_route_set_options(route, &options);
     }
 
-    nbrec_logical_router_verify_static_routes(lr);
-    struct nbrec_logical_router_static_route **new_routes
-        = xmalloc(sizeof *new_routes * (lr->n_static_routes + 1));
-    nullable_memcpy(new_routes, lr->static_routes,
-               sizeof *new_routes * lr->n_static_routes);
-    new_routes[lr->n_static_routes] = route;
-    nbrec_logical_router_set_static_routes(lr, new_routes,
-                                           lr->n_static_routes + 1);
-    free(new_routes);
+    nbrec_logical_router_update_static_routes_addvalue(lr, route);
+    if (bfd) {
+        if (!nb_bt) {
+            if (ctx->argc != 5) {
+                ctl_error(ctx, "insert entry in the BFD table failed");
+                goto cleanup;
+            }
+            nb_bt = nbrec_bfd_insert(ctx->txn);
+            nbrec_bfd_set_dst_ip(nb_bt, next_hop);
+            nbrec_bfd_set_logical_port(nb_bt, ctx->argv[4]);
+        }
+        nbrec_logical_router_static_route_set_bfd(route, nb_bt);
+    }
 
 cleanup:
     free(next_hop);
@@ -4069,11 +4155,8 @@ nbctl_lr_route_del(struct ctl_context *ctx)
         output_port = ctx->argv[4];
     }
 
-    struct nbrec_logical_router_static_route **new_routes
-        = xmemdup(lr->static_routes,
-                  sizeof *new_routes * lr->n_static_routes);
-    size_t n_new = 0;
-    for (int i = 0; i < lr->n_static_routes; i++) {
+    size_t n_removed = 0;
+    for (size_t i = 0; i < lr->n_static_routes; i++) {
         /* Compare route policy, if specified. */
         if (policy) {
             char *nb_policy = lr->static_routes[i]->policy;
@@ -4082,7 +4165,6 @@ nbctl_lr_route_del(struct ctl_context *ctx)
                     nb_is_src_route = true;
             }
             if (is_src_route != nb_is_src_route) {
-                new_routes[n_new++] = lr->static_routes[i];
                 continue;
             }
         }
@@ -4093,14 +4175,12 @@ nbctl_lr_route_del(struct ctl_context *ctx)
                 normalize_prefix_str(lr->static_routes[i]->ip_prefix);
             if (!rt_prefix) {
                 /* Ignore existing prefix we couldn't parse. */
-                new_routes[n_new++] = lr->static_routes[i];
                 continue;
             }
 
             int ret = strcmp(prefix, rt_prefix);
             free(rt_prefix);
             if (ret) {
-                new_routes[n_new++] = lr->static_routes[i];
                 continue;
             }
         }
@@ -4111,13 +4191,11 @@ nbctl_lr_route_del(struct ctl_context *ctx)
                 normalize_prefix_str(lr->static_routes[i]->nexthop);
             if (!rt_nexthop) {
                 /* Ignore existing nexthop we couldn't parse. */
-                new_routes[n_new++] = lr->static_routes[i];
                 continue;
             }
             int ret = strcmp(nexthop, rt_nexthop);
             free(rt_nexthop);
             if (ret) {
-                new_routes[n_new++] = lr->static_routes[i];
                 continue;
             }
         }
@@ -4126,18 +4204,17 @@ nbctl_lr_route_del(struct ctl_context *ctx)
         if (output_port) {
             char *rt_output_port = lr->static_routes[i]->output_port;
             if (!rt_output_port || strcmp(output_port, rt_output_port)) {
-                new_routes[n_new++] = lr->static_routes[i];
+                continue;
             }
         }
-    }
 
-    if (n_new < lr->n_static_routes) {
-        nbrec_logical_router_verify_static_routes(lr);
-        nbrec_logical_router_set_static_routes(lr, new_routes, n_new);
-        goto out;
+        /* Everything matched. Removing. */
+        nbrec_logical_router_update_static_routes_delvalue(
+            lr, lr->static_routes[i]);
+        n_removed++;
     }
 
-    if (!shash_find(&ctx->options, "--if-exists")) {
+    if (!n_removed && !shash_find(&ctx->options, "--if-exists")) {
         ctl_error(ctx, "no matching route: policy '%s', prefix '%s', nexthop "
                   "'%s', output_port '%s'.",
                   policy ? policy : "any",
@@ -4146,8 +4223,6 @@ nbctl_lr_route_del(struct ctl_context *ctx)
                   output_port ? output_port : "any");
     }
 
-out:
-    free(new_routes);
     free(prefix);
     free(nexthop);
 }
@@ -4418,12 +4493,7 @@ nbctl_lr_nat_add(struct ctl_context *ctx)
     smap_destroy(&nat_options);
 
     /* Insert the NAT into the logical router. */
-    nbrec_logical_router_verify_nat(lr);
-    struct nbrec_nat **new_nats = xmalloc(sizeof *new_nats * (lr->n_nat + 1));
-    nullable_memcpy(new_nats, lr->nat, sizeof *new_nats * lr->n_nat);
-    new_nats[lr->n_nat] = nat;
-    nbrec_logical_router_set_nat(lr, new_nats, lr->n_nat + 1);
-    free(new_nats);
+    nbrec_logical_router_update_nat_addvalue(lr, nat);
 
 cleanup:
     free(new_logical_ip);
@@ -4459,17 +4529,11 @@ nbctl_lr_nat_del(struct ctl_context *ctx)
 
     if (ctx->argc == 3) {
         /*Deletes all NATs with the specified type. */
-        struct nbrec_nat **new_nats = xmalloc(sizeof *new_nats * lr->n_nat);
-        int n_nat = 0;
         for (size_t i = 0; i < lr->n_nat; i++) {
-            if (strcmp(nat_type, lr->nat[i]->type)) {
-                new_nats[n_nat++] = lr->nat[i];
+            if (!strcmp(nat_type, lr->nat[i]->type)) {
+                nbrec_logical_router_update_nat_delvalue(lr, lr->nat[i]);
             }
         }
-
-        nbrec_logical_router_verify_nat(lr);
-        nbrec_logical_router_set_nat(lr, new_nats, n_nat);
-        free(new_nats);
         return;
     }
 
@@ -4491,13 +4555,7 @@ nbctl_lr_nat_del(struct ctl_context *ctx)
             continue;
         }
         if (!strcmp(nat_type, nat->type) && !strcmp(nat_ip, old_ip)) {
-            struct nbrec_nat **new_nats
-                = xmemdup(lr->nat, sizeof *new_nats * lr->n_nat);
-            new_nats[i] = lr->nat[lr->n_nat - 1];
-            nbrec_logical_router_verify_nat(lr);
-            nbrec_logical_router_set_nat(lr, new_nats,
-                                          lr->n_nat - 1);
-            free(new_nats);
+            nbrec_logical_router_update_nat_delvalue(lr, nat);
             should_return = true;
         }
         free(old_ip);
@@ -4667,20 +4725,18 @@ lrp_by_name_or_uuid(struct ctl_context *ctx, const char *id, bool must_exist,
 
 /* Returns the logical router that contains 'lrp'. */
 static char * OVS_WARN_UNUSED_RESULT
-lrp_to_lr(const struct ovsdb_idl *idl,
+lrp_to_lr(struct ctl_context *ctx,
           const struct nbrec_logical_router_port *lrp,
           const struct nbrec_logical_router **lr_p)
 {
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
     const struct nbrec_logical_router *lr;
     *lr_p = NULL;
 
-    NBREC_LOGICAL_ROUTER_FOR_EACH (lr, idl) {
-        for (size_t i = 0; i < lr->n_ports; i++) {
-            if (lr->ports[i] == lrp) {
-                *lr_p = lr;
-                return NULL;
-            }
-        }
+    lr = shash_find_data(&nbctx->lrp_to_lr_map, lrp->name);
+    if (lr) {
+        *lr_p = lr;
+        return NULL;
     }
 
     /* Can't happen because of the database schema */
@@ -4777,15 +4833,7 @@ nbctl_lrp_set_gateway_chassis(struct ctl_context *ctx)
     nbrec_gateway_chassis_set_priority(gc, priority);
 
     /* Insert the logical gateway chassis into the logical router port. */
-    nbrec_logical_router_port_verify_gateway_chassis(lrp);
-    struct nbrec_gateway_chassis **new_gc = xmalloc(
-        sizeof *new_gc * (lrp->n_gateway_chassis + 1));
-    nullable_memcpy(new_gc, lrp->gateway_chassis,
-                    sizeof *new_gc * lrp->n_gateway_chassis);
-    new_gc[lrp->n_gateway_chassis] = gc;
-    nbrec_logical_router_port_set_gateway_chassis(
-        lrp, new_gc, lrp->n_gateway_chassis + 1);
-    free(new_gc);
+    nbrec_logical_router_port_update_gateway_chassis_addvalue(lrp, gc);
     free(gc_name);
 }
 
@@ -4802,14 +4850,7 @@ remove_gc(const struct nbrec_logical_router_port *lrp, size_t idx)
          * will actually cause the gateway chassis to be deleted when the
          * transaction is sent to the database server (due to garbage
          * collection). */
-        struct nbrec_gateway_chassis **new_gc
-            = xmemdup(lrp->gateway_chassis,
-                      sizeof *new_gc * lrp->n_gateway_chassis);
-        new_gc[idx] = new_gc[lrp->n_gateway_chassis - 1];
-        nbrec_logical_router_port_verify_gateway_chassis(lrp);
-        nbrec_logical_router_port_set_gateway_chassis(
-            lrp, new_gc, lrp->n_gateway_chassis - 1);
-        free(new_gc);
+        nbrec_logical_router_port_update_gateway_chassis_delvalue(lrp, gc);
     }
 
     /* Delete 'gc' from the IDL.  This won't have a real effect on
@@ -4893,6 +4934,7 @@ static void
 nbctl_lrp_add(struct ctl_context *ctx)
 {
     bool may_exist = shash_find(&ctx->options, "--may-exist") != NULL;
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
 
     const struct nbrec_logical_router *lr = NULL;
     char *error = lr_by_name_or_uuid(ctx, ctx->argv[1], true, &lr);
@@ -4942,7 +4984,7 @@ nbctl_lrp_add(struct ctl_context *ctx)
         }
 
         const struct nbrec_logical_router *bound_lr;
-        error = lrp_to_lr(ctx->idl, lrp, &bound_lr);
+        error = lrp_to_lr(ctx, lrp, &bound_lr);
         if (error) {
             ctx->error = error;
             return;
@@ -5040,31 +5082,27 @@ nbctl_lrp_add(struct ctl_context *ctx)
     }
 
     /* Insert the logical port into the logical router. */
-    nbrec_logical_router_verify_ports(lr);
-    struct nbrec_logical_router_port **new_ports = xmalloc(sizeof *new_ports *
-                                                        (lr->n_ports + 1));
-    nullable_memcpy(new_ports, lr->ports, sizeof *new_ports * lr->n_ports);
-    new_ports[lr->n_ports] = CONST_CAST(struct nbrec_logical_router_port *,
-                                             lrp);
-    nbrec_logical_router_set_ports(lr, new_ports, lr->n_ports + 1);
-    free(new_ports);
+    nbrec_logical_router_update_ports_addvalue(lr, lrp);
+
+    /* Updating runtime cache. */
+    shash_add(&nbctx->lrp_to_lr_map, lrp->name, lr);
 }
 
-/* Removes logical router port 'lr->ports[idx]'. */
+/* Removes logical router port 'lrp' from logical router 'lr'. */
 static void
-remove_lrp(const struct nbrec_logical_router *lr, size_t idx)
+remove_lrp(struct ctl_context *ctx,
+           const struct nbrec_logical_router *lr,
+           const struct nbrec_logical_router_port *lrp)
 {
-    const struct nbrec_logical_router_port *lrp = lr->ports[idx];
+    struct nbctl_context *nbctx = nbctl_context_get(ctx);
+
+    /* Updating runtime cache. */
+    shash_find_and_delete(&nbctx->lrp_to_lr_map, lrp->name);
 
     /* First remove 'lrp' from the array of ports.  This is what will
      * actually cause the logical port to be deleted when the transaction is
      * sent to the database server (due to garbage collection). */
-    struct nbrec_logical_router_port **new_ports
-        = xmemdup(lr->ports, sizeof *new_ports * lr->n_ports);
-    new_ports[idx] = new_ports[lr->n_ports - 1];
-    nbrec_logical_router_verify_ports(lr);
-    nbrec_logical_router_set_ports(lr, new_ports, lr->n_ports - 1);
-    free(new_ports);
+    nbrec_logical_router_update_ports_delvalue(lr, lrp);
 
     /* Delete 'lrp' from the IDL.  This won't have a real effect on
      * the database server (the IDL will suppress it in fact) but it
@@ -5090,18 +5128,13 @@ nbctl_lrp_del(struct ctl_context *ctx)
 
     /* Find the router that contains 'lrp', then delete it. */
     const struct nbrec_logical_router *lr;
-    NBREC_LOGICAL_ROUTER_FOR_EACH (lr, ctx->idl) {
-        for (size_t i = 0; i < lr->n_ports; i++) {
-            if (lr->ports[i] == lrp) {
-                remove_lrp(lr, i);
-                return;
-            }
-        }
-    }
 
-    /* Can't happen because of the database schema. */
-    ctl_error(ctx, "logical port %s is not part of any logical router",
-              ctx->argv[1]);
+    error = lrp_to_lr(ctx, lrp, &lr);
+    if (error) {
+        ctx->error = error;
+        return;
+    }
+    remove_lrp(ctx, lr, lrp);
 }
 
 /* Print a list of logical router ports. */
@@ -5275,7 +5308,7 @@ fwd_group_to_logical_switch(struct ctl_context *ctx,
     }
 
     const struct nbrec_logical_switch *ls;
-    error = lsp_to_ls(ctx->idl, lsp, &ls);
+    error = lsp_to_ls(ctx, lsp, &ls);
     if (error) {
         ctx->error = error;
         return NULL;
@@ -5350,7 +5383,7 @@ nbctl_fwd_group_add(struct ctl_context *ctx)
             return;
         }
         if (lsp) {
-            error = lsp_to_ls(ctx->idl, lsp, &ls);
+            error = lsp_to_ls(ctx, lsp, &ls);
             if (error) {
                 ctx->error = error;
                 return;
@@ -5373,15 +5406,7 @@ nbctl_fwd_group_add(struct ctl_context *ctx)
       nbrec_forwarding_group_set_liveness(fwd_group, true);
     }
 
-    struct nbrec_forwarding_group **new_fwd_groups =
-            xmalloc(sizeof(*new_fwd_groups) * (ls->n_forwarding_groups + 1));
-    memcpy(new_fwd_groups, ls->forwarding_groups,
-           sizeof *new_fwd_groups * ls->n_forwarding_groups);
-    new_fwd_groups[ls->n_forwarding_groups] = fwd_group;
-    nbrec_logical_switch_set_forwarding_groups(ls, new_fwd_groups,
-                                               (ls->n_forwarding_groups + 1));
-    free(new_fwd_groups);
-
+    nbrec_logical_switch_update_forwarding_groups_addvalue(ls, fwd_group);
 }
 
 static void
@@ -5403,14 +5428,8 @@ nbctl_fwd_group_del(struct ctl_context *ctx)
 
     for (int i = 0; i < ls->n_forwarding_groups; ++i) {
         if (!strcmp(ls->forwarding_groups[i]->name, fwd_group->name)) {
-            struct nbrec_forwarding_group **new_fwd_groups =
-                xmemdup(ls->forwarding_groups,
-                        sizeof *new_fwd_groups * ls->n_forwarding_groups);
-            new_fwd_groups[i] =
-                ls->forwarding_groups[ls->n_forwarding_groups - 1];
-            nbrec_logical_switch_set_forwarding_groups(ls, new_fwd_groups,
-                (ls->n_forwarding_groups - 1));
-            free(new_fwd_groups);
+            nbrec_logical_switch_update_forwarding_groups_delvalue(
+                ls, ls->forwarding_groups[i]);
             nbrec_forwarding_group_delete(fwd_group);
             return;
         }
@@ -5498,17 +5517,27 @@ struct ipv4_route {
     const struct nbrec_logical_router_static_route *route;
 };
 
+static int
+__ipv4_route_cmp(const struct ipv4_route *r1, const struct ipv4_route *r2)
+{
+    if (r1->priority != r2->priority) {
+        return r1->priority > r2->priority ? -1 : 1;
+    }
+    if (r1->addr != r2->addr) {
+        return ntohl(r1->addr) < ntohl(r2->addr) ? -1 : 1;
+    }
+    return 0;
+}
+
 static int
 ipv4_route_cmp(const void *route1_, const void *route2_)
 {
     const struct ipv4_route *route1p = route1_;
     const struct ipv4_route *route2p = route2_;
 
-    if (route1p->priority != route2p->priority) {
-        return route1p->priority > route2p->priority ? -1 : 1;
-    }
-    if (route1p->addr != route2p->addr) {
-        return ntohl(route1p->addr) < ntohl(route2p->addr) ? -1 : 1;
+    int ret = __ipv4_route_cmp(route1p, route2p);
+    if (ret) {
+        return ret;
     }
     return route_cmp_details(route1p->route, route2p->route);
 }
@@ -5519,16 +5548,22 @@ struct ipv6_route {
     const struct nbrec_logical_router_static_route *route;
 };
 
+static int
+__ipv6_route_cmp(const struct ipv6_route *r1, const struct ipv6_route *r2)
+{
+    if (r1->priority != r2->priority) {
+        return r1->priority > r2->priority ? -1 : 1;
+    }
+    return memcmp(&r1->addr, &r2->addr, sizeof(r1->addr));
+}
+
 static int
 ipv6_route_cmp(const void *route1_, const void *route2_)
 {
     const struct ipv6_route *route1p = route1_;
     const struct ipv6_route *route2p = route2_;
 
-    if (route1p->priority != route2p->priority) {
-        return route1p->priority > route2p->priority ? -1 : 1;
-    }
-    int ret = memcmp(&route1p->addr, &route2p->addr, sizeof(route1p->addr));
+    int ret = __ipv6_route_cmp(route1p, route2p);
     if (ret) {
         return ret;
     }
@@ -5536,7 +5571,8 @@ ipv6_route_cmp(const void *route1_, const void *route2_)
 }
 
 static void
-print_route(const struct nbrec_logical_router_static_route *route, struct ds *s)
+print_route(const struct nbrec_logical_router_static_route *route,
+            struct ds *s, bool ecmp)
 {
 
     char *prefix = normalize_prefix_str(route->ip_prefix);
@@ -5558,6 +5594,19 @@ print_route(const struct nbrec_logical_router_static_route *route, struct ds *s)
     if (smap_get(&route->external_ids, "ic-learned-route")) {
         ds_put_format(s, " (learned)");
     }
+
+    if (ecmp) {
+        ds_put_cstr(s, " ecmp");
+    }
+
+    if (smap_get_bool(&route->options, "ecmp_symmetric_reply", false)) {
+        ds_put_cstr(s, " ecmp-symmetric-reply");
+    }
+
+    if (route->bfd) {
+        ds_put_cstr(s, " bfd");
+    }
+
     ds_put_char(s, '\n');
 }
 
@@ -5623,7 +5672,16 @@ nbctl_lr_route_list(struct ctl_context *ctx)
         ds_put_cstr(&ctx->output, "IPv4 Routes\n");
     }
     for (int i = 0; i < n_ipv4_routes; i++) {
-        print_route(ipv4_routes[i].route, &ctx->output);
+        bool ecmp = false;
+        if (i < n_ipv4_routes - 1 &&
+            !__ipv4_route_cmp(&ipv4_routes[i], &ipv4_routes[i + 1])) {
+            ecmp = true;
+        } else if (i > 0 &&
+                   !__ipv4_route_cmp(&ipv4_routes[i],
+                                     &ipv4_routes[i - 1])) {
+            ecmp = true;
+        }
+        print_route(ipv4_routes[i].route, &ctx->output, ecmp);
     }
 
     if (n_ipv6_routes) {
@@ -5631,7 +5689,16 @@ nbctl_lr_route_list(struct ctl_context *ctx)
                       n_ipv4_routes ?  "\n" : "");
     }
     for (int i = 0; i < n_ipv6_routes; i++) {
-        print_route(ipv6_routes[i].route, &ctx->output);
+        bool ecmp = false;
+        if (i < n_ipv6_routes - 1 &&
+            !__ipv6_route_cmp(&ipv6_routes[i], &ipv6_routes[i + 1])) {
+            ecmp = true;
+        } else if (i > 0 &&
+                   !__ipv6_route_cmp(&ipv6_routes[i],
+                                     &ipv6_routes[i - 1])) {
+            ecmp = true;
+        }
+        print_route(ipv6_routes[i].route, &ctx->output, ecmp);
     }
 
     free(ipv4_routes);
@@ -6007,17 +6074,7 @@ cmd_ha_ch_grp_add_chassis(struct ctl_context *ctx)
     nbrec_ha_chassis_set_chassis_name(ha_chassis, chassis_name);
     nbrec_ha_chassis_set_priority(ha_chassis, priority);
 
-    nbrec_ha_chassis_group_verify_ha_chassis(ha_ch_grp);
-
-    struct nbrec_ha_chassis **new_ha_chs =
-        xmalloc(sizeof *new_ha_chs * (ha_ch_grp->n_ha_chassis + 1));
-    nullable_memcpy(new_ha_chs, ha_ch_grp->ha_chassis,
-                    sizeof *new_ha_chs * ha_ch_grp->n_ha_chassis);
-    new_ha_chs[ha_ch_grp->n_ha_chassis] =
-        CONST_CAST(struct nbrec_ha_chassis *, ha_chassis);
-    nbrec_ha_chassis_group_set_ha_chassis(ha_ch_grp, new_ha_chs,
-                                          ha_ch_grp->n_ha_chassis + 1);
-    free(new_ha_chs);
+    nbrec_ha_chassis_group_update_ha_chassis_addvalue(ha_ch_grp, ha_chassis);
 }
 
 static void
@@ -6032,11 +6089,9 @@ cmd_ha_ch_grp_remove_chassis(struct ctl_context *ctx)
 
     const char *chassis_name = ctx->argv[2];
     struct nbrec_ha_chassis *ha_chassis = NULL;
-    size_t idx = 0;
     for (size_t i = 0; i < ha_ch_grp->n_ha_chassis; i++) {
         if (!strcmp(ha_ch_grp->ha_chassis[i]->chassis_name, chassis_name)) {
             ha_chassis = ha_ch_grp->ha_chassis[i];
-            idx = i;
             break;
         }
     }
@@ -6047,14 +6102,7 @@ cmd_ha_ch_grp_remove_chassis(struct ctl_context *ctx)
         return;
     }
 
-    struct nbrec_ha_chassis **new_ha_ch
-        = xmemdup(ha_ch_grp->ha_chassis,
-                  sizeof *new_ha_ch * ha_ch_grp->n_ha_chassis);
-    new_ha_ch[idx] = new_ha_ch[ha_ch_grp->n_ha_chassis - 1];
-    nbrec_ha_chassis_group_verify_ha_chassis(ha_ch_grp);
-    nbrec_ha_chassis_group_set_ha_chassis(ha_ch_grp, new_ha_ch,
-                                          ha_ch_grp->n_ha_chassis - 1);
-    free(new_ha_ch);
+    nbrec_ha_chassis_group_update_ha_chassis_delvalue(ha_ch_grp, ha_chassis);
     nbrec_ha_chassis_delete(ha_chassis);
 }
 
@@ -6231,7 +6279,7 @@ do_nbctl(const char *args, struct ctl_command *commands, size_t n_commands,
     struct ovsdb_idl_txn *txn;
     enum ovsdb_idl_txn_status status;
     struct ovsdb_symbol_table *symtab;
-    struct ctl_context ctx;
+    struct nbctl_context ctx;
     struct ctl_command *c;
     struct shash_node *node;
     int64_t next_cfg = 0;
@@ -6268,25 +6316,26 @@ do_nbctl(const char *args, struct ctl_command *commands, size_t n_commands,
         ds_init(&c->output);
         c->table = NULL;
     }
-    ctl_context_init(&ctx, NULL, idl, txn, symtab, NULL);
+    nbctl_context_init(&ctx);
+    ctl_context_init(&ctx.base, NULL, idl, txn, symtab, NULL);
     for (c = commands; c < &commands[n_commands]; c++) {
-        ctl_context_init_command(&ctx, c);
+        ctl_context_init_command(&ctx.base, c);
         if (c->syntax->run) {
-            (c->syntax->run)(&ctx);
+            (c->syntax->run)(&ctx.base);
         }
-        if (ctx.error) {
-            error = xstrdup(ctx.error);
-            ctl_context_done(&ctx, c);
+        if (ctx.base.error) {
+            error = xstrdup(ctx.base.error);
+            ctl_context_done(&ctx.base, c);
             goto out_error;
         }
-        ctl_context_done_command(&ctx, c);
+        ctl_context_done_command(&ctx.base, c);
 
-        if (ctx.try_again) {
-            ctl_context_done(&ctx, NULL);
+        if (ctx.base.try_again) {
+            ctl_context_done(&ctx.base, NULL);
             goto try_again;
         }
     }
-    ctl_context_done(&ctx, NULL);
+    ctl_context_done(&ctx.base, NULL);
 
     SHASH_FOR_EACH (node, &symtab->sh) {
         struct ovsdb_symbol *symbol = node->data;
@@ -6317,14 +6366,14 @@ do_nbctl(const char *args, struct ctl_command *commands, size_t n_commands,
     if (status == TXN_UNCHANGED || status == TXN_SUCCESS) {
         for (c = commands; c < &commands[n_commands]; c++) {
             if (c->syntax->postprocess) {
-                ctl_context_init(&ctx, c, idl, txn, symtab, NULL);
-                (c->syntax->postprocess)(&ctx);
-                if (ctx.error) {
-                    error = xstrdup(ctx.error);
-                    ctl_context_done(&ctx, c);
+                ctl_context_init(&ctx.base, c, idl, txn, symtab, NULL);
+                (c->syntax->postprocess)(&ctx.base);
+                if (ctx.base.error) {
+                    error = xstrdup(ctx.base.error);
+                    ctl_context_done(&ctx.base, c);
                     goto out_error;
                 }
-                ctl_context_done(&ctx, c);
+                ctl_context_done(&ctx.base, c);
             }
         }
     }
@@ -6412,6 +6461,7 @@ do_nbctl(const char *args, struct ctl_command *commands, size_t n_commands,
     done: ;
     }
 
+    nbctl_context_destroy(&ctx);
     ovsdb_symbol_table_destroy(symtab);
     ovsdb_idl_txn_destroy(txn);
     the_idl_txn = NULL;
@@ -6429,6 +6479,7 @@ out_error:
     ovsdb_idl_txn_destroy(txn);
     the_idl_txn = NULL;
 
+    nbctl_context_destroy(&ctx);
     ovsdb_symbol_table_destroy(symtab);
     return error;
 }
@@ -6561,7 +6612,7 @@ static const struct ctl_command_syntax nbctl_commands[] = {
     /* logical router route commands. */
     { "lr-route-add", 3, 4, "ROUTER PREFIX NEXTHOP [PORT]", NULL,
       nbctl_lr_route_add, NULL, "--may-exist,--ecmp,--ecmp-symmetric-reply,"
-      "--policy=", RW },
+      "--policy=,--bfd?", RW },
     { "lr-route-del", 1, 4, "ROUTER [PREFIX [NEXTHOP [PORT]]]", NULL,
       nbctl_lr_route_del, NULL, "--if-exists,--policy=", RW },
     { "lr-route-list", 1, 1, "ROUTER", NULL, nbctl_lr_route_list, NULL,
@@ -6588,7 +6639,7 @@ static const struct ctl_command_syntax nbctl_commands[] = {
       nbctl_lr_nat_set_ext_ips, NULL, "--is-exempted", RW},
     /* load balancer commands. */
     { "lb-add", 3, 4, "LB VIP[:PORT] IP[:PORT]... [PROTOCOL]", NULL,
-      nbctl_lb_add, NULL, "--may-exist,--add-duplicate", RW },
+      nbctl_lb_add, NULL, "--may-exist,--add-duplicate,--reject,--event", RW },
     { "lb-del", 1, 2, "LB [VIP]", NULL, nbctl_lb_del, NULL,
         "--if-exists", RW },
     { "lb-list", 0, 1, "[LB]", NULL, nbctl_lb_list, NULL, "", RO },
diff --git a/utilities/ovn-sbctl.c b/utilities/ovn-sbctl.c
index 0a1b9ffdc..c38e8ec3b 100644
--- a/utilities/ovn-sbctl.c
+++ b/utilities/ovn-sbctl.c
@@ -526,6 +526,7 @@ pre_get_info(struct ctl_context *ctx)
     ovsdb_idl_add_column(ctx->idl, &sbrec_port_binding_col_tunnel_key);
     ovsdb_idl_add_column(ctx->idl, &sbrec_port_binding_col_chassis);
     ovsdb_idl_add_column(ctx->idl, &sbrec_port_binding_col_datapath);
+    ovsdb_idl_add_column(ctx->idl, &sbrec_port_binding_col_up);
 
     ovsdb_idl_add_column(ctx->idl, &sbrec_logical_flow_col_logical_datapath);
     ovsdb_idl_add_column(ctx->idl, &sbrec_logical_flow_col_logical_dp_group);
@@ -665,6 +666,7 @@ cmd_lsp_bind(struct ctl_context *ctx)
     struct sbctl_chassis *sbctl_ch;
     struct sbctl_port_binding *sbctl_bd;
     char *lport_name, *ch_name;
+    bool up = true;
 
     /* port_binding must exist, chassis must exist! */
     lport_name = ctx->argv[1];
@@ -683,6 +685,7 @@ cmd_lsp_bind(struct ctl_context *ctx)
         }
     }
     sbrec_port_binding_set_chassis(sbctl_bd->bd_cfg, sbctl_ch->ch_cfg);
+    sbrec_port_binding_set_up(sbctl_bd->bd_cfg, &up, 1);
     sbctl_context_invalidate_cache(ctx);
 }
 
@@ -699,6 +702,7 @@ cmd_lsp_unbind(struct ctl_context *ctx)
     sbctl_bd = find_port_binding(sbctl_ctx, lport_name, must_exist);
     if (sbctl_bd) {
         sbrec_port_binding_set_chassis(sbctl_bd->bd_cfg, NULL);
+        sbrec_port_binding_set_up(sbctl_bd->bd_cfg, NULL, 0);
     }
 }
 
diff --git a/utilities/ovn-trace.c b/utilities/ovn-trace.c
index 6fad36512..fb88bc06c 100644
--- a/utilities/ovn-trace.c
+++ b/utilities/ovn-trace.c
@@ -405,6 +405,7 @@ struct ovntrace_datapath {
     size_t n_flows, allocated_flows;
 
     struct hmap mac_bindings;   /* Contains "struct ovntrace_mac_binding"s. */
+    struct hmap fdbs;   /* Contains "struct ovntrace_fdb"s. */
 
     bool has_local_l3gateway;
 };
@@ -453,12 +454,24 @@ struct ovntrace_mac_binding {
     struct eth_addr mac;
 };
 
+struct ovntrace_fdb {
+    struct hmap_node node;
+    uint16_t port_key;
+    struct eth_addr mac;
+};
+
 static inline uint32_t
 hash_mac_binding(uint16_t port_key, const struct in6_addr *ip)
 {
     return hash_bytes(ip, sizeof *ip, port_key);
 }
 
+static inline uint32_t
+hash_fdb(const struct eth_addr *mac)
+{
+    return hash_bytes(mac, sizeof *mac, 0);
+}
+
 /* Every ovntrace_datapath, by southbound Datapath_Binding record UUID. */
 static struct hmap datapaths;
 
@@ -478,6 +491,7 @@ static struct shash port_groups;
 static struct hmap dhcp_opts;   /* Contains "struct gen_opts_map"s. */
 static struct hmap dhcpv6_opts; /* Contains "struct gen_opts_map"s. */
 static struct hmap nd_ra_opts; /* Contains "struct gen_opts_map"s. */
+static struct controller_event_options event_opts;
 
 static struct ovntrace_datapath *
 ovntrace_datapath_find_by_sb_uuid(const struct uuid *sb_uuid)
@@ -517,6 +531,18 @@ ovntrace_datapath_find_by_name(const char *name)
     return match;
 }
 
+static struct ovntrace_datapath *
+ovntrace_datapath_find_by_key(uint32_t tunnel_key)
+{
+    struct ovntrace_datapath *dp;
+    HMAP_FOR_EACH (dp, sb_uuid_node, &datapaths) {
+        if (dp->tunnel_key == tunnel_key) {
+            return dp;
+        }
+    }
+    return NULL;
+}
+
 static const struct ovntrace_port *
 ovntrace_port_find_by_key(const struct ovntrace_datapath *dp,
                           uint16_t tunnel_key)
@@ -597,6 +623,20 @@ ovntrace_mac_binding_find_mac_ip(const struct ovntrace_datapath *dp,
     return NULL;
 }
 
+static const struct ovntrace_fdb *
+ovntrace_fdb_find(const struct ovntrace_datapath *dp,
+                  const struct eth_addr *mac)
+{
+    const struct ovntrace_fdb *fdb;
+    HMAP_FOR_EACH_WITH_HASH (fdb, node, hash_fdb(mac),
+                             &dp->fdbs) {
+        if (eth_addr_equals(fdb->mac, *mac)) {
+            return fdb;
+        }
+    }
+    return NULL;
+}
+
 /* If 's' ends with a UUID, returns a copy of it with the UUID truncated to
  * just the first 6 characters; otherwise, returns a copy of 's'. */
 static char *
@@ -637,7 +677,7 @@ read_datapaths(void)
 
         ovs_list_init(&dp->mcgroups);
         hmap_init(&dp->mac_bindings);
-
+        hmap_init(&dp->fdbs);
         hmap_insert(&datapaths, &dp->sb_uuid_node, uuid_hash(&dp->sb_uuid));
     }
 }
@@ -901,10 +941,11 @@ parse_lflow_for_datapath(const struct sbrec_logical_flow *sblf,
             .dhcp_opts = &dhcp_opts,
             .dhcpv6_opts = &dhcpv6_opts,
             .nd_ra_opts = &nd_ra_opts,
+            .controller_event_opts = &event_opts,
             .pipeline = (!strcmp(sblf->pipeline, "ingress")
                          ? OVNACT_P_INGRESS
                          : OVNACT_P_EGRESS),
-            .n_tables = 24,
+            .n_tables = LOG_PIPELINE_LEN,
             .cur_ltable = sblf->table_id,
         };
         uint64_t stub[1024 / 8];
@@ -1006,6 +1047,8 @@ read_gen_opts(void)
 
     hmap_init(&nd_ra_opts);
     nd_ra_opts_init(&nd_ra_opts);
+
+    controller_event_opts_init(&event_opts);
 }
 
 static void
@@ -1049,6 +1092,30 @@ read_mac_bindings(void)
     }
 }
 
+static void
+read_fdbs(void)
+{
+    const struct sbrec_fdb *fdb;
+    SBREC_FDB_FOR_EACH (fdb, ovnsb_idl) {
+        struct eth_addr mac;
+        if (!eth_addr_from_string(fdb->mac, &mac)) {
+            VLOG_WARN("%s: bad Ethernet address", fdb->mac);
+            continue;
+        }
+
+        struct ovntrace_datapath *dp =
+            ovntrace_datapath_find_by_key(fdb->dp_key);
+        if (!dp) {
+            continue;
+        }
+
+        struct ovntrace_fdb *fdb_t = xmalloc(sizeof *fdb_t);
+        fdb_t->mac = mac;
+        fdb_t->port_key = fdb->port_key;
+        hmap_insert(&dp->fdbs, &fdb_t->node, hash_fdb(&mac));
+    }
+}
+
 static void
 read_db(void)
 {
@@ -1060,6 +1127,7 @@ read_db(void)
     read_gen_opts();
     read_flows();
     read_mac_bindings();
+    read_fdbs();
 }
 
 static const struct ovntrace_port *
@@ -1116,6 +1184,11 @@ ovntrace_lookup_port(const void *dp_, const char *port_name,
         return true;
     }
 
+    if (!strcmp(port_name, "none")) {
+        *portp = 0;
+        return true;
+    }
+
     const struct ovntrace_port *port = ovntrace_port_lookup_by_name(port_name);
     if (port) {
         if (port->dp == dp) {
@@ -1802,6 +1875,91 @@ execute_tcp_reset(const struct ovnact_nest *on,
         execute_tcp6_reset(on, dp, uflow, table_id, loopback, pipeline, super);
     }
 }
+
+static void
+execute_sctp4_abort(const struct ovnact_nest *on,
+                    const struct ovntrace_datapath *dp,
+                    const struct flow *uflow, uint8_t table_id,
+                    bool loopback, enum ovnact_pipeline pipeline,
+                    struct ovs_list *super)
+{
+    struct flow sctp_flow = *uflow;
+
+    /* Update fields for SCTP. */
+    if (loopback) {
+        sctp_flow.dl_dst = uflow->dl_src;
+        sctp_flow.dl_src = uflow->dl_dst;
+        sctp_flow.nw_dst = uflow->nw_src;
+        sctp_flow.nw_src = uflow->nw_dst;
+    } else {
+        sctp_flow.dl_dst = uflow->dl_dst;
+        sctp_flow.dl_src = uflow->dl_src;
+        sctp_flow.nw_dst = uflow->nw_dst;
+        sctp_flow.nw_src = uflow->nw_src;
+    }
+    sctp_flow.nw_proto = IPPROTO_SCTP;
+    sctp_flow.nw_ttl = 255;
+    sctp_flow.tp_src = uflow->tp_src;
+    sctp_flow.tp_dst = uflow->tp_dst;
+
+    struct ovntrace_node *node = ovntrace_node_append(
+        super, OVNTRACE_NODE_TRANSFORMATION, "sctp_abort");
+
+    trace_actions(on->nested, on->nested_len, dp, &sctp_flow,
+                  table_id, pipeline, &node->subs);
+}
+
+static void
+execute_sctp6_abort(const struct ovnact_nest *on,
+                    const struct ovntrace_datapath *dp,
+                    const struct flow *uflow, uint8_t table_id,
+                    bool loopback, enum ovnact_pipeline pipeline,
+                    struct ovs_list *super)
+{
+    struct flow sctp_flow = *uflow;
+
+    /* Update fields for SCTP. */
+    if (loopback) {
+        sctp_flow.dl_dst = uflow->dl_src;
+        sctp_flow.dl_src = uflow->dl_dst;
+        sctp_flow.ipv6_dst = uflow->ipv6_src;
+        sctp_flow.ipv6_src = uflow->ipv6_dst;
+    } else {
+        sctp_flow.dl_dst = uflow->dl_dst;
+        sctp_flow.dl_src = uflow->dl_src;
+        sctp_flow.ipv6_dst = uflow->ipv6_dst;
+        sctp_flow.ipv6_src = uflow->ipv6_src;
+    }
+    sctp_flow.nw_proto = IPPROTO_SCTP;
+    sctp_flow.nw_ttl = 255;
+    sctp_flow.tp_src = uflow->tp_src;
+    sctp_flow.tp_dst = uflow->tp_dst;
+    /* SCTP aborts carry no TCP flags (unlike tcp_reset); nothing to set. */
+
+    struct ovntrace_node *node = ovntrace_node_append(
+        super, OVNTRACE_NODE_TRANSFORMATION, "sctp_abort");
+
+    trace_actions(on->nested, on->nested_len, dp, &sctp_flow,
+                  table_id, pipeline, &node->subs);
+}
+
+static void
+execute_sctp_abort(const struct ovnact_nest *on,
+                   const struct ovntrace_datapath *dp,
+                   const struct flow *uflow, uint8_t table_id,
+                   bool loopback, enum ovnact_pipeline pipeline,
+                   struct ovs_list *super)
+{
+    if (get_dl_type(uflow) == htons(ETH_TYPE_IP)) {
+        execute_sctp4_abort(on, dp, uflow, table_id, loopback,
+                            pipeline, super);
+    } else {
+        execute_sctp6_abort(on, dp, uflow, table_id, loopback,
+                            pipeline, super);
+    }
+}
+
+
 static void
 execute_reject(const struct ovnact_nest *on,
                const struct ovntrace_datapath *dp,
@@ -1810,6 +1968,8 @@ execute_reject(const struct ovnact_nest *on,
 {
     if (uflow->nw_proto == IPPROTO_TCP) {
         execute_tcp_reset(on, dp, uflow, table_id, true, pipeline, super);
+    } else if (uflow->nw_proto == IPPROTO_SCTP) {
+        execute_sctp_abort(on, dp, uflow, table_id, true, pipeline, super);
     } else {
         if (get_dl_type(uflow) == htons(ETH_TYPE_IP)) {
             execute_icmp4(on, dp, uflow, table_id, true, pipeline, super);
@@ -1938,6 +2098,66 @@ execute_lookup_mac_bind_ip(const struct ovnact_lookup_mac_bind_ip *bind,
     mf_write_subfield_flow(&dst, &sv, uflow);
 }
 
+static void
+execute_lookup_fdb(const struct ovnact_lookup_fdb *lookup_fdb,
+                   const struct ovntrace_datapath *dp,
+                   struct flow *uflow,
+                   struct ovs_list *super)
+{
+    /* Get logical port number.*/
+    struct mf_subfield port_sf = expr_resolve_field(&lookup_fdb->port);
+    ovs_assert(port_sf.n_bits == 32);
+    uint32_t port_key = mf_get_subfield(&port_sf, uflow);
+
+    /* Get MAC. */
+    struct mf_subfield mac_sf = expr_resolve_field(&lookup_fdb->mac);
+    ovs_assert(mac_sf.n_bits == 48);
+    union mf_subvalue mac_sv;
+    mf_read_subfield(&mac_sf, uflow, &mac_sv);
+
+    const struct ovntrace_fdb *fdb_t
+        = ovntrace_fdb_find(dp, &mac_sv.mac);
+
+    struct mf_subfield dst = expr_resolve_field(&lookup_fdb->dst);
+    uint8_t val = 0;
+
+    if (fdb_t && fdb_t->port_key == port_key) {
+        val = 1;
+        ovntrace_node_append(super, OVNTRACE_NODE_ACTION,
+                             "/* MAC lookup for "ETH_ADDR_FMT" found in "
+                             "FDB. */", ETH_ADDR_ARGS(uflow->dl_dst));
+    } else {
+        ovntrace_node_append(super, OVNTRACE_NODE_ACTION,
+                             "/* lookup mac failed in mac learning table. */");
+    }
+    union mf_subvalue sv = { .u8_val = val };
+    mf_write_subfield_flow(&dst, &sv, uflow);
+}
+
+static void
+execute_get_fdb(const struct ovnact_get_fdb *get_fdb,
+                const struct ovntrace_datapath *dp,
+                struct flow *uflow)
+{
+    /* Get MAC. */
+    struct mf_subfield mac_sf = expr_resolve_field(&get_fdb->mac);
+    ovs_assert(mac_sf.n_bits == 48);
+    union mf_subvalue mac_sv;
+    mf_read_subfield(&mac_sf, uflow, &mac_sv);
+
+    const struct ovntrace_fdb *fdb_t
+        = ovntrace_fdb_find(dp, &mac_sv.mac);
+
+    struct mf_subfield dst = expr_resolve_field(&get_fdb->dst);
+    uint32_t val = 0;
+    if (fdb_t) {
+        val = fdb_t->port_key;
+    }
+
+    union mf_subvalue sv = { .be32_int = htonl(val) };
+    mf_write_subfield_flow(&dst, &sv, uflow);
+}
+
 static void
 execute_put_opts(const struct ovnact_put_opts *po,
                  const char *name, struct flow *uflow,
@@ -2503,6 +2723,11 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
                               false, pipeline, super);
             break;
 
+        case OVNACT_SCTP_ABORT:
+            execute_sctp_abort(ovnact_get_SCTP_ABORT(a), dp, uflow, table_id,
+                               false, pipeline, super);
+            break;
+
         case OVNACT_OVNFIELD_LOAD:
             execute_ovnfield_load(ovnact_get_OVNFIELD_LOAD(a), super);
             break;
@@ -2540,6 +2765,20 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
             break;
         case OVNACT_DHCP6_REPLY:
             break;
+        case OVNACT_BFD_MSG:
+            break;
+
+        case OVNACT_PUT_FDB:
+            /* Nothing to do for tracing. */
+            break;
+
+        case OVNACT_GET_FDB:
+            execute_get_fdb(ovnact_get_GET_FDB(a), dp, uflow);
+            break;
+
+        case OVNACT_LOOKUP_FDB:
+            execute_lookup_fdb(ovnact_get_LOOKUP_FDB(a), dp, uflow, super);
+            break;
         }
     }
     ds_destroy(&s);