diff --git a/.ovn.metadata b/.ovn.metadata index a2a85d4..5b5b237 100644 --- a/.ovn.metadata +++ b/.ovn.metadata @@ -1,5 +1,5 @@ 002450621b33c5690060345b0aac25bc2426d675 SOURCES/docutils-0.12.tar.gz -155f423dbb5434315caac2e453d7f1361c4ab747 SOURCES/openvswitch-2410b95.tar.gz -cae717fbee361a235064a1d79b012b2590908f7c SOURCES/ovn-22.09.0.tar.gz +f9cfb42d90c2c5417825087573feb01117be91f3 SOURCES/openvswitch-a787fbb.tar.gz +ba9c555a492827e129a040e0fa95bfe7e95792a0 SOURCES/ovn-22.12.0.tar.gz d34f96421a86004aa5d26ecf975edefd09f948b1 SOURCES/Pygments-1.4.tar.gz 6beb30f18ffac3de7689b7fd63e9a8a7d9c8df3a SOURCES/Sphinx-1.1.3.tar.gz diff --git a/SOURCES/ovn22.09.patch b/SOURCES/ovn22.09.patch deleted file mode 100644 index 8e656d1..0000000 --- a/SOURCES/ovn22.09.patch +++ /dev/null @@ -1,6249 +0,0 @@ -diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh -index 2b0782aea..dc1ca5240 100755 ---- a/.ci/linux-build.sh -+++ b/.ci/linux-build.sh -@@ -47,15 +47,10 @@ else - fi - - if [ "$TESTSUITE" ]; then -- TESTSUITEFLAGS="" -- if [[ ! -z $TESTSUITE_KW ]]; then -- TESTSUITEFLAGS="-k $TESTSUITE_KW" -- fi -- - if [ "$TESTSUITE" = "system-test" ]; then - configure_ovn $OPTS - make -j4 || { cat config.log; exit 1; } -- if ! sudo make -j4 check-kernel TESTSUITEFLAGS="$TESTSUITEFLAGS" RECHECK=yes; then -+ if ! sudo make -j4 check-kernel TESTSUITEFLAGS="$TEST_RANGE" RECHECK=yes; then - # system-kmod-testsuite.log is necessary for debugging. - cat tests/system-kmod-testsuite.log - exit 1 -@@ -67,7 +62,7 @@ if [ "$TESTSUITE" ]; then - - export DISTCHECK_CONFIGURE_FLAGS="$OPTS" - if ! make distcheck CFLAGS="${COMMON_CFLAGS} ${OVN_CFLAGS}" -j4 \ -- TESTSUITEFLAGS="$TESTSUITEFLAGS -j4" RECHECK=yes -+ TESTSUITEFLAGS="-j4 $TEST_RANGE" RECHECK=yes - then - # testsuite.log is necessary for debugging. - cat */_build/sub/tests/testsuite.log -diff --git a/.ci/ovn-kubernetes/Dockerfile b/.ci/ovn-kubernetes/Dockerfile -index e74b620be..7edf86a13 100644 ---- a/.ci/ovn-kubernetes/Dockerfile -+++ b/.ci/ovn-kubernetes/Dockerfile -@@ -47,9 +47,17 @@ RUN GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@${LIBOVSD - # Clone OVN Kubernetes and build the binary based on the commit passed as argument - WORKDIR /root - RUN git clone https://github.com/ovn-org/ovn-kubernetes.git --WORKDIR /root/ovn-kubernetes/go-controller -+WORKDIR /root/ovn-kubernetes - RUN git checkout ${OVNKUBE_COMMIT} && git log -n 1 - -+# Copy the ovn-kubernetes scripts from the OVN sources and apply any -+# custom changes if needed. -+RUN mkdir -p /tmp/ovn/.ci/ovn-kubernetes -+COPY .ci/ovn-kubernetes /tmp/ovn/.ci/ovn-kubernetes -+WORKDIR /tmp/ovn -+RUN .ci/ovn-kubernetes/prepare.sh /root/ovn-kubernetes -+ -+WORKDIR /root/ovn-kubernetes/go-controller - # Make sure we use the OVN NB/SB schema from the local code. - COPY --from=ovnbuilder /tmp/ovn/ovn-nb.ovsschema pkg/nbdb/ovn-nb.ovsschema - COPY --from=ovnbuilder /tmp/ovn/ovn-sb.ovsschema pkg/sbdb/ovn-sb.ovsschema -diff --git a/.ci/ovn-kubernetes/custom.patch b/.ci/ovn-kubernetes/custom.patch -new file mode 100644 -index 000000000..ea5dd7540 ---- /dev/null -+++ b/.ci/ovn-kubernetes/custom.patch -@@ -0,0 +1,31 @@ -+From 903eef2dd6f9fec818a580760f4757d8137b9974 Mon Sep 17 00:00:00 2001 -+From: Dumitru Ceara -+Date: Mon, 19 Dec 2022 12:18:55 +0100 -+Subject: [PATCH] DOWNSTREAM: Disable session affinity tests. -+ -+Commit https://github.com/ovn-org/ovn-kubernetes/commit/898d2f8f10c4 -+enabled affinity timeout tests but the underlying OVN feature is -+not supported in this branch. Disable affinity tests. 
-+ -+Signed-off-by: Dumitru Ceara -+--- -+ test/scripts/e2e-kind.sh | 3 +++ -+ 1 file changed, 3 insertions(+) -+ -+diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh -+index 69959fa1b..c3b2a5c3e 100755 -+--- a/test/scripts/e2e-kind.sh -++++ b/test/scripts/e2e-kind.sh -+@@ -26,6 +26,9 @@ kube-proxy -+ should set TCP CLOSE_WAIT timeout -+ \[Feature:ProxyTerminatingEndpoints\] -+ -++# Disable session affinity tests completely. -++session affinity -++ -+ # NOT IMPLEMENTED; SEE DISCUSSION IN https://github.com/ovn-org/ovn-kubernetes/pull/1225 -+ named port.+\[Feature:NetworkPolicy\] -+ -+-- -+2.31.1 -+ -diff --git a/.ci/ovn-kubernetes/prepare.sh b/.ci/ovn-kubernetes/prepare.sh -new file mode 100755 -index 000000000..8fc9652af ---- /dev/null -+++ b/.ci/ovn-kubernetes/prepare.sh -@@ -0,0 +1,20 @@ -+#!/bin/bash -+ -+set -ev -+ -+ovnk8s_path=$1 -+topdir=$PWD -+ -+pushd ${ovnk8s_path} -+ -+# Add here any custom operations that need to performed on the -+# ovn-kubernetes cloned repo, e.g., custom patches. -+ -+# git apply --allow-empty is too new so not all git versions from major -+# distros support it, just check if the custom patch file is not empty -+# before applying it. -+[ -s ${topdir}/.ci/ovn-kubernetes/custom.patch ] && \ -+ git apply -v ${topdir}/.ci/ovn-kubernetes/custom.patch -+ -+popd # ${ovnk8s_path} -+exit 0 -diff --git a/.github/workflows/ovn-kubernetes.yml b/.github/workflows/ovn-kubernetes.yml -index ba6b291ff..34ff2cdda 100644 ---- a/.github/workflows/ovn-kubernetes.yml -+++ b/.github/workflows/ovn-kubernetes.yml -@@ -91,12 +91,19 @@ jobs: - go-version: ${{ env.GO_VERSION }} - id: go - -+ - name: Check out ovn -+ uses: actions/checkout@v3 -+ - - name: Check out ovn-kubernetes - uses: actions/checkout@v2 - with: - path: src/github.com/ovn-org/ovn-kubernetes - repository: ovn-org/ovn-kubernetes - -+ - name: Prepare -+ run: | -+ .ci/ovn-kubernetes/prepare.sh src/github.com/ovn-org/ovn-kubernetes -+ - - name: Set up environment - run: | - export GOPATH=$(go env GOPATH) -diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml -index 7a59cd478..88c48dd2c 100644 ---- a/.github/workflows/test.yml -+++ b/.github/workflows/test.yml -@@ -24,7 +24,7 @@ jobs: - M32: ${{ matrix.cfg.m32 }} - OPTS: ${{ matrix.cfg.opts }} - TESTSUITE: ${{ matrix.cfg.testsuite }} -- TESTSUITE_KW: ${{ matrix.cfg.testsuite_kw }} -+ TEST_RANGE: ${{ matrix.cfg.test_range }} - SANITIZERS: ${{ matrix.cfg.sanitizers }} - - name: linux ${{ join(matrix.cfg.*, ' ') }} -@@ -36,31 +36,23 @@ jobs: - cfg: - - { compiler: gcc, opts: --disable-ssl } - - { compiler: clang, opts: --disable-ssl } -- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" } -- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" } -- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" } -- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=no,ovn_monitor_all=no" } -- - { compiler: gcc, testsuite: test, testsuite_kw: "!ovn-northd" } -- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" } -- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" } -- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" } -- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: 
"parallelization=no,ovn_monitor_all=no" } -- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "!ovn-northd" } -- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" } -- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" } -- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" } -- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=no" } -- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "!ovn-northd" } -- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" } -- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" } -- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" } -- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=no" } -- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "!ovn-northd" } -- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" } -- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" } -- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" } -- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=no,ovn_monitor_all=no" } -- - { compiler: gcc, testsuite: system-test, testsuite_kw: "!ovn-northd" } -+ - { compiler: gcc, testsuite: test, test_range: "-500" } -+ - { compiler: gcc, testsuite: test, test_range: "501-1000" } -+ - { compiler: gcc, testsuite: test, test_range: "1001-" } -+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "-300" } -+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "301-600" } -+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "601-900" } -+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "901-1200" } -+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "1201-" } -+ - { compiler: gcc, testsuite: test, libs: -ljemalloc, test_range: "-500" } -+ - { compiler: gcc, testsuite: test, libs: -ljemalloc, test_range: "501-1000" } -+ - { compiler: gcc, testsuite: test, libs: -ljemalloc, test_range: "1001-" } -+ - { compiler: clang, testsuite: test, libs: -ljemalloc, test_range: "-500" } -+ - { compiler: clang, testsuite: test, libs: -ljemalloc, test_range: "501-1000" } -+ - { compiler: clang, testsuite: test, libs: -ljemalloc, test_range: "1001-" } -+ - { compiler: gcc, testsuite: system-test, test_range: "-100" } -+ - { compiler: gcc, testsuite: system-test, test_range: "101-200" } -+ - { compiler: gcc, testsuite: system-test, test_range: "201-" } - - { compiler: gcc, m32: m32, opts: --disable-ssl} - - steps: -diff --git a/Makefile.am b/Makefile.am -index 3b0df8393..f7758d114 100644 ---- a/Makefile.am -+++ b/Makefile.am -@@ -85,12 +85,13 @@ EXTRA_DIST = \ - MAINTAINERS.rst \ - README.rst \ - NOTICE \ -- .cirrus.yml \ - .ci/linux-build.sh \ - .ci/linux-prepare.sh \ - .ci/osx-build.sh \ - .ci/osx-prepare.sh \ - .ci/ovn-kubernetes/Dockerfile \ -+ .ci/ovn-kubernetes/prepare.sh \ -+ .ci/ovn-kubernetes/custom.patch \ - .github/workflows/test.yml \ - 
.github/workflows/ovn-kubernetes.yml \ - boot.sh \ -diff --git a/NEWS b/NEWS -index ef6a99fed..1a7a7855d 100644 ---- a/NEWS -+++ b/NEWS -@@ -1,3 +1,10 @@ -+OVN v22.09.2 - xx xxx xxxx -+-------------------------- -+ -+OVN v22.09.1 - 20 Dec 2022 -+-------------------------- -+ - Bug fixes -+ - OVN v22.09.0 - 16 Sep 2022 - -------------------------- - - ovn-controller: Add configuration knob, through OVS external-id -diff --git a/build-aux/sodepends.py b/build-aux/sodepends.py -index 343fda1af..7b1f9c840 100755 ---- a/build-aux/sodepends.py -+++ b/build-aux/sodepends.py -@@ -63,7 +63,8 @@ def sodepends(include_info, filenames, dst): - continue - - # Open file. -- include_dirs = [info[0] for info in include_info] -+ include_dirs = [info[1] if len(info) == 2 else info[0] -+ for info in include_info] - fn = soutil.find_file(include_dirs, toplevel) - if not fn: - ok = False -diff --git a/configure.ac b/configure.ac -index 765aacb17..408184649 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -13,7 +13,7 @@ - # limitations under the License. - - AC_PREREQ(2.63) --AC_INIT(ovn, 22.09.0, bugs@openvswitch.org) -+AC_INIT(ovn, 22.09.2, bugs@openvswitch.org) - AC_CONFIG_MACRO_DIR([m4]) - AC_CONFIG_AUX_DIR([build-aux]) - AC_CONFIG_HEADERS([config.h]) -diff --git a/controller/binding.c b/controller/binding.c -index 8f6b4b19d..5df62baef 100644 ---- a/controller/binding.c -+++ b/controller/binding.c -@@ -220,7 +220,14 @@ set_noop_qos(struct ovsdb_idl_txn *ovs_idl_txn, - static void - set_qos_type(struct netdev *netdev, const char *type) - { -- int error = netdev_set_qos(netdev, type, NULL); -+ /* 34359738360 == (2^32 - 1) * 8. netdev_set_qos() doesn't support -+ * 64-bit rate netlink attributes, so the maximum value is 2^32 - 1 bytes. -+ * The 'max-rate' config option is in bits, so multiplying by 8. -+ * Without setting max-rate the reported link speed will be used, which -+ * can be unrecognized for certain NICs or reported too low for virtual -+ * interfaces. */ -+ const struct smap conf = SMAP_CONST1(&conf, "max-rate", "34359738360"); -+ int error = netdev_set_qos(netdev, type, &conf); - if (error) { - static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); - VLOG_WARN_RL(&rl, "%s: could not set qdisc type \"%s\" (%s)", -@@ -1866,6 +1873,7 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in, - lbinding = local_binding_create(iface_id, iface_rec); - local_binding_add(local_bindings, lbinding); - } else { -+ lbinding->multiple_bindings = true; - static struct vlog_rate_limit rl = - VLOG_RATE_LIMIT_INIT(1, 5); - VLOG_WARN_RL( -@@ -2156,6 +2164,10 @@ consider_iface_claim(const struct ovsrec_interface *iface_rec, - lbinding = local_binding_create(iface_id, iface_rec); - local_binding_add(local_bindings, lbinding); - } else { -+ if (lbinding->iface && lbinding->iface != iface_rec) { -+ lbinding->multiple_bindings = true; -+ b_ctx_out->local_lports_changed = true; -+ } - lbinding->iface = iface_rec; - } - -@@ -2174,6 +2186,13 @@ consider_iface_claim(const struct ovsrec_interface *iface_rec, - return true; - } - -+ /* If multiple bindings to the same port, remove the "old" binding. -+ * This ensures that change tracking is correct. 
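An aside on the set_qos_type() change earlier in this hunk: the hard-coded 34359738360 max-rate is simply the largest 32-bit byte rate expressed in bits, i.e. (2^32 - 1) * 8, as the patch comment explains. A minimal standalone check of that arithmetic (illustrative only, not part of the patch):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Netlink rate attributes are 32-bit and counted in bytes;
         * the OVS "max-rate" option is configured in bits, hence * 8. */
        uint64_t max_bytes_per_s = (UINT64_C(1) << 32) - 1;  /* 2^32 - 1 */
        uint64_t max_bits_per_s = max_bytes_per_s * 8;
        printf("%" PRIu64 "\n", max_bits_per_s);  /* prints 34359738360 */
        return 0;
    }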
-+ */ -+ if (lbinding->multiple_bindings) { -+ remove_related_lport(pb, b_ctx_out); -+ } -+ - enum en_lport_type lport_type = get_lport_type(pb); - if (lport_type == LP_LOCALPORT) { - return consider_localport(pb, b_ctx_in, b_ctx_out); -@@ -2226,6 +2245,29 @@ consider_iface_release(const struct ovsrec_interface *iface_rec, - struct shash *binding_lports = &b_ctx_out->lbinding_data->lports; - - lbinding = local_binding_find(local_bindings, iface_id); -+ -+ if (lbinding) { -+ if (lbinding->multiple_bindings) { -+ VLOG_INFO("Multiple bindings for %s: force recompute to clean up", -+ iface_id); -+ return false; -+ } else { -+ int64_t ofport = iface_rec->n_ofport ? *iface_rec->ofport : 0; -+ if (lbinding->iface != iface_rec && !ofport) { -+ /* If external_ids:iface-id is set within the same transaction -+ * as adding an interface to a bridge, ovn-controller is -+ * usually initially notified of ovs interface changes with -+ * ofport == 0. If the lport was bound to a different interface -+ * we do not want to release it. -+ */ -+ VLOG_DBG("Not releasing lport %s as %s was claimed " -+ "and %s was never bound)", iface_id, lbinding->iface ? -+ lbinding->iface->name : "", iface_rec->name); -+ return true; -+ } -+ } -+ } -+ - struct binding_lport *b_lport = - local_binding_get_primary_or_localport_lport(lbinding); - if (is_binding_lport_this_chassis(b_lport, b_ctx_in->chassis_rec)) { -@@ -2666,7 +2708,7 @@ consider_patch_port_for_local_datapaths(const struct sbrec_port_binding *pb, - get_local_datapath(b_ctx_out->local_datapaths, - peer->datapath->tunnel_key); - } -- if (peer_ld && need_add_patch_peer_to_local( -+ if (peer_ld && need_add_peer_to_local( - b_ctx_in->sbrec_port_binding_by_name, peer, - b_ctx_in->chassis_rec)) { - add_local_datapath( -@@ -2681,7 +2723,7 @@ consider_patch_port_for_local_datapaths(const struct sbrec_port_binding *pb, - /* Add the peer datapath to the local datapaths if it's - * not present yet. - */ -- if (need_add_patch_peer_to_local( -+ if (need_add_peer_to_local( - b_ctx_in->sbrec_port_binding_by_name, pb, - b_ctx_in->chassis_rec)) { - add_local_datapath_peer_port( -@@ -3034,6 +3076,7 @@ local_binding_create(const char *name, const struct ovsrec_interface *iface) - struct local_binding *lbinding = xzalloc(sizeof *lbinding); - lbinding->name = xstrdup(name); - lbinding->iface = iface; -+ lbinding->multiple_bindings = false; - ovs_list_init(&lbinding->binding_lports); - - return lbinding; -diff --git a/controller/binding.h b/controller/binding.h -index ad959a9e6..6c3a98b02 100644 ---- a/controller/binding.h -+++ b/controller/binding.h -@@ -135,6 +135,7 @@ struct local_binding { - char *name; - const struct ovsrec_interface *iface; - struct ovs_list binding_lports; -+ bool multiple_bindings; - }; - - struct local_binding_data { -diff --git a/controller/lflow.h b/controller/lflow.h -index 543d3cd96..e57b061c3 100644 ---- a/controller/lflow.h -+++ b/controller/lflow.h -@@ -81,6 +81,7 @@ struct uuid; - #define OFTABLE_CHK_OUT_PORT_SEC 75 - #define OFTABLE_ECMP_NH_MAC 76 - #define OFTABLE_ECMP_NH 77 -+#define OFTABLE_CHK_LB_AFFINITY 78 - - enum ref_type { - REF_TYPE_ADDRSET, -diff --git a/controller/local_data.c b/controller/local_data.c -index 9eee568d1..035f10fff 100644 ---- a/controller/local_data.c -+++ b/controller/local_data.c -@@ -115,14 +115,19 @@ local_datapath_destroy(struct local_datapath *ld) - free(ld); - } - --/* Checks if pb is a patch port and the peer datapath should be added to local -- * datapaths. 
*/ -+/* Checks if pb is running on local gw router or pb is a patch port -+ * and the peer datapath should be added to local datapaths. */ - bool --need_add_patch_peer_to_local( -+need_add_peer_to_local( - struct ovsdb_idl_index *sbrec_port_binding_by_name, - const struct sbrec_port_binding *pb, - const struct sbrec_chassis *chassis) - { -+ /* This port is running on local gw router. */ -+ if (!strcmp(pb->type, "l3gateway") && pb->chassis == chassis) { -+ return true; -+ } -+ - /* If it is not a patch port, no peer to add. */ - if (strcmp(pb->type, "patch")) { - return false; -@@ -571,7 +576,7 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key, - peer_name); - - if (peer && peer->datapath) { -- if (need_add_patch_peer_to_local( -+ if (need_add_peer_to_local( - sbrec_port_binding_by_name, pb, chassis)) { - struct local_datapath *peer_ld = - add_local_datapath__(sbrec_datapath_binding_by_key, -diff --git a/controller/local_data.h b/controller/local_data.h -index d898c8aa5..b5429eb58 100644 ---- a/controller/local_data.h -+++ b/controller/local_data.h -@@ -66,7 +66,7 @@ struct local_datapath *local_datapath_alloc( - struct local_datapath *get_local_datapath(const struct hmap *, - uint32_t tunnel_key); - bool --need_add_patch_peer_to_local( -+need_add_peer_to_local( - struct ovsdb_idl_index *sbrec_port_binding_by_name, - const struct sbrec_port_binding *, - const struct sbrec_chassis *); -diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c -index 43fbf2ba3..a92fc895c 100644 ---- a/controller/ovn-controller.c -+++ b/controller/ovn-controller.c -@@ -151,6 +151,14 @@ struct pending_pkt { - /* Registered ofctrl seqno type for nb_cfg propagation. */ - static size_t ofctrl_seq_type_nb_cfg; - -+/* Only set monitor conditions on tables that are available in the -+ * server schema. -+ */ -+#define sb_table_set_monitor_condition(idl, table, cond) \ -+ (sbrec_server_has_##table##_table(idl) \ -+ ? 
sbrec_##table##_set_condition(idl, cond) \ -+ : 0) -+ - static unsigned int - update_sb_monitors(struct ovsdb_idl *ovnsb_idl, - const struct sbrec_chassis *chassis, -@@ -279,16 +287,16 @@ update_sb_monitors(struct ovsdb_idl *ovnsb_idl, - - out:; - unsigned int cond_seqnos[] = { -- sbrec_port_binding_set_condition(ovnsb_idl, &pb), -- sbrec_logical_flow_set_condition(ovnsb_idl, &lf), -- sbrec_logical_dp_group_set_condition(ovnsb_idl, &ldpg), -- sbrec_mac_binding_set_condition(ovnsb_idl, &mb), -- sbrec_multicast_group_set_condition(ovnsb_idl, &mg), -- sbrec_dns_set_condition(ovnsb_idl, &dns), -- sbrec_controller_event_set_condition(ovnsb_idl, &ce), -- sbrec_ip_multicast_set_condition(ovnsb_idl, &ip_mcast), -- sbrec_igmp_group_set_condition(ovnsb_idl, &igmp), -- sbrec_chassis_private_set_condition(ovnsb_idl, &chprv), -+ sb_table_set_monitor_condition(ovnsb_idl, port_binding, &pb), -+ sb_table_set_monitor_condition(ovnsb_idl, logical_flow, &lf), -+ sb_table_set_monitor_condition(ovnsb_idl, logical_dp_group, &ldpg), -+ sb_table_set_monitor_condition(ovnsb_idl, mac_binding, &mb), -+ sb_table_set_monitor_condition(ovnsb_idl, multicast_group, &mg), -+ sb_table_set_monitor_condition(ovnsb_idl, dns, &dns), -+ sb_table_set_monitor_condition(ovnsb_idl, controller_event, &ce), -+ sb_table_set_monitor_condition(ovnsb_idl, ip_multicast, &ip_mcast), -+ sb_table_set_monitor_condition(ovnsb_idl, igmp_group, &igmp), -+ sb_table_set_monitor_condition(ovnsb_idl, chassis_private, &chprv), - }; - - unsigned int expected_cond_seqno = 0; -@@ -658,7 +666,8 @@ update_ct_zones(const struct shash *binding_lports, - const char *user; - struct sset all_users = SSET_INITIALIZER(&all_users); - struct simap req_snat_zones = SIMAP_INITIALIZER(&req_snat_zones); -- unsigned long unreq_snat_zones[BITMAP_N_LONGS(MAX_CT_ZONES)]; -+ unsigned long unreq_snat_zones_map[BITMAP_N_LONGS(MAX_CT_ZONES)]; -+ struct simap unreq_snat_zones = SIMAP_INITIALIZER(&unreq_snat_zones); - - struct shash_node *shash_node; - SHASH_FOR_EACH (shash_node, binding_lports) { -@@ -698,49 +707,46 @@ update_ct_zones(const struct shash *binding_lports, - bitmap_set0(ct_zone_bitmap, ct_zone->data); - simap_delete(ct_zones, ct_zone); - } else if (!simap_find(&req_snat_zones, ct_zone->name)) { -- bitmap_set1(unreq_snat_zones, ct_zone->data); -+ bitmap_set1(unreq_snat_zones_map, ct_zone->data); -+ simap_put(&unreq_snat_zones, ct_zone->name, ct_zone->data); - } - } - - /* Prioritize requested CT zones */ - struct simap_node *snat_req_node; - SIMAP_FOR_EACH (snat_req_node, &req_snat_zones) { -- struct simap_node *node = simap_find(ct_zones, snat_req_node->name); -- if (node) { -- if (node->data == snat_req_node->data) { -- /* No change to this request, so no action needed */ -- continue; -- } else { -- /* Zone request has changed for this node. delete old entry */ -- bitmap_set0(ct_zone_bitmap, node->data); -- simap_delete(ct_zones, node); -- } -- } -- - /* Determine if someone already had this zone auto-assigned. - * If so, then they need to give up their assignment since - * that zone is being explicitly requested now. 
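An aside on the sb_table_set_monitor_condition() macro introduced a little earlier in this hunk: it is a plain token-pasting wrapper around the generated IDL helpers, so a call such as the port_binding one used in update_sb_monitors() expands mechanically to:

    /* sb_table_set_monitor_condition(ovnsb_idl, port_binding, &pb)
     * is equivalent to: */
    (sbrec_server_has_port_binding_table(ovnsb_idl)
         ? sbrec_port_binding_set_condition(ovnsb_idl, &pb)
         : 0)

i.e. the monitor condition is only installed when the connected SB server's schema actually contains that table, which is what allows ovn-controller to run against an older southbound schema.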
- */ -- if (bitmap_is_set(unreq_snat_zones, snat_req_node->data)) { -- struct simap_node *dup; -- SIMAP_FOR_EACH_SAFE (dup, ct_zones) { -- if (dup != snat_req_node && dup->data == snat_req_node->data) { -- simap_delete(ct_zones, dup); -- break; -+ if (bitmap_is_set(unreq_snat_zones_map, snat_req_node->data)) { -+ struct simap_node *unreq_node; -+ SIMAP_FOR_EACH_SAFE (unreq_node, &unreq_snat_zones) { -+ if (unreq_node->data == snat_req_node->data) { -+ simap_find_and_delete(ct_zones, unreq_node->name); -+ simap_delete(&unreq_snat_zones, unreq_node); - } - } -+ - /* Set this bit to 0 so that if multiple datapaths have requested - * this zone, we don't needlessly double-detect this condition. - */ -- bitmap_set0(unreq_snat_zones, snat_req_node->data); -+ bitmap_set0(unreq_snat_zones_map, snat_req_node->data); - } - -- add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_OF_QUEUED, -- snat_req_node->data, true, -- snat_req_node->name); -- -- bitmap_set1(ct_zone_bitmap, snat_req_node->data); -- simap_put(ct_zones, snat_req_node->name, snat_req_node->data); -+ struct simap_node *node = simap_find(ct_zones, snat_req_node->name); -+ if (node) { -+ if (node->data != snat_req_node->data) { -+ /* Zone request has changed for this node. delete old entry and -+ * create new one*/ -+ add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_OF_QUEUED, -+ snat_req_node->data, true, -+ snat_req_node->name); -+ bitmap_set0(ct_zone_bitmap, node->data); -+ } -+ bitmap_set1(ct_zone_bitmap, snat_req_node->data); -+ node->data = snat_req_node->data; -+ } - } - - /* xxx This is wasteful to assign a zone to each port--even if no -@@ -758,6 +764,7 @@ update_ct_zones(const struct shash *binding_lports, - } - - simap_destroy(&req_snat_zones); -+ simap_destroy(&unreq_snat_zones); - sset_destroy(&all_users); - shash_destroy(&all_lds); - } -@@ -799,11 +806,36 @@ commit_ct_zones(const struct ovsrec_bridge *br_int, - } - } - -+/* Connection tracking zones. */ -+struct ed_type_ct_zones { -+ unsigned long bitmap[BITMAP_N_LONGS(MAX_CT_ZONES)]; -+ struct shash pending; -+ struct simap current; -+ -+ /* Tracked data. */ -+ bool recomputed; -+}; -+ - static void - restore_ct_zones(const struct ovsrec_bridge_table *bridge_table, - const struct ovsrec_open_vswitch_table *ovs_table, -- struct simap *ct_zones, unsigned long *ct_zone_bitmap) -+ struct ed_type_ct_zones *ct_zones_data) - { -+ memset(ct_zones_data->bitmap, 0, sizeof ct_zones_data->bitmap); -+ bitmap_set1(ct_zones_data->bitmap, 0); /* Zone 0 is reserved. 
*/ -+ -+ struct shash_node *pending_node; -+ SHASH_FOR_EACH (pending_node, &ct_zones_data->pending) { -+ struct ct_zone_pending_entry *ctpe = pending_node->data; -+ -+ if (ctpe->add) { -+ VLOG_DBG("restoring ct zone %"PRId32" for '%s'", ctpe->zone, -+ pending_node->name); -+ bitmap_set1(ct_zones_data->bitmap, ctpe->zone); -+ simap_put(&ct_zones_data->current, pending_node->name, ctpe->zone); -+ } -+ } -+ - const struct ovsrec_open_vswitch *cfg; - cfg = ovsrec_open_vswitch_table_first(ovs_table); - if (!cfg) { -@@ -829,14 +861,18 @@ restore_ct_zones(const struct ovsrec_bridge_table *bridge_table, - continue; - } - -+ if (shash_find(&ct_zones_data->pending, user)) { -+ continue; -+ } -+ - unsigned int zone; - if (!str_to_uint(node->value, 10, &zone)) { - continue; - } - - VLOG_DBG("restoring ct zone %"PRId32" for '%s'", zone, user); -- bitmap_set1(ct_zone_bitmap, zone); -- simap_put(ct_zones, user, zone); -+ bitmap_set1(ct_zones_data->bitmap, zone); -+ simap_put(&ct_zones_data->current, user, zone); - } - } - -@@ -2058,16 +2094,6 @@ out: - return true; - } - --/* Connection tracking zones. */ --struct ed_type_ct_zones { -- unsigned long bitmap[BITMAP_N_LONGS(MAX_CT_ZONES)]; -- struct shash pending; -- struct simap current; -- -- /* Tracked data. */ -- bool recomputed; --}; -- - static void * - en_ct_zones_init(struct engine_node *node, struct engine_arg *arg OVS_UNUSED) - { -@@ -2082,9 +2108,7 @@ en_ct_zones_init(struct engine_node *node, struct engine_arg *arg OVS_UNUSED) - shash_init(&data->pending); - simap_init(&data->current); - -- memset(data->bitmap, 0, sizeof data->bitmap); -- bitmap_set1(data->bitmap, 0); /* Zone 0 is reserved. */ -- restore_ct_zones(bridge_table, ovs_table, &data->current, data->bitmap); -+ restore_ct_zones(bridge_table, ovs_table, data); - return data; - } - -@@ -2111,6 +2135,12 @@ en_ct_zones_run(struct engine_node *node, void *data) - struct ed_type_runtime_data *rt_data = - engine_get_input_data("runtime_data", node); - -+ const struct ovsrec_open_vswitch_table *ovs_table = -+ EN_OVSDB_GET(engine_get_input("OVS_open_vswitch", node)); -+ const struct ovsrec_bridge_table *bridge_table = -+ EN_OVSDB_GET(engine_get_input("OVS_bridge", node)); -+ -+ restore_ct_zones(bridge_table, ovs_table, ct_zones_data); - update_ct_zones(&rt_data->lbinding_data.lports, &rt_data->local_datapaths, - &ct_zones_data->current, ct_zones_data->bitmap, - &ct_zones_data->pending); -@@ -2188,7 +2218,7 @@ ct_zones_runtime_data_handler(struct engine_node *node, void *data) - - struct hmap *tracked_dp_bindings = &rt_data->tracked_dp_bindings; - struct tracked_datapath *tdp; -- int scan_start = 0; -+ int scan_start = 1; - - bool updated = false; - -@@ -4197,6 +4227,7 @@ main(int argc, char *argv[]) - } - stopwatch_start(PINCTRL_RUN_STOPWATCH_NAME, - time_msec()); -+ pinctrl_update(ovnsb_idl_loop.idl, br_int->name); - pinctrl_run(ovnsb_idl_txn, - sbrec_datapath_binding_by_key, - sbrec_port_binding_by_datapath, -diff --git a/controller/physical.c b/controller/physical.c -index f3c8bddce..705146316 100644 ---- a/controller/physical.c -+++ b/controller/physical.c -@@ -803,6 +803,14 @@ put_replace_router_port_mac_flows(struct ovsdb_idl_index - - ofpact_put_OUTPUT(ofpacts_p)->port = ofport; - -+ /* Replace the MAC back and strip vlan. In case of l2 flooding -+ * traffic (ARP/ND) we need to restore previous state so other ports -+ * do not receive the traffic tagged and with wrong MAC. 
*/ -+ ofpact_put_SET_ETH_SRC(ofpacts_p)->mac = router_port_mac; -+ if (tag) { -+ ofpact_put_STRIP_VLAN(ofpacts_p); -+ } -+ - ofctrl_add_flow(flow_table, OFTABLE_LOG_TO_PHY, 150, - localnet_port->header_.uuid.parts[0], - &match, ofpacts_p, &localnet_port->header_.uuid); -diff --git a/controller/pinctrl.c b/controller/pinctrl.c -index 3f5d0af79..bcbb04eed 100644 ---- a/controller/pinctrl.c -+++ b/controller/pinctrl.c -@@ -173,6 +173,7 @@ struct pinctrl { - pthread_t pinctrl_thread; - /* Latch to destroy the 'pinctrl_thread' */ - struct latch pinctrl_thread_exit; -+ bool mac_binding_can_timestamp; - }; - - static struct pinctrl pinctrl; -@@ -544,6 +545,7 @@ pinctrl_init(void) - bfd_monitor_init(); - init_fdb_entries(); - pinctrl.br_int_name = NULL; -+ pinctrl.mac_binding_can_timestamp = false; - pinctrl_handler_seq = seq_create(); - pinctrl_main_seq = seq_create(); - -@@ -3519,7 +3521,7 @@ pinctrl_handler(void *arg_) - } - - static void --pinctrl_set_br_int_name_(char *br_int_name) -+pinctrl_set_br_int_name_(const char *br_int_name) - OVS_REQUIRES(pinctrl_mutex) - { - if (br_int_name && (!pinctrl.br_int_name || strcmp(pinctrl.br_int_name, -@@ -3533,13 +3535,31 @@ pinctrl_set_br_int_name_(char *br_int_name) - } - - void --pinctrl_set_br_int_name(char *br_int_name) -+pinctrl_set_br_int_name(const char *br_int_name) - { - ovs_mutex_lock(&pinctrl_mutex); - pinctrl_set_br_int_name_(br_int_name); - ovs_mutex_unlock(&pinctrl_mutex); - } - -+void -+pinctrl_update(const struct ovsdb_idl *idl, const char *br_int_name) -+{ -+ ovs_mutex_lock(&pinctrl_mutex); -+ pinctrl_set_br_int_name_(br_int_name); -+ -+ bool can_timestamp = sbrec_server_has_mac_binding_table_col_timestamp(idl); -+ if (can_timestamp != pinctrl.mac_binding_can_timestamp) { -+ pinctrl.mac_binding_can_timestamp = can_timestamp; -+ -+ /* Notify pinctrl_handler that mac binding timestamp column -+ * availability has changed. */ -+ notify_pinctrl_handler(); -+ } -+ -+ ovs_mutex_unlock(&pinctrl_mutex); -+} -+ - /* Called by ovn-controller. */ - void - pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn, -@@ -3563,7 +3583,6 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn, - const struct shash *local_active_ports_ras) - { - ovs_mutex_lock(&pinctrl_mutex); -- pinctrl_set_br_int_name_(br_int->name); - run_put_mac_bindings(ovnsb_idl_txn, sbrec_datapath_binding_by_key, - sbrec_port_binding_by_key, - sbrec_mac_binding_by_lport_ip); -@@ -4245,12 +4264,17 @@ mac_binding_add_to_sb(struct ovsdb_idl_txn *ovnsb_idl_txn, - b = sbrec_mac_binding_insert(ovnsb_idl_txn); - sbrec_mac_binding_set_logical_port(b, logical_port); - sbrec_mac_binding_set_ip(b, ip); -- sbrec_mac_binding_set_mac(b, mac_string); - sbrec_mac_binding_set_datapath(b, dp); -- sbrec_mac_binding_set_timestamp(b, time_wall_msec()); -- } else if (strcmp(b->mac, mac_string)) { -+ } -+ -+ if (strcmp(b->mac, mac_string)) { - sbrec_mac_binding_set_mac(b, mac_string); -- sbrec_mac_binding_set_timestamp(b, time_wall_msec()); -+ -+ /* For backward compatibility check if timestamp column is available -+ * in SB DB. 
*/ -+ if (pinctrl.mac_binding_can_timestamp) { -+ sbrec_mac_binding_set_timestamp(b, time_wall_msec()); -+ } - } - } - -@@ -4378,7 +4402,7 @@ run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip, - const struct sbrec_port_binding *pb; - SBREC_PORT_BINDING_FOR_EACH_EQUAL (pb, target, - sbrec_port_binding_by_datapath) { -- if (strcmp(pb->type, "patch")) { -+ if (strcmp(pb->type, "patch") && strcmp(pb->type, "l3gateway")) { - continue; - } - struct buffered_packets *cur_qp; -diff --git a/controller/pinctrl.h b/controller/pinctrl.h -index d4f52e94d..cfece04da 100644 ---- a/controller/pinctrl.h -+++ b/controller/pinctrl.h -@@ -26,6 +26,7 @@ - struct hmap; - struct shash; - struct lport_index; -+struct ovsdb_idl; - struct ovsdb_idl_index; - struct ovsdb_idl_txn; - struct ovsrec_bridge; -@@ -57,7 +58,8 @@ void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn, - const struct shash *local_active_ports_ras); - void pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn); - void pinctrl_destroy(void); --void pinctrl_set_br_int_name(char *br_int_name); -+void pinctrl_set_br_int_name(const char *br_int_name); -+void pinctrl_update(const struct ovsdb_idl *idl, const char *br_int_name); - - struct activated_port { - uint32_t dp_key; -diff --git a/debian/changelog b/debian/changelog -index 267e12baa..08cc66fc0 100644 ---- a/debian/changelog -+++ b/debian/changelog -@@ -1,3 +1,15 @@ -+OVN (22.09.2-1) unstable; urgency=low -+ [ OVN team ] -+ * New upstream version -+ -+ -- OVN team Tue, 20 Dec 2022 13:53:56 -0500 -+ -+OVN (22.09.1-1) unstable; urgency=low -+ [ OVN team ] -+ * New upstream version -+ -+ -- OVN team Tue, 20 Dec 2022 13:53:56 -0500 -+ - ovn (22.09.0-1) unstable; urgency=low - - * New upstream version -diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c -index e5c193d9d..9a80a7f68 100644 ---- a/ic/ovn-ic.c -+++ b/ic/ovn-ic.c -@@ -71,6 +71,7 @@ struct ic_context { - struct ovsdb_idl_index *icsbrec_port_binding_by_az; - struct ovsdb_idl_index *icsbrec_port_binding_by_ts; - struct ovsdb_idl_index *icsbrec_port_binding_by_ts_az; -+ struct ovsdb_idl_index *icsbrec_route_by_az; - struct ovsdb_idl_index *icsbrec_route_by_ts; - struct ovsdb_idl_index *icsbrec_route_by_ts_az; - }; -@@ -756,6 +757,7 @@ port_binding_run(struct ic_context *ctx, - } - icsbrec_port_binding_index_destroy_row(isb_pb_key); - -+ const struct sbrec_port_binding *sb_pb; - const struct icnbrec_transit_switch *ts; - ICNBREC_TRANSIT_SWITCH_FOR_EACH (ts, ctx->ovninb_idl) { - const struct nbrec_logical_switch *ls = find_ts_in_nb(ctx, ts->name); -@@ -787,9 +789,9 @@ port_binding_run(struct ic_context *ctx, - for (int i = 0; i < ls->n_ports; i++) { - lsp = ls->ports[i]; - -- const struct sbrec_port_binding *sb_pb = find_lsp_in_sb(ctx, lsp); - if (!strcmp(lsp->type, "router")) { - /* The port is local. 
*/ -+ sb_pb = find_lsp_in_sb(ctx, lsp); - if (!sb_pb) { - continue; - } -@@ -806,6 +808,7 @@ port_binding_run(struct ic_context *ctx, - if (!isb_pb) { - nbrec_logical_switch_update_ports_delvalue(ls, lsp); - } else { -+ sb_pb = find_lsp_in_sb(ctx, lsp); - if (!sb_pb) { - continue; - } -@@ -881,17 +884,18 @@ ic_route_hash(const struct in6_addr *prefix, unsigned int plen, - static struct ic_route_info * - ic_route_find(struct hmap *routes, const struct in6_addr *prefix, - unsigned int plen, const struct in6_addr *nexthop, -- const char *origin, char *route_table) -+ const char *origin, const char *route_table, uint32_t hash) - { - struct ic_route_info *r; -- uint32_t hash = ic_route_hash(prefix, plen, nexthop, origin, route_table); -+ if (!hash) { -+ hash = ic_route_hash(prefix, plen, nexthop, origin, route_table); -+ } - HMAP_FOR_EACH_WITH_HASH (r, node, hash, routes) { - if (ipv6_addr_equals(&r->prefix, prefix) && - r->plen == plen && - ipv6_addr_equals(&r->nexthop, nexthop) && - !strcmp(r->origin, origin) && -- !strcmp(r->route_table ? r->route_table : "", route_table) && -- ipv6_addr_equals(&r->nexthop, nexthop)) { -+ !strcmp(r->route_table ? r->route_table : "", route_table)) { - return r; - } - } -@@ -942,8 +946,8 @@ add_to_routes_learned(struct hmap *routes_learned, - } - const char *origin = smap_get_def(&nb_route->options, "origin", ""); - if (ic_route_find(routes_learned, &prefix, plen, &nexthop, origin, -- nb_route->route_table)) { -- /* Route is already added to learned in previous iteration. */ -+ nb_route->route_table, 0)) { -+ /* Route was added to learned on previous iteration. */ - return true; - } - -@@ -1090,20 +1094,44 @@ route_need_advertise(const char *policy, - } - - static void --add_to_routes_ad(struct hmap *routes_ad, -- const struct nbrec_logical_router_static_route *nb_route, -- const struct lport_addresses *nexthop_addresses, -- const struct smap *nb_options, const char *route_table) -+add_to_routes_ad(struct hmap *routes_ad, const struct in6_addr prefix, -+ unsigned int plen, const struct in6_addr nexthop, -+ const char *origin, const char *route_table, -+ const struct nbrec_logical_router_port *nb_lrp, -+ const struct nbrec_logical_router_static_route *nb_route) - { -- if (strcmp(route_table, nb_route->route_table)) { -- if (VLOG_IS_DBG_ENABLED()) { -- VLOG_DBG("Skip advertising route %s -> %s as its route table %s !=" -- " %s of TS port", nb_route->ip_prefix, nb_route->nexthop, -- nb_route->route_table, route_table); -- } -- return; -+ if (route_table == NULL) { -+ route_table = ""; -+ } -+ -+ uint hash = ic_route_hash(&prefix, plen, &nexthop, origin, route_table); -+ -+ if (!ic_route_find(routes_ad, &prefix, plen, &nexthop, origin, route_table, -+ hash)) { -+ struct ic_route_info *ic_route = xzalloc(sizeof *ic_route); -+ ic_route->prefix = prefix; -+ ic_route->plen = plen; -+ ic_route->nexthop = nexthop; -+ ic_route->nb_route = nb_route; -+ ic_route->origin = origin; -+ ic_route->route_table = route_table; -+ ic_route->nb_lrp = nb_lrp; -+ hmap_insert(routes_ad, &ic_route->node, hash); -+ } else { -+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1); -+ VLOG_WARN_RL(&rl, "Duplicate route advertisement was suppressed! 
NB " -+ "route uuid: "UUID_FMT, -+ UUID_ARGS(&nb_route->header_.uuid)); - } -+} - -+static void -+add_static_to_routes_ad( -+ struct hmap *routes_ad, -+ const struct nbrec_logical_router_static_route *nb_route, -+ const struct lport_addresses *nexthop_addresses, -+ const struct smap *nb_options) -+{ - struct in6_addr prefix, nexthop; - unsigned int plen; - if (!parse_route(nb_route->ip_prefix, nb_route->nexthop, -@@ -1142,16 +1170,8 @@ add_to_routes_ad(struct hmap *routes_ad, - ds_destroy(&msg); - } - -- struct ic_route_info *ic_route = xzalloc(sizeof *ic_route); -- ic_route->prefix = prefix; -- ic_route->plen = plen; -- ic_route->nexthop = nexthop; -- ic_route->nb_route = nb_route; -- ic_route->origin = ROUTE_ORIGIN_STATIC; -- ic_route->route_table = nb_route->route_table; -- hmap_insert(routes_ad, &ic_route->node, -- ic_route_hash(&prefix, plen, &nexthop, ROUTE_ORIGIN_STATIC, -- nb_route->route_table)); -+ add_to_routes_ad(routes_ad, prefix, plen, nexthop, ROUTE_ORIGIN_STATIC, -+ nb_route->route_table, NULL, nb_route); - } - - static void -@@ -1195,18 +1215,9 @@ add_network_to_routes_ad(struct hmap *routes_ad, const char *network, - ds_destroy(&msg); - } - -- struct ic_route_info *ic_route = xzalloc(sizeof *ic_route); -- ic_route->prefix = prefix; -- ic_route->plen = plen; -- ic_route->nexthop = nexthop; -- ic_route->nb_lrp = nb_lrp; -- ic_route->origin = ROUTE_ORIGIN_CONNECTED; -- - /* directly-connected routes go to
route table */ -- ic_route->route_table = NULL; -- hmap_insert(routes_ad, &ic_route->node, -- ic_route_hash(&prefix, plen, &nexthop, -- ROUTE_ORIGIN_CONNECTED, "")); -+ add_to_routes_ad(routes_ad, prefix, plen, nexthop, ROUTE_ORIGIN_CONNECTED, -+ NULL, nb_lrp, NULL); - } - - static bool -@@ -1366,7 +1377,7 @@ sync_learned_routes(struct ic_context *ctx, - struct ic_route_info *route_learned - = ic_route_find(&ic_lr->routes_learned, &prefix, plen, - &nexthop, isb_route->origin, -- isb_route->route_table); -+ isb_route->route_table, 0); - if (route_learned) { - /* Sync external-ids */ - struct uuid ext_id; -@@ -1465,7 +1476,7 @@ advertise_routes(struct ic_context *ctx, - } - struct ic_route_info *route_adv = - ic_route_find(routes_ad, &prefix, plen, &nexthop, -- isb_route->origin, isb_route->route_table); -+ isb_route->origin, isb_route->route_table, 0); - if (!route_adv) { - /* Delete the extra route from IC-SB. */ - VLOG_DBG("Delete route %s -> %s from IC-SB, which is not found" -@@ -1545,10 +1556,10 @@ build_ts_routes_to_adv(struct ic_context *ctx, - nbrec_logical_router_update_static_routes_delvalue(lr, - nb_route); - } -- } else { -+ } else if (!strcmp(ts_route_table, nb_route->route_table)) { - /* It may be a route to be advertised */ -- add_to_routes_ad(routes_ad, nb_route, ts_port_addrs, -- &nb_global->options, ts_route_table); -+ add_static_to_routes_ad(routes_ad, nb_route, ts_port_addrs, -+ &nb_global->options); - } - } - -@@ -1581,7 +1592,6 @@ advertise_lr_routes(struct ic_context *ctx, - const struct icsbrec_port_binding *isb_pb; - const char *lrp_name, *ts_name, *route_table; - struct lport_addresses ts_port_addrs; -- const struct nbrec_logical_router *lr = ic_lr->lr; - const struct icnbrec_transit_switch *key; - - struct hmap routes_ad = HMAP_INITIALIZER(&routes_ad); -@@ -1599,7 +1609,7 @@ advertise_lr_routes(struct ic_context *ctx, - VLOG_INFO_RL(&rl, "Route sync ignores port %s on ts %s for router" - " %s because the addresses are invalid.", - isb_pb->logical_port, isb_pb->transit_switch, -- lr->name); -+ ic_lr->lr->name); - continue; - } - lrp_name = get_lrp_name_by_ts_port_name(ctx, isb_pb->logical_port); -@@ -1612,6 +1622,39 @@ advertise_lr_routes(struct ic_context *ctx, - hmap_destroy(&routes_ad); - } - -+static void -+delete_orphan_ic_routes(struct ic_context *ctx, -+ const struct icsbrec_availability_zone *az) -+{ -+ const struct icsbrec_route *isb_route, *isb_route_key = -+ icsbrec_route_index_init_row(ctx->icsbrec_route_by_az); -+ icsbrec_route_index_set_availability_zone(isb_route_key, az); -+ -+ const struct icnbrec_transit_switch *t_sw, *t_sw_key; -+ -+ ICSBREC_ROUTE_FOR_EACH_EQUAL (isb_route, isb_route_key, -+ ctx->icsbrec_route_by_az) -+ { -+ t_sw_key = icnbrec_transit_switch_index_init_row( -+ ctx->icnbrec_transit_switch_by_name); -+ icnbrec_transit_switch_index_set_name(t_sw_key, -+ isb_route->transit_switch); -+ t_sw = icnbrec_transit_switch_index_find( -+ ctx->icnbrec_transit_switch_by_name, t_sw_key); -+ icnbrec_transit_switch_index_destroy_row(t_sw_key); -+ -+ if (!t_sw) { -+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1); -+ VLOG_INFO_RL(&rl, "Deleting orphan ICDB:Route: %s->%s (%s, rtb:%s," -+ " transit switch: %s)", isb_route->ip_prefix, -+ isb_route->nexthop, isb_route->origin, -+ isb_route->route_table, isb_route->transit_switch); -+ icsbrec_route_delete(isb_route); -+ } -+ } -+ icsbrec_route_index_destroy_row(isb_route_key); -+} -+ - static void - route_run(struct ic_context *ctx, - const struct icsbrec_availability_zone *az) -@@ -1620,6 
+1663,8 @@ route_run(struct ic_context *ctx, - return; - } - -+ delete_orphan_ic_routes(ctx, az); -+ - struct hmap ic_lrs = HMAP_INITIALIZER(&ic_lrs); - const struct icsbrec_port_binding *isb_pb; - const struct icsbrec_port_binding *isb_pb_key = -@@ -1866,13 +1911,112 @@ main(int argc, char *argv[]) - struct ovsdb_idl_loop ovnisb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( - ovsdb_idl_create(ovn_ic_sb_db, &icsbrec_idl_class, true, true)); - -- /* ovn-nb db. XXX: add only needed tables and columns */ -+ /* ovn-nb db. */ - struct ovsdb_idl_loop ovnnb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( -- ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, true, true)); -- -- /* ovn-sb db. XXX: add only needed tables and columns */ -+ ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, false, true)); -+ -+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_nb_global); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, &nbrec_nb_global_col_name); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, &nbrec_nb_global_col_options); -+ -+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, -+ &nbrec_table_logical_router_static_route); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_static_route_col_route_table); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_static_route_col_ip_prefix); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_static_route_col_nexthop); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_static_route_col_external_ids); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_static_route_col_options); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_static_route_col_policy); -+ -+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_router); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_col_name); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_col_static_routes); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_col_ports); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_col_options); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_col_external_ids); -+ -+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_router_port); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_port_col_name); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_port_col_networks); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_port_col_external_ids); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_router_port_col_options); -+ -+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_switch); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_col_name); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_col_ports); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_col_other_config); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_col_external_ids); -+ -+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_switch_port); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_name); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_addresses); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_options); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_type); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_up); -+ 
ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_addresses); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_enabled); -+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, -+ &nbrec_logical_switch_port_col_external_ids); -+ -+ /* ovn-sb db. */ - struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( -- ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, true, true)); -+ ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true)); -+ -+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_chassis); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_encaps); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_name); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_hostname); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_other_config); -+ -+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_encap); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_chassis_name); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_type); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_ip); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_options); -+ -+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_datapath_binding_col_external_ids); -+ -+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_port_binding); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_port_binding_col_datapath); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_port_binding_col_mac); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_port_binding_col_options); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_port_binding_col_logical_port); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_port_binding_col_external_ids); -+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, -+ &sbrec_port_binding_col_chassis); - - /* Create IDL indexes */ - struct ovsdb_idl_index *nbrec_ls_by_name -@@ -1908,6 +2052,10 @@ main(int argc, char *argv[]) - &icsbrec_port_binding_col_transit_switch, - &icsbrec_port_binding_col_availability_zone); - -+ struct ovsdb_idl_index *icsbrec_route_by_az -+ = ovsdb_idl_index_create1(ovnisb_idl_loop.idl, -+ &icsbrec_route_col_availability_zone); -+ - struct ovsdb_idl_index *icsbrec_route_by_ts - = ovsdb_idl_index_create1(ovnisb_idl_loop.idl, - &icsbrec_route_col_transit_switch); -@@ -1962,6 +2110,7 @@ main(int argc, char *argv[]) - .icsbrec_port_binding_by_az = icsbrec_port_binding_by_az, - .icsbrec_port_binding_by_ts = icsbrec_port_binding_by_ts, - .icsbrec_port_binding_by_ts_az = icsbrec_port_binding_by_ts_az, -+ .icsbrec_route_by_az = icsbrec_route_by_az, - .icsbrec_route_by_ts = icsbrec_route_by_ts, - .icsbrec_route_by_ts_az = icsbrec_route_by_ts_az, - }; -diff --git a/include/ovn/actions.h b/include/ovn/actions.h -index d7ee84dac..fdb6ab08b 100644 ---- a/include/ovn/actions.h -+++ b/include/ovn/actions.h -@@ -121,6 +121,8 @@ struct ovn_extend_table; - OVNACT(COMMIT_ECMP_NH, ovnact_commit_ecmp_nh) \ - OVNACT(CHK_ECMP_NH_MAC, ovnact_result) \ - OVNACT(CHK_ECMP_NH, ovnact_result) \ -+ OVNACT(COMMIT_LB_AFF, ovnact_commit_lb_aff) \ -+ OVNACT(CHK_LB_AFF, ovnact_result) \ - - /* enum ovnact_type, with a member OVNACT_ for each action. */ - enum OVS_PACKED_ENUM ovnact_type { -@@ -463,6 +465,20 @@ struct ovnact_commit_ecmp_nh { - uint8_t proto; - }; - -+/* OVNACT_COMMIT_LB_AFF. 
*/ -+struct ovnact_commit_lb_aff { -+ struct ovnact ovnact; -+ -+ struct in6_addr vip; -+ uint16_t vip_port; -+ uint8_t proto; -+ -+ struct in6_addr backend; -+ uint16_t backend_port; -+ -+ uint16_t timeout; -+}; -+ - /* Internal use by the helpers below. */ - void ovnact_init(struct ovnact *, enum ovnact_type, size_t len); - void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len); -diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h -index 3db7265e4..8060488f9 100644 ---- a/include/ovn/logical-fields.h -+++ b/include/ovn/logical-fields.h -@@ -53,6 +53,11 @@ enum ovn_controller_event { - - #define MFF_N_LOG_REGS 10 - -+#define MFF_LOG_LB_AFF_MATCH_IP4_ADDR MFF_REG4 -+#define MFF_LOG_LB_AFF_MATCH_LS_IP6_ADDR MFF_XXREG0 -+#define MFF_LOG_LB_AFF_MATCH_LR_IP6_ADDR MFF_XXREG1 -+#define MFF_LOG_LB_AFF_MATCH_PORT MFF_REG8 -+ - void ovn_init_symtab(struct shash *symtab); - - /* MFF_LOG_FLAGS_REG bit assignments */ -@@ -71,6 +76,7 @@ enum mff_log_flags_bits { - MLF_USE_SNAT_ZONE = 11, - MLF_CHECK_PORT_SEC_BIT = 12, - MLF_LOOKUP_COMMIT_ECMP_NH_BIT = 13, -+ MLF_USE_LB_AFF_SESSION_BIT = 14, - }; - - /* MFF_LOG_FLAGS_REG flag assignments */ -@@ -116,6 +122,8 @@ enum mff_log_flags { - MLF_LOCALPORT = (1 << MLF_LOCALPORT_BIT), - - MLF_LOOKUP_COMMIT_ECMP_NH = (1 << MLF_LOOKUP_COMMIT_ECMP_NH_BIT), -+ -+ MLF_USE_LB_AFF_SESSION = (1 << MLF_USE_LB_AFF_SESSION_BIT), - }; - - /* OVN logical fields -diff --git a/lib/actions.c b/lib/actions.c -index adbb42db4..5d88fccb7 100644 ---- a/lib/actions.c -+++ b/lib/actions.c -@@ -4600,6 +4600,429 @@ encode_CHK_ECMP_NH(const struct ovnact_result *res, - MLF_LOOKUP_COMMIT_ECMP_NH_BIT, ofpacts); - } - -+static void -+parse_commit_lb_aff(struct action_context *ctx, -+ struct ovnact_commit_lb_aff *lb_aff) -+{ -+ int vip_family, backend_family; -+ uint16_t timeout, port = 0; -+ char *ip_str; -+ -+ lexer_force_match(ctx->lexer, LEX_T_LPAREN); /* Skip '('. 
*/ -+ if (!lexer_match_id(ctx->lexer, "vip")) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (ctx->lexer->token.type != LEX_T_STRING) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (!ip_address_and_port_from_lb_key(ctx->lexer->token.s, &ip_str, -+ &port, &vip_family)) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (vip_family == AF_INET) { -+ ovs_be32 vip4; -+ ip_parse(ip_str, &vip4); -+ in6_addr_set_mapped_ipv4(&lb_aff->vip, vip4); -+ } else { -+ ipv6_parse(ip_str, &lb_aff->vip); -+ } -+ -+ lb_aff->vip_port = port; -+ free(ip_str); -+ -+ lexer_get(ctx->lexer); -+ lexer_force_match(ctx->lexer, LEX_T_COMMA); -+ -+ if (!lexer_match_id(ctx->lexer, "backend")) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (ctx->lexer->token.type != LEX_T_STRING) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (!ip_address_and_port_from_lb_key(ctx->lexer->token.s, &ip_str, -+ &port, &backend_family)) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (backend_family == AF_INET) { -+ ovs_be32 backend4; -+ ip_parse(ip_str, &backend4); -+ in6_addr_set_mapped_ipv4(&lb_aff->backend, backend4); -+ } else { -+ ipv6_parse(ip_str, &lb_aff->backend); -+ } -+ -+ free(ip_str); -+ -+ if (backend_family != vip_family) { -+ lexer_syntax_error(ctx->lexer, "invalid protocol family"); -+ return; -+ } -+ -+ lb_aff->backend_port = port; -+ -+ lexer_get(ctx->lexer); -+ lexer_force_match(ctx->lexer, LEX_T_COMMA); -+ -+ if (lb_aff->vip_port) { -+ if (!lexer_match_id(ctx->lexer, "proto")) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ -+ if (lexer_match_id(ctx->lexer, "tcp")) { -+ lb_aff->proto = IPPROTO_TCP; -+ } else if (lexer_match_id(ctx->lexer, "udp")) { -+ lb_aff->proto = IPPROTO_UDP; -+ } else if (lexer_match_id(ctx->lexer, "sctp")) { -+ lb_aff->proto = IPPROTO_SCTP; -+ } else { -+ lexer_syntax_error(ctx->lexer, "invalid protocol"); -+ return; -+ } -+ lexer_force_match(ctx->lexer, LEX_T_COMMA); -+ } -+ -+ if (!lexer_match_id(ctx->lexer, "timeout")) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) { -+ lexer_syntax_error(ctx->lexer, "invalid parameter"); -+ return; -+ } -+ if (!action_parse_uint16(ctx, &timeout, "affinity timeout")) { -+ return; -+ } -+ lb_aff->timeout = timeout; -+ -+ lexer_force_match(ctx->lexer, LEX_T_RPAREN); /* Skip ')'. 
*/ -+} -+ -+static void -+format_COMMIT_LB_AFF(const struct ovnact_commit_lb_aff *lb_aff, struct ds *s) -+{ -+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_aff->vip); -+ -+ if (ipv6) { -+ char ip_str[INET6_ADDRSTRLEN] = {}; -+ inet_ntop(AF_INET6, &lb_aff->vip, ip_str, INET6_ADDRSTRLEN); -+ ds_put_format(s, "commit_lb_aff(vip = \"[%s]", ip_str); -+ } else { -+ ovs_be32 ip = in6_addr_get_mapped_ipv4(&lb_aff->vip); -+ char *ip_str = xasprintf(IP_FMT, IP_ARGS(ip)); -+ ds_put_format(s, "commit_lb_aff(vip = \"%s", ip_str); -+ free(ip_str); -+ } -+ if (lb_aff->vip_port) { -+ ds_put_format(s, ":%d", lb_aff->vip_port); -+ } -+ ds_put_cstr(s, "\""); -+ -+ if (ipv6) { -+ char ip_str[INET6_ADDRSTRLEN] = {}; -+ inet_ntop(AF_INET6, &lb_aff->backend, ip_str, INET6_ADDRSTRLEN); -+ ds_put_format(s, ", backend = \"[%s]", ip_str); -+ } else { -+ ovs_be32 ip = in6_addr_get_mapped_ipv4(&lb_aff->backend); -+ char *ip_str = xasprintf(IP_FMT, IP_ARGS(ip)); -+ ds_put_format(s, ", backend = \"%s", ip_str); -+ free(ip_str); -+ } -+ if (lb_aff->backend_port) { -+ ds_put_format(s, ":%d", lb_aff->backend_port); -+ } -+ ds_put_cstr(s, "\""); -+ -+ if (lb_aff->proto) { -+ const char *proto; -+ switch (lb_aff->proto) { -+ case IPPROTO_UDP: -+ proto = "udp"; -+ break; -+ case IPPROTO_SCTP: -+ proto = "sctp"; -+ break; -+ case IPPROTO_TCP: -+ default: -+ proto = "tcp"; -+ break; -+ } -+ ds_put_format(s, ", proto = %s", proto); -+ } -+ ds_put_format(s, ", timeout = %d);", lb_aff->timeout); -+} -+ -+static void -+encode_COMMIT_LB_AFF(const struct ovnact_commit_lb_aff *lb_aff, -+ const struct ovnact_encode_params *ep, -+ struct ofpbuf *ofpacts) -+{ -+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_aff->vip); -+ size_t ol_offset = ofpacts->size; -+ struct ofpact_learn *ol = ofpact_put_LEARN(ofpacts); -+ struct match match = MATCH_CATCHALL_INITIALIZER; -+ struct ofpact_learn_spec *ol_spec; -+ unsigned int imm_bytes; -+ uint8_t *src_imm; -+ -+ ol->flags = NX_LEARN_F_DELETE_LEARNED; -+ ol->idle_timeout = lb_aff->timeout; /* seconds. */ -+ ol->hard_timeout = OFP_FLOW_PERMANENT; -+ ol->priority = OFP_DEFAULT_PRIORITY; -+ ol->table_id = OFTABLE_CHK_LB_AFFINITY; -+ -+ /* Match on metadata of the packet that created the new table. */ -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ ol_spec->dst.field = mf_from_id(MFF_METADATA); -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_MATCH; -+ ol_spec->src_type = NX_LEARN_SRC_FIELD; -+ ol_spec->src.field = mf_from_id(MFF_METADATA); -+ -+ /* Match on the same ETH type as the packet that created the new table. */ -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ ol_spec->dst.field = mf_from_id(MFF_ETH_TYPE); -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_MATCH; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ union mf_value imm_eth_type = { -+ .be16 = ipv6 ? htons(ETH_TYPE_IPV6) : htons(ETH_TYPE_IP) -+ }; -+ mf_write_subfield_value(&ol_spec->dst, &imm_eth_type, &match); -+ /* Push value last, as this may reallocate 'ol_spec'. */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ memcpy(src_imm, &imm_eth_type, imm_bytes); -+ -+ /* IP src. */ -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ ol_spec->dst.field = -+ ipv6 ? 
mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC); -+ ol_spec->src.field = -+ ipv6 ? mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC); -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_MATCH; -+ ol_spec->src_type = NX_LEARN_SRC_FIELD; -+ -+ /* IP dst. */ -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ ol_spec->dst.field = -+ ipv6 ? mf_from_id(MFF_IPV6_DST) : mf_from_id(MFF_IPV4_DST); -+ union mf_value imm_ip; -+ if (ipv6) { -+ imm_ip = (union mf_value) { -+ .ipv6 = lb_aff->vip, -+ }; -+ } else { -+ ovs_be32 ip4 = in6_addr_get_mapped_ipv4(&lb_aff->vip); -+ imm_ip = (union mf_value) { -+ .be32 = ip4, -+ }; -+ } -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_MATCH; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ mf_write_subfield_value(&ol_spec->dst, &imm_ip, &match); -+ -+ /* Push value last, as this may reallocate 'ol_spec' */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ memcpy(src_imm, &imm_ip, imm_bytes); -+ -+ if (lb_aff->proto) { -+ /* IP proto. */ -+ union mf_value imm_proto = { -+ .u8 = lb_aff->proto, -+ }; -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ ol_spec->dst.field = mf_from_id(MFF_IP_PROTO); -+ ol_spec->src.field = mf_from_id(MFF_IP_PROTO); -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_MATCH; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ mf_write_subfield_value(&ol_spec->dst, &imm_proto, &match); -+ /* Push value last, as this may reallocate 'ol_spec' */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ memcpy(src_imm, &imm_proto, imm_bytes); -+ -+ /* dst port */ -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ switch (lb_aff->proto) { -+ case IPPROTO_TCP: -+ ol_spec->dst.field = mf_from_id(MFF_TCP_DST); -+ ol_spec->src.field = mf_from_id(MFF_TCP_DST); -+ break; -+ case IPPROTO_UDP: -+ ol_spec->dst.field = mf_from_id(MFF_UDP_DST); -+ ol_spec->src.field = mf_from_id(MFF_UDP_DST); -+ break; -+ case IPPROTO_SCTP: -+ ol_spec->dst.field = mf_from_id(MFF_SCTP_DST); -+ ol_spec->src.field = mf_from_id(MFF_SCTP_DST); -+ break; -+ default: -+ OVS_NOT_REACHED(); -+ break; -+ } -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_MATCH; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ /* Match on vip port. */ -+ union mf_value imm_vip_port = (union mf_value) { -+ .be16 = htons(lb_aff->vip_port), -+ }; -+ -+ mf_write_subfield_value(&ol_spec->dst, &imm_vip_port, &match); -+ /* Push value last, as this may reallocate 'ol_spec' */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ memcpy(src_imm, &imm_vip_port, imm_bytes); -+ } -+ -+ /* Set MLF_USE_LB_AFF_SESSION_BIT for ecmp replies. 
*/ -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ ol_spec->dst.field = mf_from_id(MFF_LOG_FLAGS); -+ ol_spec->dst.ofs = MLF_USE_LB_AFF_SESSION_BIT; -+ ol_spec->dst.n_bits = 1; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ ol_spec->dst_type = NX_LEARN_DST_LOAD; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ union mf_value imm_reg_value = { -+ .u8 = 1 -+ }; -+ mf_write_subfield_value(&ol_spec->dst, &imm_reg_value, &match); -+ -+ /* Push value last, as this may reallocate 'ol_spec' */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ ol = ofpacts->header; -+ memcpy(src_imm, &imm_reg_value, imm_bytes); -+ -+ /* Load backend IP in REG4/XXREG1. */ -+ union mf_value imm_backend_ip; -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ -+ if (ipv6) { -+ imm_backend_ip = (union mf_value) { -+ .ipv6 = lb_aff->backend, -+ }; -+ if (ep->is_switch) { -+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_LS_IP6_ADDR); -+ } else { -+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_LR_IP6_ADDR); -+ } -+ } else { -+ ovs_be32 ip4 = in6_addr_get_mapped_ipv4(&lb_aff->backend); -+ imm_backend_ip = (union mf_value) { -+ .be32 = ip4, -+ }; -+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_IP4_ADDR); -+ } -+ -+ ol_spec->dst_type = NX_LEARN_DST_LOAD; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ mf_write_subfield_value(&ol_spec->dst, &imm_backend_ip, &match); -+ /* Push value last, as this may reallocate 'ol_spec' */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ memcpy(src_imm, &imm_backend_ip, imm_bytes); -+ -+ if (lb_aff->backend_port) { -+ /* Load backend port in REG8. */ -+ union mf_value imm_backend_port; -+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); -+ imm_backend_port = (union mf_value) { -+ .be16 = htons(lb_aff->backend_port), -+ }; -+ -+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_PORT); -+ ol_spec->dst_type = NX_LEARN_DST_LOAD; -+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; -+ ol_spec->dst.ofs = 0; -+ ol_spec->dst.n_bits = 8 * sizeof(lb_aff->backend_port); -+ ol_spec->n_bits = ol_spec->dst.n_bits; -+ mf_write_subfield_value(&ol_spec->dst, &imm_backend_port, &match); -+ /* Push value last, as this may reallocate 'ol_spec' */ -+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); -+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); -+ memcpy(src_imm, &imm_backend_port, imm_bytes); -+ } -+ -+ ol = ofpbuf_at_assert(ofpacts, ol_offset, sizeof *ol); -+ ofpact_finish_LEARN(ofpacts, &ol); -+} -+ -+static void -+ovnact_commit_lb_aff_free(struct ovnact_commit_lb_aff *lb_aff OVS_UNUSED) -+{ -+} -+ -+static void -+parse_chk_lb_aff(struct action_context *ctx, const struct expr_field *dst, -+ struct ovnact_result *res) -+{ -+ parse_ovnact_result(ctx, "chk_lb_aff", NULL, dst, res); -+} -+ -+static void -+format_CHK_LB_AFF(const struct ovnact_result *res, struct ds *s) -+{ -+ expr_field_format(&res->dst, s); -+ ds_put_cstr(s, " = chk_lb_aff();"); -+} -+ -+static void -+encode_CHK_LB_AFF(const struct ovnact_result *res, -+ const struct ovnact_encode_params *ep OVS_UNUSED, -+ struct ofpbuf *ofpacts) -+{ -+ encode_result_action__(res, OFTABLE_CHK_LB_AFFINITY, -+ MLF_USE_LB_AFF_SESSION_BIT, ofpacts); -+} -+ - /* Parses an assignment or exchange or put_dhcp_opts action. 
*/ - static void - parse_set_action(struct action_context *ctx) -@@ -4684,6 +5107,10 @@ parse_set_action(struct action_context *ctx) - && lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) { - parse_chk_ecmp_nh(ctx, &lhs, - ovnact_put_CHK_ECMP_NH(ctx->ovnacts)); -+ } else if (!strcmp(ctx->lexer->token.s, "chk_lb_aff") && -+ lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) { -+ parse_chk_lb_aff(ctx, &lhs, -+ ovnact_put_CHK_LB_AFF(ctx->ovnacts)); - } else { - parse_assignment_action(ctx, false, &lhs); - } -@@ -4790,6 +5217,8 @@ parse_action(struct action_context *ctx) - parse_put_fdb(ctx, ovnact_put_PUT_FDB(ctx->ovnacts)); - } else if (lexer_match_id(ctx->lexer, "commit_ecmp_nh")) { - parse_commit_ecmp_nh(ctx, ovnact_put_COMMIT_ECMP_NH(ctx->ovnacts)); -+ } else if (lexer_match_id(ctx->lexer, "commit_lb_aff")) { -+ parse_commit_lb_aff(ctx, ovnact_put_COMMIT_LB_AFF(ctx->ovnacts)); - } else { - lexer_syntax_error(ctx->lexer, "expecting action"); - } -diff --git a/lib/features.c b/lib/features.c -index f15ec42bb..462b99818 100644 ---- a/lib/features.c -+++ b/lib/features.c -@@ -26,10 +26,13 @@ - #include "openvswitch/rconn.h" - #include "openvswitch/ofp-msgs.h" - #include "openvswitch/ofp-meter.h" -+#include "openvswitch/ofp-util.h" - #include "ovn/features.h" - - VLOG_DEFINE_THIS_MODULE(features); - -+#define FEATURES_DEFAULT_PROBE_INTERVAL_SEC 5 -+ - struct ovs_feature { - enum ovs_feature_value value; - const char *name; -@@ -74,7 +77,8 @@ static void - ovs_feature_rconn_setup(const char *br_name) - { - if (!swconn) { -- swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP15_VERSION); -+ swconn = rconn_create(FEATURES_DEFAULT_PROBE_INTERVAL_SEC, 0, -+ DSCP_DEFAULT, 1 << OFP15_VERSION); - } - - if (!rconn_is_connected(swconn)) { -@@ -85,11 +89,14 @@ ovs_feature_rconn_setup(const char *br_name) - } - free(target); - } -+ rconn_set_probe_interval(swconn, FEATURES_DEFAULT_PROBE_INTERVAL_SEC); - } - - static bool - ovs_feature_get_openflow_cap(const char *br_name) - { -+ struct ofpbuf *msg; -+ - if (!br_name) { - return false; - } -@@ -102,15 +109,14 @@ ovs_feature_get_openflow_cap(const char *br_name) - } - - /* send new requests just after reconnect. */ -- if (conn_seq_no == rconn_get_connection_seqno(swconn)) { -- return false; -+ if (conn_seq_no != rconn_get_connection_seqno(swconn)) { -+ /* dump datapath meter capabilities. */ -+ msg = ofpraw_alloc(OFPRAW_OFPST13_METER_FEATURES_REQUEST, -+ rconn_get_version(swconn), 0); -+ rconn_send(swconn, msg, NULL); - } - - bool ret = false; -- /* dump datapath meter capabilities. */ -- struct ofpbuf *msg = ofpraw_alloc(OFPRAW_OFPST13_METER_FEATURES_REQUEST, -- rconn_get_version(swconn), 0); -- rconn_send(swconn, msg, NULL); - for (int i = 0; i < 50; i++) { - msg = rconn_recv(swconn); - if (!msg) { -@@ -137,6 +143,8 @@ ovs_feature_get_openflow_cap(const char *br_name) - } - } - conn_seq_no = rconn_get_connection_seqno(swconn); -+ } else if (type == OFPTYPE_ECHO_REQUEST) { -+ rconn_send(swconn, ofputil_encode_echo_reply(oh), NULL); - } - ofpbuf_delete(msg); - } -diff --git a/lib/lb.c b/lib/lb.c -index 477cf8f5e..bb5ae2196 100644 ---- a/lib/lb.c -+++ b/lib/lb.c -@@ -225,6 +225,16 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb) - smap_get_def(&nbrec_lb->options, "neighbor_responder", "reachable"); - lb->neigh_mode = strcmp(mode, "all") ? 
LB_NEIGH_RESPOND_REACHABLE - : LB_NEIGH_RESPOND_ALL; -+ uint32_t affinity_timeout = -+ smap_get_uint(&nbrec_lb->options, "affinity_timeout", 0); -+ if (affinity_timeout > UINT16_MAX) { -+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); -+ VLOG_WARN_RL(&rl, "max affinity_timeout timeout value is %u", -+ UINT16_MAX); -+ affinity_timeout = UINT16_MAX; -+ } -+ lb->affinity_timeout = affinity_timeout; -+ - sset_init(&lb->ips_v4); - sset_init(&lb->ips_v6); - struct smap_node *node; -diff --git a/lib/lb.h b/lib/lb.h -index 9b902f005..241872681 100644 ---- a/lib/lb.h -+++ b/lib/lb.h -@@ -67,6 +67,7 @@ struct ovn_northd_lb { - bool controller_event; - bool routable; - bool skip_snat; -+ uint16_t affinity_timeout; - - struct sset ips_v4; - struct sset ips_v6; -diff --git a/northd/northd.c b/northd/northd.c -index 84440a47f..404c40b8c 100644 ---- a/northd/northd.c -+++ b/northd/northd.c -@@ -121,20 +121,22 @@ enum ovn_stage { - PIPELINE_STAGE(SWITCH, IN, ACL, 8, "ls_in_acl") \ - PIPELINE_STAGE(SWITCH, IN, QOS_MARK, 9, "ls_in_qos_mark") \ - PIPELINE_STAGE(SWITCH, IN, QOS_METER, 10, "ls_in_qos_meter") \ -- PIPELINE_STAGE(SWITCH, IN, LB, 11, "ls_in_lb") \ -- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB, 12, "ls_in_acl_after_lb") \ -- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 13, "ls_in_stateful") \ -- PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 14, "ls_in_pre_hairpin") \ -- PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 15, "ls_in_nat_hairpin") \ -- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 16, "ls_in_hairpin") \ -- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 17, "ls_in_arp_rsp") \ -- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 18, "ls_in_dhcp_options") \ -- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 19, "ls_in_dhcp_response") \ -- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 20, "ls_in_dns_lookup") \ -- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 21, "ls_in_dns_response") \ -- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 22, "ls_in_external_port") \ -- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 23, "ls_in_l2_lkup") \ -- PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 24, "ls_in_l2_unknown") \ -+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 11, "ls_in_lb_aff_check") \ -+ PIPELINE_STAGE(SWITCH, IN, LB, 12, "ls_in_lb") \ -+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 13, "ls_in_lb_aff_learn") \ -+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB, 14, "ls_in_acl_after_lb") \ -+ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 15, "ls_in_stateful") \ -+ PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 16, "ls_in_pre_hairpin") \ -+ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 17, "ls_in_nat_hairpin") \ -+ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 18, "ls_in_hairpin") \ -+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 19, "ls_in_arp_rsp") \ -+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 20, "ls_in_dhcp_options") \ -+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 21, "ls_in_dhcp_response") \ -+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 22, "ls_in_dns_lookup") \ -+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 23, "ls_in_dns_response") \ -+ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 24, "ls_in_external_port") \ -+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 25, "ls_in_l2_lkup") \ -+ PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 26, "ls_in_l2_unknown") \ - \ - /* Logical switch egress stages. 
*/ \ - PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \ -@@ -155,20 +157,22 @@ enum ovn_stage { - PIPELINE_STAGE(ROUTER, IN, IP_INPUT, 3, "lr_in_ip_input") \ - PIPELINE_STAGE(ROUTER, IN, UNSNAT, 4, "lr_in_unsnat") \ - PIPELINE_STAGE(ROUTER, IN, DEFRAG, 5, "lr_in_defrag") \ -- PIPELINE_STAGE(ROUTER, IN, DNAT, 6, "lr_in_dnat") \ -- PIPELINE_STAGE(ROUTER, IN, ECMP_STATEFUL, 7, "lr_in_ecmp_stateful") \ -- PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 8, "lr_in_nd_ra_options") \ -- PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 9, "lr_in_nd_ra_response") \ -- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_PRE, 10, "lr_in_ip_routing_pre") \ -- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 11, "lr_in_ip_routing") \ -- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_ECMP, 12, "lr_in_ip_routing_ecmp") \ -- PIPELINE_STAGE(ROUTER, IN, POLICY, 13, "lr_in_policy") \ -- PIPELINE_STAGE(ROUTER, IN, POLICY_ECMP, 14, "lr_in_policy_ecmp") \ -- PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 15, "lr_in_arp_resolve") \ -- PIPELINE_STAGE(ROUTER, IN, CHK_PKT_LEN, 16, "lr_in_chk_pkt_len") \ -- PIPELINE_STAGE(ROUTER, IN, LARGER_PKTS, 17, "lr_in_larger_pkts") \ -- PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 18, "lr_in_gw_redirect") \ -- PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 19, "lr_in_arp_request") \ -+ PIPELINE_STAGE(ROUTER, IN, LB_AFF_CHECK, 6, "lr_in_lb_aff_check") \ -+ PIPELINE_STAGE(ROUTER, IN, DNAT, 7, "lr_in_dnat") \ -+ PIPELINE_STAGE(ROUTER, IN, LB_AFF_LEARN, 8, "lr_in_lb_aff_learn") \ -+ PIPELINE_STAGE(ROUTER, IN, ECMP_STATEFUL, 9, "lr_in_ecmp_stateful") \ -+ PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 10, "lr_in_nd_ra_options") \ -+ PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 11, "lr_in_nd_ra_response") \ -+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_PRE, 12, "lr_in_ip_routing_pre") \ -+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 13, "lr_in_ip_routing") \ -+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_ECMP, 14, "lr_in_ip_routing_ecmp") \ -+ PIPELINE_STAGE(ROUTER, IN, POLICY, 15, "lr_in_policy") \ -+ PIPELINE_STAGE(ROUTER, IN, POLICY_ECMP, 16, "lr_in_policy_ecmp") \ -+ PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 17, "lr_in_arp_resolve") \ -+ PIPELINE_STAGE(ROUTER, IN, CHK_PKT_LEN, 18, "lr_in_chk_pkt_len") \ -+ PIPELINE_STAGE(ROUTER, IN, LARGER_PKTS, 19, "lr_in_larger_pkts") \ -+ PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 20, "lr_in_gw_redirect") \ -+ PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 21, "lr_in_arp_request") \ - \ - /* Logical router egress stages. */ \ - PIPELINE_STAGE(ROUTER, OUT, CHECK_DNAT_LOCAL, 0, \ -@@ -215,8 +219,17 @@ enum ovn_stage { - #define REG_ORIG_DIP_IPV6 "xxreg1" - #define REG_ORIG_TP_DPORT "reg2[0..15]" - -+/* Register used to store backend ipv6 address -+ * for load balancer affinity. */ -+#define REG_LB_L2_AFF_BACKEND_IP6 "xxreg0" -+ - /* Register definitions for switches and routers. */ - -+/* Register used to store backend ipv4 address -+ * for load balancer affinity. */ -+#define REG_LB_AFF_BACKEND_IP4 "reg4" -+#define REG_LB_AFF_MATCH_PORT "reg8[0..15]" -+ - /* Indicate that this packet has been recirculated using egress - * loopback. This allows certain checks to be bypassed, such as a - * logical router dropping packets with source IP address equals -@@ -228,6 +241,7 @@ enum ovn_stage { - #define REGBIT_LOOKUP_NEIGHBOR_IP_RESULT "reg9[3]" - #define REGBIT_DST_NAT_IP_LOCAL "reg9[4]" - #define REGBIT_KNOWN_ECMP_NH "reg9[5]" -+#define REGBIT_KNOWN_LB_SESSION "reg9[6]" - - /* Register to store the eth address associated to a router port for packets - * received in S_ROUTER_IN_ADMISSION. 
-@@ -245,6 +259,10 @@ enum ovn_stage { - #define REG_SRC_IPV6 "xxreg1" - #define REG_ROUTE_TABLE_ID "reg7" - -+/* Register used to store backend ipv6 address -+ * for load balancer affinity. */ -+#define REG_LB_L3_AFF_BACKEND_IP6 "xxreg1" -+ - #define REG_ORIG_TP_DPORT_ROUTER "reg9[16..31]" - - /* Register used for setting a label for ACLs in a Logical Switch. */ -@@ -267,73 +285,75 @@ enum ovn_stage { - * OVS register usage: - * - * Logical Switch pipeline: -- * +----+----------------------------------------------+---+------------------+ -- * | R0 | REGBIT_{CONNTRACK/DHCP/DNS} | | | -- * | | REGBIT_{HAIRPIN/HAIRPIN_REPLY} | | | -- * | | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} | | | -- * | | REGBIT_ACL_LABEL | X | | -- * +----+----------------------------------------------+ X | | -- * | R1 | ORIG_DIP_IPV4 (>= IN_PRE_STATEFUL) | R | | -- * +----+----------------------------------------------+ E | | -- * | R2 | ORIG_TP_DPORT (>= IN_PRE_STATEFUL) | G | | -- * +----+----------------------------------------------+ 0 | | -- * | R3 | ACL LABEL | | | -- * +----+----------------------------------------------+---+------------------+ -- * | R4 | UNUSED | | | -- * +----+----------------------------------------------+ X | ORIG_DIP_IPV6(>= | -- * | R5 | UNUSED | X | IN_PRE_STATEFUL) | -- * +----+----------------------------------------------+ R | | -- * | R6 | UNUSED | E | | -- * +----+----------------------------------------------+ G | | -- * | R7 | UNUSED | 1 | | -- * +----+----------------------------------------------+---+------------------+ -- * | R8 | UNUSED | -+ * +----+----------------------------------------------+---+-----------------------------------+ -+ * | R0 | REGBIT_{CONNTRACK/DHCP/DNS} | | | -+ * | | REGBIT_{HAIRPIN/HAIRPIN_REPLY} | | | -+ * | | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} | | | -+ * | | REGBIT_ACL_LABEL | X | | -+ * +----+----------------------------------------------+ X | | -+ * | R5 | UNUSED | X | LB_L2_AFF_BACKEND_IP6 | -+ * | R1 | ORIG_DIP_IPV4 (>= IN_PRE_STATEFUL) | R | | -+ * +----+----------------------------------------------+ E | | -+ * | R2 | ORIG_TP_DPORT (>= IN_PRE_STATEFUL) | G | | -+ * +----+----------------------------------------------+ 0 | | -+ * | R3 | ACL LABEL | | | -+ * +----+----------------------------------------------+---+-----------------------------------+ -+ * | R4 | REG_LB_AFF_BACKEND_IP4 | | | -+ * +----+----------------------------------------------+ X | | -+ * | R5 | UNUSED | X | ORIG_DIP_IPV6(>= IN_PRE_STATEFUL) | -+ * +----+----------------------------------------------+ R | | -+ * | R6 | UNUSED | E | | -+ * +----+----------------------------------------------+ G | | -+ * | R7 | UNUSED | 1 | | -+ * +----+----------------------------------------------+---+-----------------------------------+ -+ * | R8 | LB_AFF_MATCH_PORT | - * +----+----------------------------------------------+ - * | R9 | UNUSED | - * +----+----------------------------------------------+ - * - * Logical Router pipeline: -- * +-----+--------------------------+---+-----------------+---+---------------+ -- * | R0 | REGBIT_ND_RA_OPTS_RESULT | | | | | -- * | | (= IN_ND_RA_OPTIONS) | X | | | | -- * | | NEXT_HOP_IPV4 | R | | | | -- * | | (>= IP_INPUT) | E | INPORT_ETH_ADDR | X | | -- * +-----+--------------------------+ G | (< IP_INPUT) | X | | -- * | R1 | SRC_IPV4 for ARP-REQ | 0 | | R | | -- * | | (>= IP_INPUT) | | | E | NEXT_HOP_IPV6 | -- * +-----+--------------------------+---+-----------------+ G | ( >= DEFRAG ) | -- * | R2 | UNUSED | X | | 0 | | -- * | | | R | | | 
| -- * +-----+--------------------------+ E | UNUSED | | | -- * | R3 | UNUSED | G | | | | -- * | | | 1 | | | | -- * +-----+--------------------------+---+-----------------+---+---------------+ -- * | R4 | UNUSED | X | | | | -- * | | | R | | | | -- * +-----+--------------------------+ E | UNUSED | X | | -- * | R5 | UNUSED | G | | X | | -- * | | | 2 | | R |SRC_IPV6 for NS| -- * +-----+--------------------------+---+-----------------+ E | ( >= | -- * | R6 | UNUSED | X | | G | IN_IP_ROUTING)| -- * | | | R | | 1 | | -- * +-----+--------------------------+ E | UNUSED | | | -- * | R7 | ROUTE_TABLE_ID | G | | | | -- * | | (>= IN_IP_ROUTING_PRE && | 3 | | | | -- * | | <= IN_IP_ROUTING) | | | | | -- * +-----+--------------------------+---+-----------------+---+---------------+ -- * | R8 | ECMP_GROUP_ID | | | -- * | | ECMP_MEMBER_ID | X | | -- * +-----+--------------------------+ R | | -- * | | REGBIT_{ | E | | -- * | | EGRESS_LOOPBACK/ | G | UNUSED | -- * | R9 | PKT_LARGER/ | 4 | | -- * | | LOOKUP_NEIGHBOR_RESULT/| | | -- * | | SKIP_LOOKUP_NEIGHBOR/ | | | -- * | | KNOWN_ECMP_NH} | | | -- * | | | | | -- * | | REG_ORIG_TP_DPORT_ROUTER | | | -- * | | | | | -- * +-----+--------------------------+---+-----------------+ -+ * +-----+---------------------------+---+-----------------+---+------------------------------------+ -+ * | R0 | REGBIT_ND_RA_OPTS_RESULT | | | | | -+ * | | (= IN_ND_RA_OPTIONS) | X | | | | -+ * | | NEXT_HOP_IPV4 | R | | | | -+ * | | (>= IP_INPUT) | E | INPORT_ETH_ADDR | X | | -+ * +-----+---------------------------+ G | (< IP_INPUT) | X | | -+ * | R1 | SRC_IPV4 for ARP-REQ | 0 | | R | | -+ * | | (>= IP_INPUT) | | | E | NEXT_HOP_IPV6 (>= DEFRAG ) | -+ * +-----+---------------------------+---+-----------------+ G | | -+ * | R2 | UNUSED | X | | 0 | | -+ * | | | R | | | | -+ * +-----+---------------------------+ E | UNUSED | | | -+ * | R3 | UNUSED | G | | | | -+ * | | | 1 | | | | -+ * +-----+---------------------------+---+-----------------+---+------------------------------------+ -+ * | R4 | REG_LB_AFF_BACKEND_IP4 | X | | | | -+ * | | | R | | | | -+ * +-----+---------------------------+ E | UNUSED | X | | -+ * | R5 | UNUSED | G | | X | | -+ * | | | 2 | | R | LB_L3_AFF_BACKEND_IP6 | -+ * +-----+---------------------------+---+-----------------+ E | (<= IN_DNAT) | -+ * | R6 | UNUSED | X | | G | | -+ * | | | R | | 1 | | -+ * +-----+---------------------------+ E | UNUSED | | | -+ * | R7 | ROUTE_TABLE_ID | G | | | | -+ * | | (>= IN_IP_ROUTING_PRE && | 3 | | | | -+ * | | <= IN_IP_ROUTING) | | | | | -+ * +-----+---------------------------+---+-----------------+---+------------------------------------+ -+ * | R8 | ECMP_GROUP_ID | | | -+ * | | ECMP_MEMBER_ID | | | -+ * | | LB_AFF_MATCH_PORT | X | | -+ * +-----+---------------------------+ R | | -+ * | | REGBIT_{ | E | | -+ * | | EGRESS_LOOPBACK/ | G | UNUSED | -+ * | R9 | PKT_LARGER/ | 4 | | -+ * | | LOOKUP_NEIGHBOR_RESULT/ | | | -+ * | | SKIP_LOOKUP_NEIGHBOR/ | | | -+ * | | KNOWN_ECMP_NH} | | | -+ * | | | | | -+ * | | REG_ORIG_TP_DPORT_ROUTER | | | -+ * | | | | | -+ * +-----+---------------------------+---+-----------------+ - * - */ - -@@ -1040,7 +1060,16 @@ init_mcast_info_for_switch_datapath(struct ovn_datapath *od) - mcast_sw_info->query_max_response = - smap_get_ullong(&od->nbs->other_config, "mcast_query_max_response", - OVN_MCAST_DEFAULT_QUERY_MAX_RESPONSE_S); -+} -+ -+static void -+init_mcast_flow_count(struct ovn_datapath *od) -+{ -+ if (od->nbr) { -+ return; -+ } - -+ struct mcast_switch_info *mcast_sw_info = &od->mcast_info.sw; - 
mcast_sw_info->active_v4_flows = ATOMIC_VAR_INIT(0); - mcast_sw_info->active_v6_flows = ATOMIC_VAR_INIT(0); - } -@@ -6936,6 +6965,426 @@ build_lb_rules_pre_stateful(struct hmap *lflows, struct ovn_northd_lb *lb, - } - } - -+/* Builds the logical router flows related to load balancer affinity. -+ * For a LB configured with 'vip=V:VP' and backends 'B1:BP1,B2:BP2' and -+ * affinity timeout set to T, it generates the following logical flows: -+ * - load balancing affinity check: -+ * table=lr_in_lb_aff_check, priority=100 -+ * match=(new_lb_match) -+ * action=(REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;) -+ * -+ * - load balancing: -+ * table=lr_in_dnat, priority=150 -+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4 -+ * && REG_LB_AFF_BACKEND_IP4 == B1 && REG_LB_AFF_MATCH_PORT == BP1) -+ * action=(REG_NEXT_HOP_IPV4 = V; lb_action; -+ * ct_lb_mark(backends=B1:BP1);) -+ * table=lr_in_dnat, priority=150 -+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4 -+ * && REG_LB_AFF_BACKEND_IP4 == B2 && REG_LB_AFF_MATCH_PORT == BP2) -+ * action=(REG_NEXT_HOP_IPV4 = V; lb_action; -+ * ct_lb_mark(backends=B2:BP2);) -+ * -+ * - load balancing affinity learn: -+ * table=lr_in_lb_aff_learn, priority=100 -+ * match=(REGBIT_KNOWN_LB_SESSION == 0 -+ * && ct.new && ip4 -+ * && REG_NEXT_HOP_IPV4 == V && REG_ORIG_TP_DPORT_ROUTER = VP -+ * && ip4.dst == B1 && tcp.dst == BP1) -+ * action=(commit_lb_aff(vip = "V:VP", backend = "B1:BP1", -+ * proto = tcp, timeout = T)); -+ * table=lr_in_lb_aff_learn, priority=100 -+ * match=(REGBIT_KNOWN_LB_SESSION == 0 -+ * && ct.new && ip4 -+ * && REG_NEXT_HOP_IPV4 == V && REG_ORIG_TP_DPORT_ROUTER = VP -+ * && ip4.dst == B2 && tcp.dst == BP2) -+ * action=(commit_lb_aff(vip = "V:VP", backend = "B2:BP2", -+ * proto = tcp, timeout = T)); -+ * -+ */ -+static void -+build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb, -+ struct ovn_lb_vip *lb_vip, char *new_lb_match, -+ char *lb_action, struct ovn_datapath **dplist, -+ int n_dplist) -+{ -+ if (!lb->affinity_timeout) { -+ return; -+ } -+ -+ static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;"; -+ struct ovn_lflow *lflow_ref_aff_check = NULL; -+ /* Check if we have already a enstablished connection for this -+ * tuple and we are in affinity timeslot. */ -+ uint32_t hash_aff_check = ovn_logical_flow_hash( -+ ovn_stage_get_table(S_ROUTER_IN_LB_AFF_CHECK), -+ ovn_stage_get_pipeline(S_ROUTER_IN_LB_AFF_CHECK), 100, -+ new_lb_match, aff_check); -+ -+ for (size_t i = 0; i < n_dplist; i++) { -+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_check, dplist[i])) { -+ lflow_ref_aff_check = ovn_lflow_add_at_with_hash( -+ lflows, dplist[i], S_ROUTER_IN_LB_AFF_CHECK, 100, -+ new_lb_match, aff_check, NULL, NULL, &lb->nlb->header_, -+ OVS_SOURCE_LOCATOR, hash_aff_check); -+ } -+ } -+ -+ struct ds aff_action = DS_EMPTY_INITIALIZER; -+ struct ds aff_action_learn = DS_EMPTY_INITIALIZER; -+ struct ds aff_match = DS_EMPTY_INITIALIZER; -+ struct ds aff_match_learn = DS_EMPTY_INITIALIZER; -+ -+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_vip->vip); -+ const char *ip_match = ipv6 ? "ip6" : "ip4"; -+ -+ const char *reg_vip = ipv6 ? REG_NEXT_HOP_IPV6 : REG_NEXT_HOP_IPV4; -+ const char *reg_backend = -+ ipv6 ? REG_LB_L3_AFF_BACKEND_IP6 : REG_LB_AFF_BACKEND_IP4; -+ -+ /* Prepare common part of affinity LB and affinity learn action. 
*/ -+ ds_put_format(&aff_action, "%s = %s; ", reg_vip, lb_vip->vip_str); -+ ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \""); -+ -+ if (lb_vip->vip_port) { -+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%"PRIu16 : "%s:%"PRIu16, -+ lb_vip->vip_str, lb_vip->vip_port); -+ } else { -+ ds_put_cstr(&aff_action_learn, lb_vip->vip_str); -+ } -+ -+ if (lb_action) { -+ ds_put_cstr(&aff_action, lb_action); -+ } -+ ds_put_cstr(&aff_action, "ct_lb_mark(backends="); -+ ds_put_cstr(&aff_action_learn, "\", backend = \""); -+ -+ /* Prepare common part of affinity learn match. */ -+ if (lb_vip->vip_port) { -+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && " -+ "ct.new && %s && %s == %s && " -+ REG_ORIG_TP_DPORT_ROUTER" == %"PRIu16" && " -+ "%s.dst == ", ip_match, reg_vip, lb_vip->vip_str, -+ lb_vip->vip_port, ip_match); -+ } else { -+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && " -+ "ct.new && %s && %s == %s && %s.dst == ", ip_match, -+ reg_vip, lb_vip->vip_str, ip_match); -+ } -+ -+ /* Prepare common part of affinity match. */ -+ ds_put_format(&aff_match, REGBIT_KNOWN_LB_SESSION" == 1 && " -+ "ct.new && %s && %s == ", ip_match, reg_backend); -+ -+ /* Store the common part length. */ -+ size_t aff_action_len = aff_action.length; -+ size_t aff_action_learn_len = aff_action_learn.length; -+ size_t aff_match_len = aff_match.length; -+ size_t aff_match_learn_len = aff_match_learn.length; -+ -+ -+ for (size_t i = 0; i < lb_vip->n_backends; i++) { -+ struct ovn_lb_backend *backend = &lb_vip->backends[i]; -+ -+ ds_put_cstr(&aff_match_learn, backend->ip_str); -+ ds_put_cstr(&aff_match, backend->ip_str); -+ -+ if (backend->port) { -+ ds_put_format(&aff_action, ipv6 ? "[%s]:%d" : "%s:%d", -+ backend->ip_str, backend->port); -+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%d" : "%s:%d", -+ backend->ip_str, backend->port); -+ -+ ds_put_format(&aff_match_learn, " && %s.dst == %d", -+ lb->proto, backend->port); -+ ds_put_format(&aff_match, " && "REG_LB_AFF_MATCH_PORT" == %d", -+ backend->port); -+ } else { -+ ds_put_cstr(&aff_action, backend->ip_str); -+ ds_put_cstr(&aff_action_learn, backend->ip_str); -+ } -+ -+ ds_put_cstr(&aff_action, ");"); -+ ds_put_char(&aff_action_learn, '"'); -+ -+ if (lb_vip->vip_port) { -+ ds_put_format(&aff_action_learn, ", proto = %s", lb->proto); -+ } -+ -+ ds_put_format(&aff_action_learn, ", timeout = %d); /* drop */", -+ lb->affinity_timeout); -+ -+ struct ovn_lflow *lflow_ref_aff_learn = NULL; -+ uint32_t hash_aff_learn = ovn_logical_flow_hash( -+ ovn_stage_get_table(S_ROUTER_IN_LB_AFF_LEARN), -+ ovn_stage_get_pipeline(S_ROUTER_IN_LB_AFF_LEARN), -+ 100, ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn)); -+ -+ struct ovn_lflow *lflow_ref_aff_lb = NULL; -+ uint32_t hash_aff_lb = ovn_logical_flow_hash( -+ ovn_stage_get_table(S_ROUTER_IN_DNAT), -+ ovn_stage_get_pipeline(S_ROUTER_IN_DNAT), -+ 150, ds_cstr(&aff_match), ds_cstr(&aff_action)); -+ -+ for (size_t j = 0; j < n_dplist; j++) { -+ /* Forward to OFTABLE_CHK_LB_AFFINITY table to store flow tuple. */ -+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_learn, -+ dplist[j])) { -+ lflow_ref_aff_learn = ovn_lflow_add_at_with_hash( -+ lflows, dplist[j], S_ROUTER_IN_LB_AFF_LEARN, 100, -+ ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn), -+ NULL, NULL, &lb->nlb->header_, OVS_SOURCE_LOCATOR, -+ hash_aff_learn); -+ } -+ /* Use already selected backend within affinity timeslot. 
*/ -+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_lb, -+ dplist[j])) { -+ lflow_ref_aff_lb = ovn_lflow_add_at_with_hash( -+ lflows, dplist[j], S_ROUTER_IN_DNAT, 150, -+ ds_cstr(&aff_match), ds_cstr(&aff_action), NULL, NULL, -+ &lb->nlb->header_, OVS_SOURCE_LOCATOR, -+ hash_aff_lb); -+ } -+ } -+ -+ ds_truncate(&aff_action, aff_action_len); -+ ds_truncate(&aff_action_learn, aff_action_learn_len); -+ ds_truncate(&aff_match, aff_match_len); -+ ds_truncate(&aff_match_learn, aff_match_learn_len); -+ } -+ -+ ds_destroy(&aff_action); -+ ds_destroy(&aff_action_learn); -+ ds_destroy(&aff_match); -+ ds_destroy(&aff_match_learn); -+} -+ -+/* Builds the logical switch flows related to load balancer affinity. -+ * For a LB configured with 'vip=V:VP' and backends 'B1:BP1,B2:BP2' and -+ * affinity timeout set to T, it generates the following logical flows: -+ * - load balancing affinity check: -+ * table=ls_in_lb_aff_check, priority=100 -+ * match=(ct.new && ip4 -+ * && REG_ORIG_DIP_IPV4 == V && REG_ORIG_TP_DPORT == VP) -+ * action=(REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;) -+ * -+ * - load balancing: -+ * table=ls_in_lb, priority=150 -+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4 -+ * && REG_LB_AFF_BACKEND_IP4 == B1 && REG_LB_AFF_MATCH_PORT == BP1) -+ * action=(REGBIT_CONNTRACK_COMMIT = 0; -+ * REG_ORIG_DIP_IPV4 = V; REG_ORIG_TP_DPORT = VP; -+ * ct_lb_mark(backends=B1:BP1);) -+ * table=ls_in_lb, priority=150 -+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4 -+ * && REG_LB_AFF_BACKEND_IP4 == B2 && REG_LB_AFF_MATCH_PORT == BP2) -+ * action=(REGBIT_CONNTRACK_COMMIT = 0; -+ * REG_ORIG_DIP_IPV4 = V; -+ * REG_ORIG_TP_DPORT = VP; -+ * ct_lb_mark(backends=B1:BP2);) -+ * -+ * - load balancing affinity learn: -+ * table=ls_in_lb_aff_learn, priority=100 -+ * match=(REGBIT_KNOWN_LB_SESSION == 0 -+ * && ct.new && ip4 -+ * && REG_ORIG_DIP_IPV4 == V && REG_ORIG_TP_DPORT == VP -+ * && ip4.dst == B1 && tcp.dst == BP1) -+ * action=(commit_lb_aff(vip = "V:VP", backend = "B1:BP1", -+ * proto = tcp, timeout = T)); -+ * table=ls_in_lb_aff_learn, priority=100 -+ * match=(REGBIT_KNOWN_LB_SESSION == 0 -+ * && ct.new && ip4 -+ * && REG_ORIG_DIP_IPV4 == V && REG_ORIG_TP_DPORT == VP -+ * && ip4.dst == B2 && tcp.dst == BP2) -+ * action=(commit_lb_aff(vip = "V:VP", backend = "B2:BP2", -+ * proto = tcp, timeout = T)); -+ * -+ */ -+static void -+build_lb_affinity_ls_flows(struct hmap *lflows, struct ovn_northd_lb *lb, -+ struct ovn_lb_vip *lb_vip) -+{ -+ if (!lb->affinity_timeout) { -+ return; -+ } -+ -+ struct ds new_lb_match = DS_EMPTY_INITIALIZER; -+ if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) { -+ ds_put_format(&new_lb_match, -+ "ct.new && ip4 && "REG_ORIG_DIP_IPV4 " == %s", -+ lb_vip->vip_str); -+ } else { -+ ds_put_format(&new_lb_match, -+ "ct.new && ip6 && "REG_ORIG_DIP_IPV6 " == %s", -+ lb_vip->vip_str); -+ } -+ -+ if (lb_vip->vip_port) { -+ ds_put_format(&new_lb_match, " && "REG_ORIG_TP_DPORT " == %"PRIu16, -+ lb_vip->vip_port); -+ } -+ -+ static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;"; -+ struct ovn_lflow *lflow_ref_aff_check = NULL; -+ /* Check if we have already a enstablished connection for this -+ * tuple and we are in affinity timeslot. 
*/ -+ uint32_t hash_aff_check = ovn_logical_flow_hash( -+ ovn_stage_get_table(S_SWITCH_IN_LB_AFF_CHECK), -+ ovn_stage_get_pipeline(S_SWITCH_IN_LB_AFF_CHECK), 100, -+ ds_cstr(&new_lb_match), aff_check); -+ -+ for (size_t i = 0; i < lb->n_nb_ls; i++) { -+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_check, -+ lb->nb_ls[i])) { -+ lflow_ref_aff_check = ovn_lflow_add_at_with_hash( -+ lflows, lb->nb_ls[i], S_SWITCH_IN_LB_AFF_CHECK, 100, -+ ds_cstr(&new_lb_match), aff_check, NULL, NULL, -+ &lb->nlb->header_, OVS_SOURCE_LOCATOR, hash_aff_check); -+ } -+ } -+ ds_destroy(&new_lb_match); -+ -+ struct ds aff_action = DS_EMPTY_INITIALIZER; -+ struct ds aff_action_learn = DS_EMPTY_INITIALIZER; -+ struct ds aff_match = DS_EMPTY_INITIALIZER; -+ struct ds aff_match_learn = DS_EMPTY_INITIALIZER; -+ -+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_vip->vip); -+ const char *ip_match = ipv6 ? "ip6" : "ip4"; -+ -+ const char *reg_vip = ipv6 ? REG_ORIG_DIP_IPV6 : REG_ORIG_DIP_IPV4; -+ const char *reg_backend = -+ ipv6 ? REG_LB_L2_AFF_BACKEND_IP6 : REG_LB_AFF_BACKEND_IP4; -+ -+ /* Prepare common part of affinity LB and affinity learn action. */ -+ ds_put_format(&aff_action, REGBIT_CONNTRACK_COMMIT" = 0; %s = %s; ", -+ reg_vip, lb_vip->vip_str); -+ ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \""); -+ -+ if (lb_vip->vip_port) { -+ ds_put_format(&aff_action, REG_ORIG_TP_DPORT" = %"PRIu16"; ", -+ lb_vip->vip_port); -+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%"PRIu16 : "%s:%"PRIu16, -+ lb_vip->vip_str, lb_vip->vip_port); -+ } else { -+ ds_put_cstr(&aff_action_learn, lb_vip->vip_str); -+ } -+ -+ ds_put_cstr(&aff_action, "ct_lb_mark(backends="); -+ ds_put_cstr(&aff_action_learn, "\", backend = \""); -+ -+ /* Prepare common part of affinity learn match. */ -+ if (lb_vip->vip_port) { -+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && " -+ "ct.new && %s && %s == %s && " -+ REG_ORIG_TP_DPORT" == %"PRIu16" && %s.dst == ", -+ ip_match, reg_vip, lb_vip->vip_str, -+ lb_vip->vip_port, ip_match); -+ } else { -+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && " -+ "ct.new && %s && %s == %s && %s.dst == ", -+ ip_match, reg_vip, lb_vip->vip_str, ip_match); -+ } -+ -+ /* Prepare common part of affinity match. */ -+ ds_put_format(&aff_match, REGBIT_KNOWN_LB_SESSION" == 1 && " -+ "ct.new && %s && %s == ", ip_match, reg_backend); -+ -+ /* Store the common part length. */ -+ size_t aff_action_len = aff_action.length; -+ size_t aff_action_learn_len = aff_action_learn.length; -+ size_t aff_match_len = aff_match.length; -+ size_t aff_match_learn_len = aff_match_learn.length; -+ -+ for (size_t i = 0; i < lb_vip->n_backends; i++) { -+ struct ovn_lb_backend *backend = &lb_vip->backends[i]; -+ -+ ds_put_cstr(&aff_match_learn, backend->ip_str); -+ ds_put_cstr(&aff_match, backend->ip_str); -+ -+ if (backend->port) { -+ ds_put_format(&aff_action, ipv6 ? "[%s]:%d" : "%s:%d", -+ backend->ip_str, backend->port); -+ ds_put_format(&aff_action_learn, ipv6 ? 
"[%s]:%d" : "%s:%d", -+ backend->ip_str, backend->port); -+ -+ ds_put_format(&aff_match_learn, " && %s.dst == %d", -+ lb->proto, backend->port); -+ ds_put_format(&aff_match, " && "REG_LB_AFF_MATCH_PORT" == %d", -+ backend->port); -+ } else { -+ ds_put_cstr(&aff_action, backend->ip_str); -+ ds_put_cstr(&aff_action_learn, backend->ip_str); -+ } -+ -+ ds_put_cstr(&aff_action, ");"); -+ ds_put_char(&aff_action_learn, '"'); -+ -+ if (lb_vip->vip_port) { -+ ds_put_format(&aff_action_learn, ", proto = %s", lb->proto); -+ } -+ -+ ds_put_format(&aff_action_learn, ", timeout = %d); /* drop */", -+ lb->affinity_timeout); -+ -+ struct ovn_lflow *lflow_ref_aff_learn = NULL; -+ uint32_t hash_aff_learn = ovn_logical_flow_hash( -+ ovn_stage_get_table(S_SWITCH_IN_LB_AFF_LEARN), -+ ovn_stage_get_pipeline(S_SWITCH_IN_LB_AFF_LEARN), -+ 100, ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn)); -+ -+ struct ovn_lflow *lflow_ref_aff_lb = NULL; -+ uint32_t hash_aff_lb = ovn_logical_flow_hash( -+ ovn_stage_get_table(S_SWITCH_IN_LB), -+ ovn_stage_get_pipeline(S_SWITCH_IN_LB), -+ 150, ds_cstr(&aff_match), ds_cstr(&aff_action)); -+ -+ for (size_t j = 0; j < lb->n_nb_ls; j++) { -+ /* Forward to OFTABLE_CHK_LB_AFFINITY table to store flow tuple. */ -+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_learn, -+ lb->nb_ls[j])) { -+ lflow_ref_aff_learn = ovn_lflow_add_at_with_hash( -+ lflows, lb->nb_ls[j], S_SWITCH_IN_LB_AFF_LEARN, 100, -+ ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn), -+ NULL, NULL, &lb->nlb->header_, OVS_SOURCE_LOCATOR, -+ hash_aff_learn); -+ } -+ /* Use already selected backend within affinity timeslot. */ -+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_lb, -+ lb->nb_ls[j])) { -+ lflow_ref_aff_lb = ovn_lflow_add_at_with_hash( -+ lflows, lb->nb_ls[j], S_SWITCH_IN_LB, 150, -+ ds_cstr(&aff_match), ds_cstr(&aff_action), NULL, NULL, -+ &lb->nlb->header_, OVS_SOURCE_LOCATOR, -+ hash_aff_lb); -+ } -+ } -+ -+ ds_truncate(&aff_action, aff_action_len); -+ ds_truncate(&aff_action_learn, aff_action_learn_len); -+ ds_truncate(&aff_match, aff_match_len); -+ ds_truncate(&aff_match_learn, aff_match_learn_len); -+ } -+ -+ ds_destroy(&aff_action); -+ ds_destroy(&aff_action_learn); -+ ds_destroy(&aff_match); -+ ds_destroy(&aff_match_learn); -+} -+ -+static void -+build_lb_affinity_default_flows(struct ovn_datapath *od, struct hmap *lflows) -+{ -+ if (od->nbs) { -+ ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_CHECK, 0, "1", "next;"); -+ ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_LEARN, 0, "1", "next;"); -+ } -+ if (od->nbr) { -+ ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_CHECK, 0, "1", "next;"); -+ ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_LEARN, 0, "1", "next;"); -+ } -+} -+ - static void - build_lb_rules(struct hmap *lflows, struct ovn_northd_lb *lb, bool ct_lb_mark, - struct ds *match, struct ds *action, -@@ -6985,6 +7434,8 @@ build_lb_rules(struct hmap *lflows, struct ovn_northd_lb *lb, bool ct_lb_mark, - priority = 120; - } - -+ build_lb_affinity_ls_flows(lflows, lb, lb_vip); -+ - struct ovn_lflow *lflow_ref = NULL; - uint32_t hash = ovn_logical_flow_hash( - ovn_stage_get_table(S_SWITCH_IN_LB), -@@ -8451,6 +8902,10 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group, - if (atomic_compare_exchange_strong( - &mcast_sw_info->active_v4_flows, &table_size, - mcast_sw_info->table_size)) { -+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); -+ -+ VLOG_INFO_RL(&rl, "Too many active mcast flows: %"PRIu64, -+ mcast_sw_info->active_v4_flows); - return; - } - 
atomic_add(&mcast_sw_info->active_v4_flows, 1, &dummy); -@@ -10063,6 +10518,14 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, - xcalloc(lb->n_nb_lr, sizeof *distributed_router); - int n_distributed_router = 0; - -+ struct ovn_datapath **lb_aff_force_snat_router = -+ xcalloc(lb->n_nb_lr, sizeof *lb_aff_force_snat_router); -+ int n_lb_aff_force_snat_router = 0; -+ -+ struct ovn_datapath **lb_aff_router = -+ xcalloc(lb->n_nb_lr, sizeof *lb_aff_router); -+ int n_lb_aff_router = 0; -+ - /* Group gw router since we do not have datapath dependency in - * lflow generation for them. - */ -@@ -10081,6 +10544,13 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, - distributed_router[n_distributed_router++] = od; - } - -+ if (!lport_addresses_is_empty(&od->lb_force_snat_addrs) || -+ od->lb_force_snat_router_ip) { -+ lb_aff_force_snat_router[n_lb_aff_force_snat_router++] = od; -+ } else { -+ lb_aff_router[n_lb_aff_router++] = od; -+ } -+ - if (sset_contains(&od->external_ips, lb_vip->vip_str)) { - /* The load balancer vip is also present in the NAT entries. - * So add a high priority lflow to advance the the packet -@@ -10113,10 +10583,26 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, - "flags.force_snat_for_lb = 1; next;", - lflows, prio, meter_groups); - -+ /* LB affinity flows for datapaths where CMS has specified -+ * force_snat_for_lb floag option. -+ */ -+ build_lb_affinity_lr_flows(lflows, lb, lb_vip, new_match, -+ "flags.force_snat_for_lb = 1; ", -+ lb_aff_force_snat_router, -+ n_lb_aff_force_snat_router); -+ - build_gw_lrouter_nat_flows_for_lb(lb, gw_router, n_gw_router, - reject, new_match, ds_cstr(action), est_match, - "next;", lflows, prio, meter_groups); - -+ /* LB affinity flows for datapaths where CMS has specified -+ * skip_snat_for_lb floag option or regular datapaths. -+ */ -+ char *lb_aff_action = -+ lb->skip_snat ? "flags.skip_snat_for_lb = 1; " : NULL; -+ build_lb_affinity_lr_flows(lflows, lb, lb_vip, new_match, lb_aff_action, -+ lb_aff_router, n_lb_aff_router); -+ - /* Distributed router logic */ - for (size_t i = 0; i < n_distributed_router; i++) { - struct ovn_datapath *od = distributed_router[i]; -@@ -10210,6 +10696,8 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, - free(gw_router_force_snat); - free(gw_router_skip_snat); - free(distributed_router); -+ free(lb_aff_force_snat_router); -+ free(lb_aff_router); - free(gw_router); - } - -@@ -13633,7 +14121,8 @@ static void - build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows, - const struct hmap *ports, struct ds *match, - struct ds *actions, -- const struct shash *meter_groups) -+ const struct shash *meter_groups, -+ bool ct_lb_mark) - { - if (!od->nbr) { - return; -@@ -13827,6 +14316,26 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows, - } - } - -+ if (od->nbr->n_nat) { -+ ds_clear(match); -+ const char *ct_natted = ct_lb_mark ? -+ "ct_mark.natted" : -+ "ct_label.natted"; -+ ds_put_format(match, "ip && %s == 1", ct_natted); -+ /* This flow is unique since it is in the egress pipeline but checks -+ * the value of ct_label.natted, which would have been set in the -+ * ingress pipeline. If a change is ever introduced that clears or -+ * otherwise invalidates the ct_label between the ingress and egress -+ * pipelines, then an alternative will need to be devised. 
-+ */ -+ ds_clear(actions); -+ ds_put_cstr(actions, REGBIT_DST_NAT_IP_LOCAL" = 1; next;"); -+ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_CHECK_DNAT_LOCAL, -+ 50, ds_cstr(match), ds_cstr(actions), -+ &od->nbr->header_); -+ -+ } -+ - /* Handle force SNAT options set in the gateway router. */ - if (od->is_gw_router) { - if (dnat_force_snat_ip) { -@@ -13925,7 +14434,9 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od, - build_misc_local_traffic_drop_flows_for_lrouter(od, lsi->lflows); - build_lrouter_arp_nd_for_datapath(od, lsi->lflows, lsi->meter_groups); - build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->ports, &lsi->match, -- &lsi->actions, lsi->meter_groups); -+ &lsi->actions, lsi->meter_groups, -+ lsi->features->ct_no_masked_label); -+ build_lb_affinity_default_flows(od, lsi->lflows); - } - - /* Helper function to combine all lflow generation which is iterated by port. -@@ -15148,6 +15659,11 @@ build_mcast_groups(struct lflow_input *input_data, - - hmap_init(mcast_groups); - hmap_init(igmp_groups); -+ struct ovn_datapath *od; -+ -+ HMAP_FOR_EACH (od, key_node, datapaths) { -+ init_mcast_flow_count(od); -+ } - - HMAP_FOR_EACH (op, key_node, ports) { - if (op->nbrp && lrport_is_enabled(op->nbrp)) { -@@ -15205,8 +15721,7 @@ build_mcast_groups(struct lflow_input *input_data, - } - - /* If the datapath value is stale, purge the group. */ -- struct ovn_datapath *od = -- ovn_datapath_from_sbrec(datapaths, sb_igmp->datapath); -+ od = ovn_datapath_from_sbrec(datapaths, sb_igmp->datapath); - - if (!od || ovn_datapath_is_stale(od)) { - sbrec_igmp_group_delete(sb_igmp); -@@ -15251,7 +15766,6 @@ build_mcast_groups(struct lflow_input *input_data, - * IGMP groups are based on the groups learnt by their multicast enabled - * peers. - */ -- struct ovn_datapath *od; - HMAP_FOR_EACH (od, key_node, datapaths) { - - if (ovs_list_is_empty(&od->mcast_info.groups)) { -diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml -index dae961c87..509ca4821 100644 ---- a/northd/ovn-northd.8.xml -+++ b/northd/ovn-northd.8.xml -@@ -853,9 +853,56 @@ - - - --

Ingress Table 11: LB</h3>
-+    <h3>Ingress Table 11: Load balancing affinity check</h3>
-+
-+    <p>
-+      Load balancing affinity check table contains the following
-+      logical flows:
-+    </p>
- 
-     <ul>
-+      <li>
-+        For all the configured load balancing rules for a switch in
-+        OVN_Northbound database where a positive affinity timeout
-+        is specified in options column, that includes a L4 port
-+        PORT of protocol P and IP address VIP,
-+        a priority-100 flow is added. For IPv4 VIPs, the flow
-+        matches ct.new && ip && ip4.dst == VIP
-+        && P.dst == PORT. For IPv6
-+        VIPs, the flow matches ct.new && ip &&
-+        ip6.dst == VIP && P &&
-+        P.dst == PORT. The flow's action is
-+        reg9[6] = chk_lb_aff(); next;.
-+      </li>
-+
-+      <li>
-+        A priority 0 flow is added which matches on all packets and applies
-+        the action next;.
-+      </li>
-+    </ul>
-+
-+    <h3>Ingress Table 12: LB</h3>
-+
-+    <ul>
-+      <li>
-+        For all the configured load balancing rules for a switch in
-+        OVN_Northbound database where a positive affinity timeout
-+        is specified in options column, that includes a L4 port
-+        PORT of protocol P and IP address VIP,
-+        a priority-150 flow is added. For IPv4 VIPs, the flow
-+        matches reg9[6] == 1 && ct.new && ip &&
-+        ip4.dst == VIP && P.dst == PORT.
-+        For IPv6 VIPs, the flow matches
-+        reg9[6] == 1 && ct.new && ip &&
-+        ip6.dst == VIP && P &&
-+        P.dst == PORT.
-+        The flow's action is ct_lb_mark(args), where
-+        args contains comma separated IP addresses (and optional
-+        port numbers) to load balance to. The address family of the IP
-+        addresses of args is the same as the address family
-+        of VIP.
-+      </li>
-+
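As an illustration (not part of the original patch): the behaviour documented above is driven by the new affinity_timeout load-balancer option parsed in lib/lb.c earlier in this patch. Assuming a pre-existing load balancer named lb0 (a hypothetical name), a CMS could enable it with something like:

    # Request 60 seconds of session affinity for lb0 (name assumed for the example).
    ovn-nbctl set load_balancer lb0 options:affinity_timeout=60

Per the lib/lb.c hunk above, values larger than 65535 are clamped to 65535 seconds.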
  • - For all the configured load balancing rules for a switch in - OVN_Northbound database that includes a L4 port -@@ -914,7 +961,38 @@ -
  • -
- 
---    <h3>Ingress table 12: from-lport ACLs after LB</h3>
-+    <h3>Ingress Table 13: Load balancing affinity learn</h3>
-+
-+    <p>
-+      Load balancing affinity learn table contains the following
-+      logical flows:
-+    </p>
-+
-+    <ul>
-+      <li>
-+        For all the configured load balancing rules for a switch in
-+        OVN_Northbound database where a positive affinity timeout
-+        T is specified in options column, that includes
-+        a L4 port PORT of protocol P and IP address
-+        VIP, a priority-100 flow is added. For IPv4 VIPs,
-+        the flow matches reg9[6] == 0 && ct.new && ip
-+        && ip4.dst == VIP && P.dst == PORT.
-+        For IPv6 VIPs, the flow matches
-+        ct.new && ip && ip6.dst == VIP
-+        && P && P.dst == PORT.
-+        The flow's action is commit_lb_aff(vip = VIP:PORT,
-+        backend = backend ip:backend port,
-+        proto = P, timeout = T);.
-+      </li>
-+
-+      <li>
-+        A priority 0 flow is added which matches on all packets and applies
-+        the action next;.
-+      </li>
-+    </ul>
-+
-+    <h3>Ingress table 14: from-lport ACLs after LB</h3>
- 
-    <p>
-      Logical flows in this table closely reproduce those in the
-@@ -976,7 +1054,7 @@
- 
- 
- 
---    <h3>Ingress Table 13: Stateful</h3>
-+    <h3>Ingress Table 15: Stateful</h3>
- -
    -
  • -@@ -999,7 +1077,7 @@ -
  • -
- 
---    <h3>Ingress Table 14: Pre-Hairpin</h3>
-+    <h3>Ingress Table 16: Pre-Hairpin</h3>
-
    -
  • - If the logical switch has load balancer(s) configured, then a -@@ -1017,7 +1095,7 @@ -
  • -
- 
---    <h3>Ingress Table 15: Nat-Hairpin</h3>
-+    <h3>Ingress Table 17: Nat-Hairpin</h3>
-
    -
  • - If the logical switch has load balancer(s) configured, then a -@@ -1052,7 +1130,7 @@ -
  • -
- 
---    <h3>Ingress Table 16: Hairpin</h3>
-+    <h3>Ingress Table 18: Hairpin</h3>
-
    -
  • -

    -@@ -1086,7 +1164,7 @@ -

  • -
- 
---    <h3>Ingress Table 17: ARP/ND responder</h3>
-+    <h3>Ingress Table 19: ARP/ND responder</h3>
- -

- This table implements ARP/ND responder in a logical switch for known -@@ -1388,7 +1466,7 @@ output; - - - --

Ingress Table 18: DHCP option processing</h3>
-+    <h3>Ingress Table 20: DHCP option processing</h3>
- -

- This table adds the DHCPv4 options to a DHCPv4 packet from the -@@ -1449,7 +1527,7 @@ next; - - - --

Ingress Table 19: DHCP responses</h3>
-+    <h3>Ingress Table 21: DHCP responses</h3>
- -

- This table implements DHCP responder for the DHCP replies generated by -@@ -1530,7 +1608,7 @@ output; - - - --

Ingress Table 20 DNS Lookup</h3>
-+    <h3>Ingress Table 22 DNS Lookup</h3>
- -

- This table looks up and resolves the DNS names to the corresponding -@@ -1559,7 +1637,7 @@ reg0[4] = dns_lookup(); next; - - - --

Ingress Table 21 DNS Responses</h3>
-+    <h3>Ingress Table 23 DNS Responses</h3>
- -

- This table implements DNS responder for the DNS replies generated by -@@ -1594,7 +1672,7 @@ output; - - - --

Ingress table 22 External ports</h3>
-+    <h3>Ingress table 24 External ports</h3>
- -

- Traffic from the external logical ports enter the ingress -@@ -1637,7 +1715,7 @@ output; - - - --

Ingress Table 23 Destination Lookup</h3>
-+    <h3>Ingress Table 25 Destination Lookup</h3>
- -

- This table implements switching behavior. It contains these logical -@@ -1806,7 +1884,7 @@ output; - - - --

Ingress Table 24 Destination unknown</h3>
-+    <h3>Ingress Table 26 Destination unknown</h3>
- -

- This table handles the packets whose destination was not found or -@@ -3172,7 +3250,33 @@ icmp6 { - packet de-fragmentation and tracking before sending it to the next table. -

- 
---    <h3>Ingress Table 6: DNAT</h3>
-+    <h3>Ingress Table 6: Load balancing affinity check</h3>
-+
-+    <p>
-+      Load balancing affinity check table contains the following
-+      logical flows:
-+    </p>
-+
-+    <ul>
-+      <li>
-+        For all the configured load balancing rules for a logical router where
-+        a positive affinity timeout is specified in options
-+        column, that includes a L4 port PORT of protocol
-+        P and IPv4 or IPv6 address VIP, a priority-100
-+        flow that matches on ct.new && ip &&
-+        reg0 == VIP && P && reg9[16..31] == PORT
-+        (xxreg0 == VIP in the IPv6 case) with an action of
-+        reg9[6] = chk_lb_aff(); next;
-+      </li>
-+
-+      <li>
-+        A priority 0 flow is added which matches on all packets and applies
-+        the action next;.
-+      </li>
-+    </ul>
-+
-+    <h3>Ingress Table 7: DNAT</h3>
- 
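For illustration only (the addresses and timeout are hypothetical, not taken from the patch): with a VIP of 172.16.0.10:80, a backend of 10.0.0.2:8080 and affinity_timeout=60, the comment on build_lb_affinity_lr_flows() earlier in this patch describes the resulting per-backend router DNAT flow roughly as:

    table=lr_in_dnat, priority=150
      match=(reg9[6] == 1 && ct.new && ip4 &&
             reg4 == 10.0.0.2 && reg8[0..15] == 8080)
      action=(reg0 = 172.16.0.10; ct_lb_mark(backends=10.0.0.2:8080);)

Here reg9[6], reg4 and reg8[0..15] are REGBIT_KNOWN_LB_SESSION, REG_LB_AFF_BACKEND_IP4 and REG_LB_AFF_MATCH_PORT as defined in the northd.c hunks above.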

- Packets enter the pipeline with destination IP address that needs to -@@ -3180,7 +3284,7 @@ icmp6 { - in the reverse direction needs to be unDNATed. -

- 
---    <h3>Ingress Table 6: Load balancing DNAT rules</h3>
-+    <h3>Ingress Table 7: Load balancing DNAT rules</h3>
- 
-    <p>
-      Following load balancing DNAT flows are added for Gateway router or
-@@ -3190,6 +3294,21 @@ icmp6 {
-    </p>
- 
-    <ul>
-+      <li>
-+        For all the configured load balancing rules for a logical router where
-+        a positive affinity timeout is specified in options
-+        column, that includes a L4 port PORT of protocol
-+        P and IPv4 or IPv6 address VIP, a priority-150
-+        flow that matches on reg9[6] == 1 && ct.new &&
-+        ip && reg0 == VIP && P &&
-+        reg9[16..31] == PORT (xxreg0 == VIP
-+        in the IPv6 case) with an action of ct_lb_mark(args),
-+        where args contains comma separated IP addresses (and optional
-+        port numbers) to load balance to. The address family of the IP
-+        addresses of args is the same as the address family of VIP.
-+      </li>
-+
  • - If controller_event has been enabled for all the configured load - balancing rules for a Gateway router or Router with gateway port -@@ -3319,7 +3438,7 @@ icmp6 { -
  • -
- 
---    <h3>Ingress Table 6: DNAT on Gateway Routers</h3>
-+    <h3>Ingress Table 7: DNAT on Gateway Routers</h3>
- -
    -
  • -@@ -3361,7 +3480,7 @@ icmp6 { -
  • -
- 
---    <h3>Ingress Table 6: DNAT on Distributed Routers</h3>
-+    <h3>Ingress Table 7: DNAT on Distributed Routers</h3>
- -

-      On distributed routers, the DNAT table only handles packets
-@@ -3416,7 +3535,35 @@ icmp6 {
- 
- 
- 
---    <h3>Ingress Table 7: ECMP symmetric reply processing</h3>
-+    <h3>Ingress Table 8: Load balancing affinity learn</h3>
-+
-+    <p>
-+      Load balancing affinity learn table contains the following
-+      logical flows:
-+    </p>
-+
-+    <ul>
-+      <li>
-+        For all the configured load balancing rules for a logical router where
-+        a positive affinity timeout T is specified in options
-+        column, that includes a L4 port PORT of protocol
-+        P and IPv4 or IPv6 address VIP, a priority-100
-+        flow that matches on reg9[6] == 0 && ct.new &&
-+        ip && reg0 == VIP && P &&
-+        reg9[16..31] == PORT (xxreg0 == VIP
-+        in the IPv6 case) with an action of
-+        commit_lb_aff(vip = VIP:PORT, backend =
-+        backend ip:backend port, proto = P,
-+        timeout = T);.
-+      </li>
-+
-+      <li>
-+        A priority 0 flow is added which matches on all packets and applies
-+        the action next;.
-+      </li>
-+    </ul>
-+
-+    <h3>Ingress Table 9: ECMP symmetric reply processing</h3>
-
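Continuing the same hypothetical example (VIP 172.16.0.10:80, backend 10.0.0.2:8080, timeout 60 seconds), the affinity learn flow described above would look roughly like this, per the build_lb_affinity_lr_flows() comment earlier in the patch:

    table=lr_in_lb_aff_learn, priority=100
      match=(reg9[6] == 0 && ct.new && ip4 && reg0 == 172.16.0.10 &&
             reg9[16..31] == 80 && ip4.dst == 10.0.0.2 && tcp.dst == 8080)
      action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "10.0.0.2:8080",
                            proto = tcp, timeout = 60);)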
    -
  • - If ECMP routes with symmetric reply are configured in the -@@ -3435,7 +3582,7 @@ icmp6 { -
  • -
- 
---    <h3>Ingress Table 8: IPv6 ND RA option processing</h3>
-+    <h3>Ingress Table 10: IPv6 ND RA option processing</h3>
- -
    -
  • -@@ -3465,7 +3612,7 @@ reg0[5] = put_nd_ra_opts(options);next; -
  • -
- 
---    <h3>Ingress Table 9: IPv6 ND RA responder</h3>
-+    <h3>Ingress Table 11: IPv6 ND RA responder</h3>
- -

- This table implements IPv6 ND RA responder for the IPv6 ND RA replies -@@ -3510,7 +3657,7 @@ output; - - - --

Ingress Table 10: IP Routing Pre</h3>
-+    <h3>Ingress Table 12: IP Routing Pre</h3>
- -

- If a packet arrived at this table from Logical Router Port P -@@ -3540,7 +3687,7 @@ output; - - - --

Ingress Table 11: IP Routing</h3>
-+    <h3>Ingress Table 13: IP Routing</h3>
- -

- A packet that arrives at this table is an IP packet that should be -@@ -3741,7 +3888,7 @@ select(reg8[16..31], MID1, MID2, ...); - - - --

Ingress Table 12: IP_ROUTING_ECMP</h3>
-+    <h3>Ingress Table 14: IP_ROUTING_ECMP</h3>
- -

- This table implements the second part of IP routing for ECMP routes -@@ -3793,7 +3940,7 @@ outport = P; - - - --

Ingress Table 13: Router policies</h3>
-+    <h3>Ingress Table 15: Router policies</h3>
-

- This table adds flows for the logical router policies configured - on the logical router. Please see the -@@ -3865,7 +4012,7 @@ next; - - - --

Ingress Table 14: ECMP handling for router policies</h3>
-+    <h3>Ingress Table 16: ECMP handling for router policies</h3>
-

- This table handles the ECMP for the router policies configured - with multiple nexthops. -@@ -3909,7 +4056,7 @@ outport = P - - - --

Ingress Table 15: ARP/ND Resolution</h3>
-+    <h3>Ingress Table 17: ARP/ND Resolution</h3>
- -

- Any packet that reaches this table is an IP packet whose next-hop -@@ -4110,7 +4257,7 @@ outport = P - - - --

Ingress Table 16: Check packet length</h3>
-+    <h3>Ingress Table 18: Check packet length</h3>
- -

- For distributed logical routers or gateway routers with gateway -@@ -4147,7 +4294,7 @@ REGBIT_PKT_LARGER = check_pkt_larger(L); next; - and advances to the next table. -

- 
---    <h3>Ingress Table 17: Handle larger packets</h3>
-+    <h3>Ingress Table 19: Handle larger packets</h3>
- -

- For distributed logical routers or gateway routers with gateway port -@@ -4210,7 +4357,7 @@ icmp6 { - and advances to the next table. -

- 
---    <h3>Ingress Table 18: Gateway Redirect</h3>
-+    <h3>Ingress Table 20: Gateway Redirect</h3>
- -

- For distributed logical routers where one or more of the logical router -@@ -4278,7 +4425,7 @@ icmp6 { - - - --

Ingress Table 19: ARP Request</h3>
-+    <h3>Ingress Table 21: ARP Request</h3>
- -

- In the common case where the Ethernet destination has been resolved, this -@@ -4392,6 +4539,22 @@ nd_ns { - - - -+

-+ This table also installs a priority-50 logical flow for each logical -+ router that has NATs configured on it. The flow has match -+ ip && ct_label.natted == 1 and action -+ REGBIT_DST_NAT_IP_LOCAL = 1; next;. This is intended -+ to ensure that traffic that was DNATted locally will use a separate -+ conntrack zone for SNAT if SNAT is required later in the egress -+ pipeline. Note that this flow checks the value of -+ ct_label.natted, which is set in the ingress pipeline. -+ This means that ovn-northd assumes that this value is carried over -+ from the ingress pipeline to the egress pipeline and is not altered -+ or cleared. If conntrack label values are ever changed to be cleared -+ between the ingress and egress pipelines, then the match conditions -+ of this flow will be updated accordingly. -+

-+ -
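The paragraph above describes a new priority-50 flow in the lr_out_chk_dnat_local egress stage. As a sanity check on a live deployment it can be grepped out of the logical flow dump; a minimal sketch, assuming a router named lr0 with at least one NAT rule configured:

    # Depending on chassis capabilities the match uses ct_mark.natted or
    # ct_label.natted; both variants appear in the test expectations below.
    ovn-sbctl lflow-list lr0 | grep lr_out_chk_dnat_local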

-     <h3>Egress Table 1: UNDNAT</h3>

- -

-diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c -index 96f17f15f..4bf1afe3b 100644 ---- a/northd/ovn-northd.c -+++ b/northd/ovn-northd.c -@@ -125,6 +125,10 @@ static const char *rbac_igmp_group_auth[] = - {""}; - static const char *rbac_igmp_group_update[] = - {"address", "chassis", "datapath", "ports"}; -+static const char *rbac_bfd_auth[] = -+ {""}; -+static const char *rbac_bfd_update[] = -+ {"status"}; - - static struct rbac_perm_cfg { - const char *table; -@@ -207,6 +211,14 @@ static struct rbac_perm_cfg { - .update = rbac_igmp_group_update, - .n_update = ARRAY_SIZE(rbac_igmp_group_update), - .row = NULL -+ },{ -+ .table = "BFD", -+ .auth = rbac_bfd_auth, -+ .n_auth = ARRAY_SIZE(rbac_bfd_auth), -+ .insdel = false, -+ .update = rbac_bfd_update, -+ .n_update = ARRAY_SIZE(rbac_bfd_update), -+ .row = NULL - },{ - .table = NULL, - .auth = NULL, -diff --git a/ovn-nb.xml b/ovn-nb.xml -index 7fe88af27..dee9d4c15 100644 ---- a/ovn-nb.xml -+++ b/ovn-nb.xml -@@ -1908,6 +1908,14 @@ - requests only for VIPs that are part of a router's subnet. The default - value of this option, if not specified, is reachable. - -+ -+ -+ If the CMS provides a positive value (in seconds) for -+ affinity_timeout, OVN will dnat connections received -+ from the same client to this lb to the same backend if received in -+ the affinity timeslot. Max supported affinity_timeout is 65535 -+ seconds. -+ - - - -diff --git a/ovn-sb.xml b/ovn-sb.xml -index 37a709f83..f9cecb2f9 100644 ---- a/ovn-sb.xml -+++ b/ovn-sb.xml -@@ -2624,6 +2624,50 @@ tcp.flags = RST; - register R is set to 1. -

-+      commit_lb_aff(vip, backend, proto, timeout);
-+
-+      Parameters: load-balancer virtual ip:port vip, load-balancer
-+      backend ip:port backend, load-balancer protocol proto, affinity
-+      timeout timeout.
-+
-+      This action translates to an openflow "learn" action that inserts
-+      a new flow in table 78:
-+
-+        Match on the 4-tuple in table 78: nw_src=ip client,
-+        nw_dst=vip ip, ip_proto, tp_dst=vip port and set reg9[6]
-+        to 1, reg4 and reg8 to backend ip and port respectively.
-+        For IPv6 register xxreg1 is used to store the backend ip.
-+
-+      This action is applied for new connections received by a specific
-+      load-balancer with affinity timeout configured.
-+
-+      R = chk_lb_aff();
-+
-+      This action checks if the packet under consideration matches any
-+      flow in table 78. If so, then the 1-bit destination register
-+      R is set to 1.
-+
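The two actions documented above are the mechanism behind load-balancer session affinity; from the CMS side the feature is driven by the affinity_timeout load-balancer option added in the ovn-nb.xml hunk earlier. A minimal usage sketch, assuming an existing load balancer named lb0 (a hypothetical name):

    # Enable session affinity; the timeout is in seconds and may be at most
    # 65535 per the NB documentation above.
    ovn-nbctl set load_balancer lb0 options:affinity_timeout=60
    # The affinity-related logical flows can then be inspected with, e.g.:
    ovn-sbctl lflow-list | grep -i lb_aff   # stage name pattern assumed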
- - - -diff --git a/rhel/ovn-fedora.spec.in b/rhel/ovn-fedora.spec.in -index 821eb03cc..57dc977c1 100644 ---- a/rhel/ovn-fedora.spec.in -+++ b/rhel/ovn-fedora.spec.in -@@ -65,6 +65,7 @@ BuildRequires: tcpdump - BuildRequires: unbound unbound-devel - - Requires: openssl hostname iproute module-init-tools openvswitch -+Requires: python3-openvswitch - - Requires(post): systemd-units - Requires(preun): systemd-units -diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at -index 3c3fb31c7..6a0e83c33 100644 ---- a/tests/ovn-controller.at -+++ b/tests/ovn-controller.at -@@ -2337,3 +2337,115 @@ done - AT_CHECK([grep "deleted interface patch" hv1/ovs-vswitchd.log], [1], [ignore]) - OVN_CLEANUP([hv1]) - AT_CLEANUP -+ -+AT_SETUP([ovn-controller - resolve CT zone conflicts from ovsdb]) -+ -+ovn_start -+ -+net_add n1 -+sim_add hv1 -+as hv1 -+check ovs-vsctl add-br br-phys -+ovn_attach n1 br-phys 192.168.0.1 -+ -+get_zone_num () { -+ output=$1 -+ name=$2 -+ printf "$output" | grep $name | cut -d ' ' -f 2 -+} -+ -+check_ovsdb_zone() { -+ name=$1 -+ ct_zone=$2 -+ db_zone=$(ovs-vsctl get Bridge br-int external_ids:ct-zone-${name} | sed -e 's/^"//' -e 's/"$//') -+ test $ct_zone -eq $db_zone -+} -+ -+check ovs-vsctl add-port br-int ls0-hv1 -- set Interface ls0-hv1 external-ids:iface-id=ls0-hv1 -+check ovs-vsctl add-port br-int ls0-hv2 -- set Interface ls0-hv2 external-ids:iface-id=ls0-hv2 -+ -+check ovn-nbctl lr-add lr0 -+ -+check ovn-nbctl ls-add ls0 -+check ovn-nbctl lsp-add ls0 ls0-lr0 -+check ovn-nbctl lsp-set-type ls0-lr0 router -+check ovn-nbctl lsp-set-addresses ls0-lr0 router -+check ovn-nbctl lrp-add lr0 lr0-ls0 00:00:00:00:00:01 10.0.0.1 -+ -+check ovn-nbctl lsp-add ls0 ls0-hv1 -+check ovn-nbctl lsp-set-addresses ls0-hv1 "00:00:00:00:00:02 10.0.0.2" -+ -+check ovn-nbctl lsp-add ls0 ls0-hv2 -+check ovn-nbctl lsp-set-addresses ls0-hv2 "00:00:00:00:00:03 10.0.0.3" -+ -+check ovn-nbctl lrp-add lr0 lrp-gw 01:00:00:00:00:01 172.16.0.1 -+check ovn-nbctl lrp-set-gateway-chassis lrp-gw hv1 -+ -+check ovn-nbctl --wait=hv sync -+ -+ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list) -+echo "$ct_zones" -+ -+port1_zone=$(get_zone_num "$ct_zones" ls0-hv1) -+port2_zone=$(get_zone_num "$ct_zones" ls0-hv2) -+ -+lr_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=lr0) -+snat_zone=$(get_zone_num "$ct_zones" ${lr_uuid}_snat) -+echo "snat_zone is $snat_zone" -+ -+check test "$port1_zone" -ne "$port2_zone" -+check test "$port2_zone" -ne "$snat_zone" -+check test "$port1_zone" -ne "$snat_zone" -+ -+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv1 $port1_zone]) -+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv2 $port2_zone]) -+OVS_WAIT_UNTIL([check_ovsdb_zone ${lr_uuid}_snat $snat_zone]) -+ -+# Now purposely request an SNAT zone for lr0 that conflicts with a zone -+# currently assigned to a logical port -+ -+snat_req_zone=$port1_zone -+check ovn-nbctl set Logical_Router lr0 options:snat-ct-zone=$snat_req_zone -+ovn-nbctl --wait=hv sync -+ -+ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list) -+echo "$ct_zones" -+ -+port1_zone=$(get_zone_num "$ct_zones" ls0-hv1) -+port2_zone=$(get_zone_num "$ct_zones" ls0-hv2) -+snat_zone=$(get_zone_num "$ct_zones" ${lr_uuid}_snat) -+ -+check test "$snat_zone" -eq "$snat_req_zone" -+check test "$port1_zone" -ne "$port2_zone" -+check test "$port2_zone" -ne "$snat_zone" -+check test "$port1_zone" -ne "$snat_zone" -+ -+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv1 $port1_zone]) -+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv2 $port2_zone]) -+OVS_WAIT_UNTIL([check_ovsdb_zone ${lr_uuid}_snat 
$snat_zone]) -+ -+# Now create a conflict in the OVSDB and restart ovn-controller. -+ -+ovs-vsctl set bridge br-int external_ids:ct-zone-ls0-hv1="$snat_req_zone" -+ovs-vsctl set bridge br-int external_ids:ct-zone-ls0-hv2="$snat_req_zone" -+ -+ovn-appctl -t ovn-controller inc-engine/recompute -+ -+ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list) -+echo "$ct_zones" -+ -+port1_zone=$(get_zone_num "$ct_zones" ls0-hv1) -+port2_zone=$(get_zone_num "$ct_zones" ls0-hv2) -+snat_zone=$(get_zone_num "$ct_zones" ${lr_uuid}_snat) -+ -+check test "$snat_zone" -eq "$snat_req_zone" -+check test "$port1_zone" -ne "$port2_zone" -+check test "$port2_zone" -ne "$snat_zone" -+check test "$port1_zone" -ne "$snat_zone" -+ -+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv1 $port1_zone]) -+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv2 $port2_zone]) -+OVS_WAIT_UNTIL([check_ovsdb_zone ${lr_uuid}_snat $snat_zone]) -+ -+OVN_CLEANUP([hv1]) -+AT_CLEANUP -diff --git a/tests/ovn-ic.at b/tests/ovn-ic.at -index b136472c8..c2e26a4be 100644 ---- a/tests/ovn-ic.at -+++ b/tests/ovn-ic.at -@@ -119,6 +119,139 @@ OVN_CLEANUP_IC - AT_CLEANUP - ]) - -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([ovn-ic -- route deletion upon TS deletion]) -+ -+ovn_init_ic_db -+net_add n1 -+ -+# 1 GW per AZ -+for i in 1 2; do -+ az=az$i -+ ovn_start $az -+ sim_add gw-$az -+ as gw-$az -+ check ovs-vsctl add-br br-phys -+ ovn_az_attach $az n1 br-phys 192.168.1.$i -+ check ovs-vsctl set open . external-ids:ovn-is-interconn=true -+ check ovn-nbctl set nb-global . \ -+ options:ic-route-adv=true \ -+ options:ic-route-adv-default=true \ -+ options:ic-route-learn=true \ -+ options:ic-route-learn-default=true -+done -+ -+create_ic_infra() { -+ az_id=$1 -+ ts_id=$2 -+ az=az$i -+ -+ lsp=lsp${az_id}-${ts_id} -+ lrp=lrp${az_id}-${ts_id} -+ ts=ts${az_id}-${ts_id} -+ lr=lr${az_id}-${ts_id} -+ -+ ovn_as $az -+ -+ check ovn-ic-nbctl ts-add $ts -+ check ovn-nbctl lr-add $lr -+ check ovn-nbctl lrp-add $lr $lrp 00:00:00:00:00:0$az_id 10.0.$az_id.1/24 -+ check ovn-nbctl lrp-set-gateway-chassis $lrp gw-$az -+ -+ check ovn-nbctl lsp-add $ts $lsp -- \ -+ lsp-set-addresses $lsp router -- \ -+ lsp-set-type $lsp router -- \ -+ lsp-set-options $lsp router-port=$lrp -+ -+ check ovn-nbctl lr-route-add $lr 192.168.0.0/16 10.0.$az_id.10 -+} -+ -+create_ic_infra 1 1 -+create_ic_infra 1 2 -+create_ic_infra 2 1 -+ -+ovn_as az1 -+ -+wait_row_count ic-sb:Route 3 ip_prefix=192.168.0.0/16 -+ -+# remove transit switch 1 (from az1) and check if its route is deleted -+# same route from another AZ and ts should remain, as -+check ovn-ic-nbctl ts-del ts1-1 -+sleep 2 -+ovn-ic-sbctl list route -+ovn-ic-nbctl list transit_switch -+wait_row_count ic-sb:route 2 ip_prefix=192.168.0.0/16 -+ovn-ic-sbctl list route -+ -+for i in 1 2; do -+ az=az$i -+ OVN_CLEANUP_SBOX(gw-$az) -+ OVN_CLEANUP_AZ([$az]) -+done -+OVN_CLEANUP_IC -+AT_CLEANUP -+]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([ovn-ic -- duplicate NB route adv/learn]) -+ -+ovn_init_ic_db -+net_add n1 -+ -+# 1 GW per AZ -+for i in 1 2; do -+ az=az$i -+ ovn_start $az -+ sim_add gw-$az -+ as gw-$az -+ check ovs-vsctl add-br br-phys -+ ovn_az_attach $az n1 br-phys 192.168.1.$i -+ check ovs-vsctl set open . external-ids:ovn-is-interconn=true -+ check ovn-nbctl set nb-global . 
\ -+ options:ic-route-adv=true \ -+ options:ic-route-adv-default=true \ -+ options:ic-route-learn=true \ -+ options:ic-route-learn-default=true -+done -+ -+ovn_as az1 -+ -+# create transit switch and connect to LR -+check ovn-ic-nbctl ts-add ts1 -+for i in 1 2; do -+ ovn_as az$i -+ -+ check ovn-nbctl lr-add lr1 -+ check ovn-nbctl lrp-add lr1 lrp$i 00:00:00:00:0$i:01 10.0.$i.1/24 -+ check ovn-nbctl lrp-set-gateway-chassis lrp$i gw-az$i -+ -+ check ovn-nbctl lsp-add ts1 lsp$i -- \ -+ lsp-set-addresses lsp$i router -- \ -+ lsp-set-type lsp$i router -- \ -+ lsp-set-options lsp$i router-port=lrp$i -+done -+ -+ovn_as az1 -+ -+ovn-nbctl \ -+ --id=@id create logical-router-static-route ip_prefix=1.1.1.1/32 nexthop=10.0.1.10 -- \ -+ add logical-router lr1 static_routes @id -+ovn-nbctl \ -+ --id=@id create logical-router-static-route ip_prefix=1.1.1.1/32 nexthop=10.0.1.10 -- \ -+ add logical-router lr1 static_routes @id -+ -+wait_row_count ic-sb:route 1 ip_prefix=1.1.1.1/32 -+ -+for i in 1 2; do -+ az=az$i -+ OVN_CLEANUP_SBOX(gw-$az) -+ OVN_CLEANUP_AZ([$az]) -+done -+ -+OVN_CLEANUP_IC -+AT_CLEANUP -+]) -+ - OVN_FOR_EACH_NORTHD([ - AT_SETUP([ovn-ic -- gateway sync]) - -diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at -index 726efa6f4..0d3412742 100644 ---- a/tests/ovn-nbctl.at -+++ b/tests/ovn-nbctl.at -@@ -1623,6 +1623,7 @@ AT_CHECK([ovn-nbctl lr-route-add lr0 0.0.0.0/0 192.168.0.1]) - AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.1.0/24 11.0.1.1 lp0]) - AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.0.1/24 11.0.0.2]) - AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.10.0/24 lp0]) -+AT_CHECK([ovn-nbctl --bfd lr-route-add lr0 10.0.20.0/24 11.0.2.1 lp0]) - AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.10.0/24 lp1], [1], [], - [ovn-nbctl: bad IPv4 nexthop argument: lp1 - ]) -@@ -1676,6 +1677,7 @@ Route Table
: - 10.0.0.0/24 11.0.0.1 dst-ip - 10.0.1.0/24 11.0.1.1 dst-ip lp0 - 10.0.10.0/24 dst-ip lp0 -+ 10.0.20.0/24 11.0.2.1 dst-ip lp0 bfd - 20.0.0.0/24 discard dst-ip - 9.16.1.0/24 11.0.0.1 src-ip - 10.0.0.0/24 11.0.0.2 src-ip -@@ -1683,6 +1685,10 @@ Route Table
: - 0.0.0.0/0 192.168.0.1 dst-ip - ]) - -+check_row_count nb:BFD 1 -+AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.20.0/24]) -+check_row_count nb:BFD 0 -+ - AT_CHECK([ovn-nbctl lrp-add lr0 lp1 f0:00:00:00:00:02 11.0.0.254/24]) - AT_CHECK([ovn-nbctl --may-exist lr-route-add lr0 10.0.0.111/24 11.0.0.1 lp1]) - AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl -diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at -index 7c3c84007..c00831432 100644 ---- a/tests/ovn-northd.at -+++ b/tests/ovn-northd.at -@@ -2149,9 +2149,9 @@ AT_CAPTURE_FILE([sw1flows]) - - AT_CHECK( - [grep -E 'ls_(in|out)_acl' sw0flows sw1flows | grep pg0 | sort], [0], [dnl --sw0flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) -+sw0flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) - sw0flows: table=8 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=5); };) --sw1flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) -+sw1flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) - sw1flows: table=8 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=5); };) - ]) - -@@ -2165,10 +2165,10 @@ ovn-sbctl dump-flows sw1 > sw1flows2 - AT_CAPTURE_FILE([sw1flows2]) - - AT_CHECK([grep "ls_out_acl" sw0flows2 sw1flows2 | grep pg0 | sort], [0], [dnl --sw0flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw0flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw1flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw1flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) -+sw0flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. 
*/ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw0flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw1flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw1flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) - ]) - - AS_BOX([3]) -@@ -2183,16 +2183,16 @@ AT_CAPTURE_FILE([sw1flows3]) - AT_CHECK([grep "ls_out_acl" sw0flows3 sw1flows3 | grep pg0 | sort], [0], [dnl - sw0flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;) - sw0flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;) --sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) -+sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. 
*/ outport <-> inport; next(pipeline=ingress,table=25); };) - sw1flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;) - sw1flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;) --sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) --sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) -+sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) -+sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. 
*/ outport <-> inport; next(pipeline=ingress,table=25); };) - ]) - AT_CLEANUP - ]) -@@ -2364,7 +2364,7 @@ check ovn-nbctl --wait=sb \ - -- ls-lb-add ls lb - - AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl -- table=12(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) -+ table=14(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) - table=3 (ls_out_acl_hint ), priority=0 , match=(1), action=(next;) - table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) - table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) -@@ -2407,7 +2407,7 @@ ovn-nbctl --wait=sb clear logical_switch ls acls - ovn-nbctl --wait=sb clear logical_switch ls load_balancer - - AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl -- table=12(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) -+ table=14(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) - table=3 (ls_out_acl_hint ), priority=65535, match=(1), action=(next;) - table=4 (ls_out_acl ), priority=65535, match=(1), action=(next;) - table=7 (ls_in_acl_hint ), priority=65535, match=(1), action=(next;) -@@ -3640,11 +3640,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -3676,11 +3676,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; 
next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) - ]) - - AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -3722,11 +3722,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) - ]) - - AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -3782,11 +3782,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; 
next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) - ]) - - AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -3829,8 +3829,8 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | grep skip_snat_for_lb | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);) - ]) - - AT_CHECK([grep "lr_out_snat" lr0flows | grep skip_snat_for_lb | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -3998,7 +3998,7 @@ check_stateful_flows() { - table=? (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 10.0.0.20 && tcp.dst == 80), action=(reg1 = 10.0.0.20; reg2[[0..15]] = 80; ct_lb_mark;) - ]) - -- AT_CHECK([grep "ls_in_lb" sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl -+ AT_CHECK([grep "ls_in_lb " sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl - table=??(ls_in_lb ), priority=0 , match=(1), action=(next;) - table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.4:8080);) - table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.20 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.40:8080);) -@@ -4064,7 +4064,7 @@ AT_CHECK([grep "ls_in_pre_stateful" sw0flows | sort | sed 's/table=./table=?/'], - table=? 
(ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;) - ]) - --AT_CHECK([grep "ls_in_lb" sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl -+AT_CHECK([grep "ls_in_lb " sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl - table=??(ls_in_lb ), priority=0 , match=(1), action=(next;) - ]) - -@@ -4925,7 +4925,7 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -4961,7 +4961,7 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -5013,12 +5013,13 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl - table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;) -+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;) - table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.10 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;) - table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.20 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;) - table=? 
(lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.30 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;) -@@ -5079,20 +5080,21 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , 
match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl - table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;) -+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;) - table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.10 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;) - table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.20 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;) - table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.30 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;) -@@ -5147,20 +5149,21 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(next;) 
-+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl - table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;) -+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -5207,20 +5210,21 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && 
ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl - table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;) -+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -5270,22 +5274,23 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 
172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl - table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;) -+ table=? 
(lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -5346,24 +5351,25 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), 
priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl - table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;) -+ table=? 
(lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;) - ]) - - AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -5413,11 +5419,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl - ]) - - AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl -- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);) - ]) - - AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl -@@ -6129,7 +6135,6 @@ AT_CHECK([grep -e "(lr_in_ip_routing ).*outport" lr0flows | sed 's/table=../ta - ]) - - AT_CLEANUP --]) - - OVN_FOR_EACH_NORTHD([ - AT_SETUP([check exclude-lb-vips-from-garp option]) -@@ -6508,7 +6513,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], - table=??(ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) - ]) - --AT_CHECK([grep -e "ls_in_lb" lsflows | sed 's/table=../table=??/' | sort], [0], [dnl -+AT_CHECK([grep -e "ls_in_lb " lsflows | sed 's/table=../table=??/' | sort], [0], [dnl - table=??(ls_in_lb ), priority=0 , match=(1), action=(next;) - table=??(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 10.0.0.2), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.10);) - ]) -@@ -6561,7 +6566,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], - table=??(ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) - ]) - --AT_CHECK([grep -e "ls_in_lb" lsflows | sed 's/table=../table=??/' | sort], [0], [dnl -+AT_CHECK([grep -e "ls_in_lb " lsflows | sed 's/table=../table=??/' | sort], [0], [dnl - table=??(ls_in_lb ), priority=0 , match=(1), action=(next;) - table=??(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 10.0.0.2), action=(reg0[[1]] = 0; 
ct_lb_mark(backends=10.0.0.10);) - ]) -@@ -6614,7 +6619,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], - table=??(ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) - ]) - --AT_CHECK([grep -e "ls_in_lb" lsflows | sed 's/table=../table=??/' | sort], [0], [dnl -+AT_CHECK([grep -e "ls_in_lb " lsflows | sed 's/table=../table=??/' | sort], [0], [dnl - table=??(ls_in_lb ), priority=0 , match=(1), action=(next;) - table=??(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 10.0.0.2), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.10);) - ]) -@@ -7582,7 +7587,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl - table=??(ls_in_check_port_sec), priority=100 , match=(vlan.present), action=(drop;) - table=??(ls_in_check_port_sec), priority=50 , match=(1), action=(reg0[[15]] = check_in_port_sec(); next;) - table=??(ls_in_check_port_sec), priority=70 , match=(inport == "localnetport"), action=(set_queue(10); reg0[[15]] = check_in_port_sec(); next;) -- table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p1"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=16);) -+ table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p1"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=18);) - table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p2"), action=(set_queue(10); reg0[[15]] = check_in_port_sec(); next;) - table=??(ls_in_apply_port_sec), priority=0 , match=(1), action=(next;) - table=??(ls_in_apply_port_sec), priority=50 , match=(reg0[[15]] == 1), action=(drop;) -@@ -7619,11 +7624,11 @@ check ovn-nbctl \ - AS_BOX([No chassis registered - use ct_lb_mark and ct_mark.natted]) - check ovn-nbctl --wait=sb sync - AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);) - table=6 (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;) - table=6 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;) -- table=11(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);) -+ table=12(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);) - table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;) - ]) - -@@ -7631,11 +7636,11 @@ AS_BOX([Chassis registered that doesn't support ct_lb_mark - use ct_lb and ct_la - check ovn-sbctl chassis-add hv geneve 127.0.0.1 - check ovn-nbctl --wait=sb sync - AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), 
action=(next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);) - table=6 (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb;) - table=6 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) -- table=11(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb(backends=42.42.42.2);) -+ table=12(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb(backends=42.42.42.2);) - table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) - ]) - -@@ -7643,11 +7648,11 @@ AS_BOX([Chassis upgrades and supports ct_lb_mark - use ct_lb_mark and ct_mark.na - check ovn-sbctl set chassis hv other_config:ct-no-masked-label=true - check ovn-nbctl --wait=sb sync - AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl -- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;) -- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);) - table=6 (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;) - table=6 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;) -- table=11(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);) -+ table=12(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);) - table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;) - ]) - -@@ -7801,11 +7806,11 @@ ovn-sbctl dump-flows S1 > S1flows - AT_CAPTURE_FILE([S0flows]) - AT_CAPTURE_FILE([S1flows]) - --AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl -- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;) -+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) - ]) --AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl -- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;) -+AT_CHECK([grep "ls_in_lb " S1flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) - ]) - - ovn-nbctl --wait=sb set NB_Global . 
options:install_ls_lb_from_router=true -@@ -7816,13 +7821,13 @@ ovn-sbctl dump-flows S1 > S1flows - AT_CAPTURE_FILE([S0flows]) - AT_CAPTURE_FILE([S1flows]) - --AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl -- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;) -- table=11(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);) -+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) -+ table=12(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);) - ]) --AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl -- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;) -- table=11(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);) -+AT_CHECK([grep "ls_in_lb " S1flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) -+ table=12(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);) - ]) - - ovn-sbctl get datapath S0 _uuid > dp_uuids -@@ -7841,14 +7846,137 @@ ovn-sbctl dump-flows S1 > S1flows - AT_CAPTURE_FILE([S0flows]) - AT_CAPTURE_FILE([S1flows]) - --AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl -- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;) -+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) - ]) --AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl -- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;) -+AT_CHECK([grep "ls_in_lb " S1flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) - ]) - - check_column "" sb:load_balancer datapaths name=lb0 - - AT_CLEANUP - ]) -+ -+AT_SETUP([check lb-affinity flows]) -+AT_KEYWORDS([lb-affinity-flows]) -+ovn_start -+ -+ovn-nbctl lr-add R1 -+ovn-nbctl set logical_router R1 options:chassis=hv1 -+ovn-nbctl lrp-add R1 R1-S0 02:ac:10:01:00:01 10.0.0.1/24 -+ovn-nbctl lrp-add R1 R1-S1 02:ac:10:01:01:01 20.0.0.1/24 -+ovn-nbctl lrp-add R1 R1-PUB 02:ac:20:01:01:01 172.16.0.1/24 -+ -+ovn-nbctl ls-add S0 -+ovn-nbctl lsp-add S0 S0-R1 -+ovn-nbctl lsp-set-type S0-R1 router -+ovn-nbctl lsp-set-addresses S0-R1 02:ac:10:01:00:01 -+ovn-nbctl lsp-set-options S0-R1 router-port=R1-S0 -+ -+ovn-nbctl ls-add S1 -+ovn-nbctl lsp-add S1 S1-R1 -+ovn-nbctl lsp-set-type S1-R1 router -+ovn-nbctl lsp-set-addresses S1-R1 02:ac:10:01:01:01 -+ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1 -+ -+# Add load balancers on the logical router R1 -+ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80,20.0.0.2:80 tcp -+ovn-nbctl lr-lb-add R1 lb0 -+ovn-nbctl ls-lb-add S0 lb0 -+ -+ovn-sbctl dump-flows S0 > S0flows -+ovn-sbctl dump-flows R1 > R1flows -+ -+AT_CAPTURE_FILE([S0flows]) -+AT_CAPTURE_FILE([R1flows]) -+ -+AT_CHECK([grep "ls_in_lb_aff_check" S0flows | sort], [0], [dnl -+ table=11(ls_in_lb_aff_check ), priority=0 , match=(1), action=(next;) -+]) -+AT_CHECK([grep "ls_in_lb_aff_learn" S0flows | sort], [0], [dnl -+ table=13(ls_in_lb_aff_learn ), priority=0 , match=(1), action=(next;) -+]) -+ -+AT_CHECK([grep "lr_in_lb_aff_check" R1flows | sort], [0], [dnl -+ table=6 (lr_in_lb_aff_check ), priority=0 , match=(1), action=(next;) -+]) -+AT_CHECK([grep "lr_in_lb_aff_learn" R1flows | sort], [0], [dnl -+ 
table=8 (lr_in_lb_aff_learn ), priority=0 , match=(1), action=(next;) -+]) -+ -+ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60 -+ -+AS_BOX([Test LS flows]) -+ovn-sbctl dump-flows S0 > S0flows -+AT_CAPTURE_FILE([S0flows]) -+ -+AT_CHECK([grep "ls_in_lb_aff_check" S0flows | sort], [0], [dnl -+ table=11(ls_in_lb_aff_check ), priority=0 , match=(1), action=(next;) -+ table=11(ls_in_lb_aff_check ), priority=100 , match=(ct.new && ip4 && reg1 == 172.16.0.10 && reg2[[0..15]] == 80), action=(reg9[[6]] = chk_lb_aff(); next;) -+]) -+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl -+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;) -+ table=12(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);) -+ table=12(ls_in_lb ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0[[1]] = 0; reg1 = 172.16.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.2:80);) -+ table=12(ls_in_lb ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0[[1]] = 0; reg1 = 172.16.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=20.0.0.2:80);) -+]) -+AT_CHECK([grep "ls_in_lb_aff_learn" S0flows | sort], [0], [dnl -+ table=13(ls_in_lb_aff_learn ), priority=0 , match=(1), action=(next;) -+ table=13(ls_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg1 == 172.16.0.10 && reg2[[0..15]] == 80 && ip4.dst == 10.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "10.0.0.2:80", proto = tcp, timeout = 60); /* drop */) -+ table=13(ls_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg1 == 172.16.0.10 && reg2[[0..15]] == 80 && ip4.dst == 20.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "20.0.0.2:80", proto = tcp, timeout = 60); /* drop */) -+]) -+ -+AS_BOX([Test LR flows]) -+ovn-sbctl dump-flows R1 > R1flows -+AT_CAPTURE_FILE([R1flows]) -+ -+AT_CHECK([grep "lr_in_lb_aff_check" R1flows | sort], [0], [dnl -+ table=6 (lr_in_lb_aff_check ), priority=0 , match=(1), action=(next;) -+ table=6 (lr_in_lb_aff_check ), priority=100 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(reg9[[6]] = chk_lb_aff(); next;) -+]) -+AT_CHECK([grep "lr_in_dnat " R1flows | sort], [0], [dnl -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);) -+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=10.0.0.2:80);) -+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=20.0.0.2:80);) -+]) -+AT_CHECK([grep "lr_in_lb_aff_learn" R1flows | sort], [0], [dnl -+ table=8 (lr_in_lb_aff_learn ), priority=0 , match=(1), action=(next;) -+ table=8 (lr_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg0 == 172.16.0.10 && reg9[[16..31]] == 80 && ip4.dst == 10.0.0.2 && tcp.dst == 80), 
action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "10.0.0.2:80", proto = tcp, timeout = 60); /* drop */) -+ table=8 (lr_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg0 == 172.16.0.10 && reg9[[16..31]] == 80 && ip4.dst == 20.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "20.0.0.2:80", proto = tcp, timeout = 60); /* drop */) -+]) -+ -+AS_BOX([Test LR flows - skip_snat=true]) -+check ovn-nbctl --wait=sb set load_balancer lb0 options:skip_snat=true -+ -+ovn-sbctl dump-flows R1 > R1flows_skip_snat -+AT_CAPTURE_FILE([R1flows_skip_snat]) -+ -+AT_CHECK([grep "lr_in_dnat " R1flows_skip_snat | sort], [0], [dnl -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);) -+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80);) -+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80);) -+]) -+ -+check ovn-nbctl remove load_balancer lb0 options skip_snat -+ -+AS_BOX([Test LR flows - lb_force_snat_ip="172.16.0.1"]) -+check ovn-nbctl --wait=sb set logical_router R1 options:lb_force_snat_ip="172.16.0.1" -+ -+ovn-sbctl dump-flows R1 > R1flows_force_snat -+AT_CAPTURE_FILE([R1flows_force_snat]) -+ -+AT_CHECK([grep "lr_in_dnat " R1flows_force_snat | sort], [0], [dnl -+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;) -+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);) -+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80);) -+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80);) -+]) -+ -+AT_CLEANUP -+]) -diff --git a/tests/ovn.at b/tests/ovn.at -index 80e9192ca..63b419154 100644 ---- a/tests/ovn.at -+++ b/tests/ovn.at -@@ -2094,6 +2094,20 @@ reg9[5] = chk_ecmp_nh_mac(); - reg9[5] = chk_ecmp_nh(); - encodes as set_field:0/0x2000->reg10,resubmit(,77),move:NXM_NX_REG10[13]->OXM_OF_PKT_REG4[5] - -+# commit_lb_aff -+commit_lb_aff(vip = "172.16.0.123:8080", backend = "10.0.0.3:8080", proto = tcp, timeout = 30); -+ encodes as learn(table=78,idle_timeout=30,delete_learned,OXM_OF_METADATA[],eth_type=0x800,NXM_OF_IP_SRC[],ip_dst=172.16.0.123,nw_proto=6,tcp_dst=8080,load:0x1->NXM_NX_REG10[14],load:0xa000003->NXM_NX_REG4[],load:0x1f90->NXM_NX_REG8[0..15]) -+ -+commit_lb_aff(vip = "172.16.0.123", 
backend = "10.0.0.3", timeout = 30); -+ encodes as learn(table=78,idle_timeout=30,delete_learned,OXM_OF_METADATA[],eth_type=0x800,NXM_OF_IP_SRC[],ip_dst=172.16.0.123,load:0x1->NXM_NX_REG10[14],load:0xa000003->NXM_NX_REG4[]) -+ -+commit_lb_aff(vip = "[::1]:8080", backend = "[::2]:8080", proto = tcp, timeout = 30); -+ encodes as learn(table=78,idle_timeout=30,delete_learned,OXM_OF_METADATA[],eth_type=0x86dd,NXM_NX_IPV6_SRC[],ipv6_dst=::1,nw_proto=6,tcp_dst=8080,load:0x1->NXM_NX_REG10[14],load:0x2->NXM_NX_XXREG0[],load:0x1f90->NXM_NX_REG8[0..15]) -+ -+# chk_lb_aff() -+reg9[6] = chk_lb_aff(); -+ encodes as set_field:0/0x4000->reg10,resubmit(,78),move:NXM_NX_REG10[14]->OXM_OF_PKT_REG4[6] -+ - # push/pop - push(xxreg0);push(xxreg1[10..20]);push(eth.src);pop(xxreg0[0..47]);pop(xxreg0[48..57]);pop(xxreg1); - formats as push(xxreg0); push(xxreg1[10..20]); push(eth.src); pop(xxreg0[0..47]); pop(xxreg0[48..57]); pop(xxreg1); -@@ -16051,7 +16065,7 @@ ovn-sbctl dump-flows sw0 > sw0-flows - AT_CAPTURE_FILE([sw0-flows]) - - AT_CHECK([grep -E 'ls_(in|out)_acl' sw0-flows |grep reject| sed 's/table=../table=??/' | sort], [0], [dnl -- table=??(ls_out_acl ), priority=2002 , match=(ip), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };) -+ table=??(ls_out_acl ), priority=2002 , match=(ip), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };) - ]) - - -@@ -18619,7 +18633,7 @@ wait_for_ports_up ls1-lp_ext1 - # There should be a flow in hv2 to drop traffic from ls1-lp_ext1 destined - # to router mac. - AT_CHECK([as hv2 ovs-ofctl dump-flows br-int \ --table=30,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \ -+table=32,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \ - grep -c "actions=drop"], [0], [1 - ]) - # Stop ovn-controllers on hv1 and hv3. 
-@@ -20290,7 +20304,7 @@ check_row_count Port_Binding 1 logical_port=sw0-vir virtual_parent=sw0-p1 - wait_for_ports_up sw0-vir - check ovn-nbctl --wait=hv sync - AT_CHECK([test 2 = `cat hv1/ovn-controller.log | grep "pinctrl received packet-in" | \ --grep opcode=BIND_VPORT | grep OF_Table_ID=25 | wc -l`]) -+grep opcode=BIND_VPORT | grep OF_Table_ID=27 | wc -l`]) - - wait_row_count Port_Binding 1 logical_port=sw0-vir6 chassis=$hv1_ch_uuid - check_row_count Port_Binding 1 logical_port=sw0-vir6 virtual_parent=sw0-p1 -@@ -20339,7 +20353,7 @@ eth_dst=00000000ff01 - ip_src=$(ip_to_hex 10 0 0 10) - ip_dst=$(ip_to_hex 172 168 0 101) - send_icmp_packet 1 1 $eth_src $eth_dst $ip_src $ip_dst c4c9 0000000000000000000000 --AT_CHECK([as hv1 ovs-ofctl dump-flows br-int metadata=0x$lr0_dp_key | awk '/table=26, n_packets=1, n_bytes=45/{print $7" "$8}'],[0],[dnl -+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int metadata=0x$lr0_dp_key | awk '/table=28, n_packets=1, n_bytes=45/{print $7" "$8}'],[0],[dnl - priority=80,ip,reg15=0x3,metadata=0x3,nw_src=10.0.0.10 actions=drop - ]) - -@@ -26331,7 +26345,7 @@ ovn-sbctl dump-flows > sbflows - AT_CAPTURE_FILE([sbflows]) - AT_CAPTURE_FILE([offlows]) - OVS_WAIT_UNTIL([ -- as hv1 ovs-ofctl dump-flows br-int table=21 > offlows -+ as hv1 ovs-ofctl dump-flows br-int table=23 > offlows - test $(grep -c "load:0x64->NXM_NX_PKT_MARK" offlows) = 1 && \ - test $(grep -c "load:0x3->NXM_NX_PKT_MARK" offlows) = 1 && \ - test $(grep -c "load:0x4->NXM_NX_PKT_MARK" offlows) = 1 && \ -@@ -26424,12 +26438,12 @@ send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \ - $(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120) - - OVS_WAIT_UNTIL([ -- test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \ -+ test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=23 | \ - grep "load:0x2->NXM_NX_PKT_MARK" -c) - ]) - - AT_CHECK([ -- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \ -+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=23 | \ - grep "load:0x64->NXM_NX_PKT_MARK" -c) - ]) - -@@ -27121,23 +27135,23 @@ check ovn-nbctl --wait=hv sync - - # Ensure ECMP symmetric reply flows are not present on any hypervisor. 
- AT_CHECK([ -- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=15 | \ -+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=17 | \ - grep "priority=100" | \ - grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c) - ]) - AT_CHECK([ -- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \ -+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=25 | \ - grep "priority=200" | \ - grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c) - ]) - - AT_CHECK([ -- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=15 | \ -+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=17 | \ - grep "priority=100" | \ - grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c) - ]) - AT_CHECK([ -- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=21 | \ -+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=25 | \ - grep "priority=200" | \ - grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c) - ]) -@@ -27155,11 +27169,11 @@ AT_CAPTURE_FILE([hv2flows]) - - AT_CHECK([ - for hv in 1 2; do -- grep table=15 hv${hv}flows | \ -+ grep table=17 hv${hv}flows | \ - grep "priority=100" | \ - grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_MARK\\[[16..31\\]]))" - -- grep table=23 hv${hv}flows | \ -+ grep table=25 hv${hv}flows | \ - grep "priority=200" | \ - grep -c "move:NXM_NX_CT_LABEL\\[[\\]]->NXM_NX_XXREG1\\[[\\]],move:NXM_NX_XXREG1\\[[32..79\\]]->NXM_OF_ETH_DST" - done; :], [0], [dnl -@@ -27247,23 +27261,23 @@ check ovn-nbctl --wait=hv sync - - # Ensure ECMP symmetric reply flows are not present on any hypervisor. 
- AT_CHECK([ -- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=15 | \ -+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=17 | \ - grep "priority=100" | \ - grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c) - ]) - AT_CHECK([ -- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \ -+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=25 | \ - grep "priority=200" | \ - grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c) - ]) - - AT_CHECK([ -- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=15 | \ -+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=17 | \ - grep "priority=100" | \ - grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_MARK\\[[16..31\\]]))" -c) - ]) - AT_CHECK([ -- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=21 | \ -+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=25 | \ - grep "priority=200" | \ - grep "actions=move:NXM_NX_CT_LABEL\\[[\\]]->NXM_OF_ETH_DST\\[[\\]]" -c) - ]) -@@ -27280,11 +27294,11 @@ AT_CAPTURE_FILE([hv2flows]) - - AT_CHECK([ - for hv in 1 2; do -- grep table=15 hv${hv}flows | \ -+ grep table=17 hv${hv}flows | \ - grep "priority=100" | \ - grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_MARK\\[[16..31\\]]))" - -- grep table=23 hv${hv}flows | \ -+ grep table=25 hv${hv}flows | \ - grep "priority=200" | \ - grep -c "move:NXM_NX_CT_LABEL\\[[\\]]->NXM_NX_XXREG1\\[[\\]],move:NXM_NX_XXREG1\\[[32..79\\]]->NXM_OF_ETH_DST" - done; :], [0], [dnl -@@ -27748,7 +27762,7 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep "actions=controller" | grep - ]) - - # The packet should've been dropped in the lr_in_arp_resolve stage. --AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=23, n_packets=1,.* priority=1,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl -+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=25, n_packets=1,.* priority=1,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl - 1 - ]) - -@@ -28428,7 +28442,11 @@ ovs-vsctl add-br br-phys - ovn_attach n1 br-phys 192.168.0.1 24 geneve - - # Get the encap rec, should be just one - with geneve/192.168.0.1 --encap_rec=$(ovn-sbctl --data=bare --no-heading --column encaps list chassis hv1) -+# Skip initial null encap -+OVS_WAIT_UNTIL( -+ [encap_rec=$(ovn-sbctl --bare --no-heading --columns encaps list chassis hv1) -+ echo "encap_rec = $encap_rec" -+ test $encap_rec]) - - # Set multiple IPs - as hv1 -@@ -28437,9 +28455,10 @@ ovs-vsctl \ - - # Check if the encap_rec changed - should have, no need to - # compare the exact values. --encap_rec_mvtep=$(ovn-sbctl --data=bare --no-heading --column encaps list chassis hv1) -- --AT_CHECK([test "$encap_rec" != "$encap_rec_mvtep"], [0], []) -+OVS_WAIT_UNTIL( -+ [encap_rec_mvtep=$(ovn-sbctl --bare --no-heading --columns encaps list chassis hv1) -+ echo "encap_rec_mvtep = $encap_rec_mvtep" -+ test "$encap_rec" != "$encap_rec_mvtep"]) - - # now, wait for a couple of secs - should be enough time, I suppose. 
- sleep 2 -@@ -31261,15 +31280,15 @@ done - check ovn-nbctl --wait=hv sync - - # hv0 should see flows for lsp1 but not lsp2 --AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ignore]) --AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=25 | grep 10.0.2.2], [1]) -+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [0], [ignore]) -+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=27 | grep 10.0.2.2], [1]) - # hv2 should see flows for lsp2 but not lsp1 --AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.2.2], [0], [ignore]) --AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [1]) -+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.2.2], [0], [ignore]) -+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [1]) - - # Change lrp_lr_ls1 to a regular lrp, hv2 should see flows for lsp1 - check ovn-nbctl --wait=hv lrp-del-gateway-chassis lrp_lr_ls1 hv1 --AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ignore]) -+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [0], [ignore]) - - # Change it back, and trigger recompute to make sure extra flows are removed - # from hv2 (recompute is needed because currently I-P adds local datapaths but -@@ -31277,11 +31296,11 @@ AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ig - check ovn-nbctl --wait=hv lrp-set-gateway-chassis lrp_lr_ls1 hv1 1 - as hv2 check ovn-appctl -t ovn-controller recompute - ovn-nbctl --wait=hv sync --AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [1]) -+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [1]) - - # Enable dnat_and_snat on lr, and now hv2 should see flows for lsp1. 
- AT_CHECK([ovn-nbctl --wait=hv --gateway-port=lrp_lr_ls1 lr-nat-add lr dnat_and_snat 192.168.0.1 10.0.1.3 lsp1 f0:00:00:00:00:03]) --AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ignore]) -+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [0], [ignore]) - - OVN_CLEANUP([hv1],[hv2]) - AT_CLEANUP -@@ -32889,3 +32908,231 @@ check ovn-nbctl --wait=hv sync - OVN_CLEANUP([hv1]) - AT_CLEANUP - ]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([ovn-controller: batch add port and delete port in same IDL]) -+ovn_start -+net_add n1 -+ -+sim_add hv1 -+as hv1 -+ovs-vsctl add-br br-phys -+ovn_attach n1 br-phys 192.168.0.1 -+check ovs-vsctl add-port br-int p1 -+ -+check ovs-vsctl set interface p1 external-ids:iface-id=sw0-port1 -+check ovn-nbctl --wait=hv sync -+ovn-appctl debug/pause -+OVS_WAIT_UNTIL([test x$(as hv1 ovn-appctl -t ovn-controller debug/status) = "xpaused"]) -+ -+check ovn-nbctl ls-add sw0 -- lsp-add sw0 sw0-port1 -+check ovn-nbctl lsp-del sw0-port1 -+check ovn-nbctl --wait=sb sync -+ -+ovn-appctl debug/resume -+check ovn-nbctl --wait=hv sync -+ -+check ovn-nbctl ls-del sw0 -+check ovn-nbctl --wait=hv sync -+OVN_CLEANUP([hv1]) -+AT_CLEANUP -+]) -+ -+m4_define([MULTIPLE_OVS_INT], -+ [OVN_FOR_EACH_NORTHD([ -+ AT_SETUP([ovn-controller: Multiple OVS interfaces bound to same logical port ($1)]) -+ ovn_start -+ net_add n1 -+ -+ sim_add hv1 -+ as hv1 -+ ovs-vsctl add-br br-phys -+ ovn_attach n1 br-phys 192.168.0.1 -+ -+ get_flows() -+ { -+ cookie=${1} -+ ovs-ofctl dump-flows br-int | grep $cookie | -+ sed -e 's/duration=[[0-9.]]*s, //g' | -+ sed -e 's/idle_age=[[0-9]]*, //g' | -+ sed -e 's/n_packets=[[0-9]]*, //g' | -+ sed -e 's/n_bytes=[[0-9]]*, //g' -+ } -+ -+ check ovn-nbctl ls-add ls -+ check ovn-nbctl lsp-add ls lp -+ if test X$1 != X; then -+ check ovn-nbctl lsp-set-type lp $1 -+ fi -+ check ovn-nbctl lsp-set-addresses lp "00:00:00:01:01:02 192.168.1.2" -+ -+ check ovn-nbctl lsp-add ls vm1 -+ check ovn-nbctl lsp-set-addresses vm1 "00:00:00:01:01:11 192.168.1.11" -+ check ovs-vsctl add-port br-int vm1 -- set interface vm1 type=internal external_ids:iface-id=vm1 -+ -+ check ovn-nbctl --wait=hv sync -+ -+ check ovs-vsctl add-port br-int lpold -- set interface lpold type=internal -+ check ovs-vsctl set interface lpold external_ids:iface-id=lp -+ -+ OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns _uuid find port_binding logical_port=lp) != x]) -+ echo ====================================================== -+ echo === Flows after iface-id set for the old interface === -+ echo ====================================================== -+ COOKIE=$(ovn-sbctl find port_binding logical_port=lp|grep uuid|cut -d: -f2| cut -c1-8 | sed 's/^\s*0\{0,8\}//') -+ -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpold) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ nb_flows=`ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l` -+ echo $nb_flows "flows after iface-id set for old interface" -+ -+ echo ====================================================== -+ echo === Flows after iface-id set for the new interface === -+ echo ====================================================== -+ # Set external_ids:iface-id within same transaction as adding the port. -+ # This will generally cause ovn-controller to get initially notified of ovs interface changes with ofport == 0. 
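-+ # (ovn-controller is expected to hold off repointing the logical port's
-+ # flows until ovs-vswitchd assigns a real ofport to lpnew; the waits below
-+ # verify that the flows eventually output to lpnew's ofport and that the
-+ # overall flow count does not change.)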
-+ check ovs-vsctl add-port br-int lpnew -- set interface lpnew type=internal -- set interface lpnew external_ids:iface-id=lp -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l) -+ flows_lpnew=$(get_flows $COOKIE) -+ -+ echo ====================================================== -+ echo ======= Flows after old interface is deleted ========= -+ echo ====================================================== -+ check ovs-vsctl del-port br-int lpold -+ # We do not expect changes, so let's wait for controller to get time to process any update -+ check ovn-nbctl --wait=hv sync -+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l) -+ flows_after_deletion=$(get_flows $COOKIE) -+ check test "$flows_lpnew" = "$flows_after_deletion" -+ -+ echo ====================================================== -+ echo ======= Flows after lptemp interface is created ==== -+ echo ====================================================== -+ # Set external_ids:iface-id in a different transaction as adding the port. -+ # This will generally cause ovn-controller to get notified of ovs interface changes with a proper ofport. -+ check ovs-vsctl add-port br-int lptemp -- set Interface lptemp type=internal -+ check ovs-vsctl set Interface lptemp external_ids:iface-id=lp -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lptemp) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l) -+ -+ echo ====================================================== -+ echo ======= Flows after lptemp interface is deleted ====== -+ echo ====================================================== -+ check ovs-vsctl del-port br-int lptemp -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew) -+ echo $ofport -+ ovs-ofctl dump-flows br-int | grep $COOKIE -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l) -+ flows_after_deletion=$(get_flows $COOKIE) -+ check test "$flows_lpnew" = "$flows_after_deletion" -+ -+ echo ====================================================== -+ echo ======= Flows after new interface is deleted ========= -+ echo ====================================================== -+ check ovs-vsctl del-port br-int lpnew -+ OVS_WAIT_UNTIL([ -+ nb_flows=`ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l` -+ test "${nb_flows}" = 0 -+ ]) -+ -+ echo ====================================================== -+ echo ======= Three interfaces bound to the same port ====== -+ echo ====================================================== -+ check ovs-vsctl add-port br-int lpold -- set interface lpold type=internal -+ check ovs-vsctl set interface lpold external_ids:iface-id=lp -+ check ovs-vsctl add-port br-int lpnew -- set interface lpnew type=internal -+ check ovs-vsctl set interface lpnew external_ids:iface-id=lp -+ -+ # Wait for lpnew flows to be installed -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ flows_lpnew=$(get_flows $COOKIE) -+ nb_flows=`ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l` -+ 
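-+ # Bind a third interface (lptemp) to the same logical port while lpold and
-+ # lpnew are still plugged in; the test then removes the extra interfaces and
-+ # checks that the flows settle back on lpnew.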
-+ check ovs-vsctl add-port br-int lptemp -- set Interface lptemp type=internal -+ check ovs-vsctl set Interface lptemp external_ids:iface-id=lp -+ -+ # Wait for lptemp flows to be installed -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lptemp) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ -+ # Delete both lpold and lptemp to go to a stable situation -+ check ovs-vsctl del-port br-int lptemp -+ check ovs-vsctl del-port br-int lpold -+ -+ OVS_WAIT_UNTIL([ -+ test 0 = $(ovs-vsctl show | grep "Port lpold" | wc -l) -+ ]) -+ -+ # Wait for correct/lpnew flows to be installed -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l) -+ flows_after_deletion=$(get_flows $COOKIE) -+ check test "$flows_lpnew" = "$flows_after_deletion" -+ -+ # Check that recompute still works -+ check ovn-appctl -t ovn-controller recompute -+ OVS_WAIT_UNTIL([ -+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew) -+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport" -+ ]) -+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l) -+ flows_after_deletion=$(get_flows $COOKIE) -+ check test "$flows_lpnew" = "$flows_after_deletion" -+ -+ OVN_CLEANUP([hv1]) -+ AT_CLEANUP -+ ])]) -+ -+MULTIPLE_OVS_INT([localport]) -+MULTIPLE_OVS_INT([]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([feature inactivity probe]) -+ovn_start -+net_add n1 -+ -+sim_add hv1 -+as hv1 -+check ovs-vsctl add-br br-phys -+ovn_attach n1 br-phys 192.168.0.1 -+ -+dnl Ensure that there are at least 3 openflow connections. -+OVS_WAIT_UNTIL([test "$(grep -c 'negotiated OpenFlow version' hv1/ovs-vswitchd.log)" -eq "3"]) -+ -+dnl "Wait" 3 times 60 seconds and ensure ovn-controller writes to the -+dnl openflow connections in the meantime. This should allow ovs-vswitchd -+dnl to probe the openflow connections at least twice. -+ -+as hv1 ovs-appctl time/warp 60000 -+check ovn-nbctl --wait=hv sync -+ -+as hv1 ovs-appctl time/warp 60000 -+check ovn-nbctl --wait=hv sync -+ -+as hv1 ovs-appctl time/warp 60000 -+check ovn-nbctl --wait=hv sync -+ -+AT_CHECK([test -z "`grep disconnecting hv1/ovs-vswitchd.log`"]) -+OVN_CLEANUP([hv1]) -+AT_CLEANUP -+]) -diff --git a/tests/system-common-macros.at b/tests/system-common-macros.at -index 616a87fcf..8e6cb415c 100644 ---- a/tests/system-common-macros.at -+++ b/tests/system-common-macros.at -@@ -44,15 +44,38 @@ m4_define([NS_CHECK_EXEC], - # appropriate type, and allows additional arguments to be passed. - m4_define([ADD_BR], [ovs-vsctl _ADD_BR([$1]) -- $2]) - --# ADD_INT([port], [namespace], [ovs-br], [ip_addr]) -+# ADD_INT([port], [namespace], [ovs-br], [ip_addr] [ip6_addr]) - # - # Add an internal port to 'ovs-br', then shift it into 'namespace' and - # configure it with 'ip_addr' (specified in CIDR notation). 
-+# Optionally add an ipv6 address - m4_define([ADD_INT], - [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal]) - AT_CHECK([ip link set $1 netns $2]) - NS_CHECK_EXEC([$2], [ip addr add $4 dev $1]) - NS_CHECK_EXEC([$2], [ip link set dev $1 up]) -+ if test -n "$5"; then -+ NS_CHECK_EXEC([$2], [ip -6 addr add $5 dev $1]) -+ fi -+ ] -+) -+ -+# NS_ADD_INT([port], [namespace], [ovs-br], [ip_addr] [mac_addr] [ip6_addr] [default_gw] [default_ipv6_gw]) -+# Create a namespace -+# Add an internal port to 'ovs-br', then shift it into 'namespace'. -+# Configure it with 'ip_addr' (specified in CIDR notation) and ip6_addr. -+# Set mac_addr -+# Add default gw for ipv4 and ipv6 -+m4_define([NS_ADD_INT], -+ [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal external_ids:iface-id=$1]) -+ ADD_NAMESPACES($2) -+ AT_CHECK([ip link set $1 netns $2]) -+ NS_CHECK_EXEC([$2], [ip link set $1 address $5]) -+ NS_CHECK_EXEC([$2], [ip link set dev $1 up]) -+ NS_CHECK_EXEC([$2], [ip addr add $4 dev $1]) -+ NS_CHECK_EXEC([$2], [ip addr add $6 dev $1]) -+ NS_CHECK_EXEC([$2], [ip route add default via $7 dev $1]) -+ NS_CHECK_EXEC([$2], [ip -6 route add default via $8 dev $1]) - ] - ) - -@@ -333,4 +356,166 @@ m4_define([OVS_CHECK_CT_CLEAR], - - # OVS_CHECK_CT_ZERO_SNAT() - m4_define([OVS_CHECK_CT_ZERO_SNAT], -- [AT_SKIP_IF([! grep -q "Datapath supports ct_zero_snat" ovs-vswitchd.log])])) -+ [AT_SKIP_IF([! grep -q "Datapath supports ct_zero_snat" ovs-vswitchd.log])]) -+ -+# OVN_TEST_IPV6_PREFIX_DELEGATION() -+m4_define([OVN_TEST_IPV6_PREFIX_DELEGATION], -+[ -+ovn_start -+OVS_TRAFFIC_VSWITCHD_START() -+ -+ADD_BR([br-int]) -+ADD_BR([br-ext]) -+ -+ovs-ofctl add-flow br-ext action=normal -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . 
external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# Start ovn-controller -+start_daemon ovn-controller -+ -+ADD_NAMESPACES(sw01) -+ADD_VETH(sw01, sw01, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \ -+ "192.168.1.1") -+ADD_NAMESPACES(sw11) -+ADD_VETH(sw11, sw11, br-int, "192.168.2.2/24", "f0:00:00:02:02:03", \ -+ "192.168.2.1") -+ADD_NAMESPACES(server) -+ADD_VETH(s1, server, br-ext, "2001:1db8:3333::2/64", "f0:00:00:01:02:05", \ -+ "2001:1db8:3333::1") -+ -+if test X"$1" = X"GR"; then -+ ovn-nbctl create Logical_Router name=R1 options:chassis=hv1 -+else -+ ovn-nbctl lr-add R1 -+fi -+ -+ovn-nbctl ls-add sw0 -+ovn-nbctl ls-add sw1 -+ovn-nbctl ls-add public -+ -+ovn-nbctl lrp-add R1 rp-sw0 00:00:01:01:02:03 192.168.1.1/24 -+ovn-nbctl lrp-add R1 rp-sw1 00:00:03:01:02:03 192.168.2.1/24 -+ovn-nbctl lrp-add R1 rp-public 00:00:02:01:02:03 172.16.1.1/24 -+ -+if test X"$1" != X"GR"; then -+ ovn-nbctl lrp-set-gateway-chassis rp-public hv1 -+fi -+ -+ovn-nbctl lsp-add sw0 sw0-rp -- set Logical_Switch_Port sw0-rp \ -+ type=router options:router-port=rp-sw0 \ -+ -- lsp-set-addresses sw0-rp router -+ovn-nbctl lsp-add sw1 sw1-rp -- set Logical_Switch_Port sw1-rp \ -+ type=router options:router-port=rp-sw1 \ -+ -- lsp-set-addresses sw1-rp router -+ -+ovn-nbctl lsp-add public public-rp -- set Logical_Switch_Port public-rp \ -+ type=router options:router-port=rp-public \ -+ -- lsp-set-addresses public-rp router -+ -+ovn-nbctl lsp-add sw0 sw01 \ -+ -- lsp-set-addresses sw01 "f0:00:00:01:02:03 192.168.1.2" -+ -+ovn-nbctl lsp-add sw1 sw11 \ -+ -- lsp-set-addresses sw11 "f0:00:00:02:02:03 192.168.2.2" -+ -+OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep 2001:1db8:3333::2 | grep tentative)" = ""]) -+OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep fe80 | grep tentative)" = ""]) -+ -+AT_CHECK([ovs-vsctl set Open_vSwitch . 
external-ids:ovn-bridge-mappings=phynet:br-ext]) -+ovn-nbctl lsp-add public public1 \ -+ -- lsp-set-addresses public1 unknown \ -+ -- lsp-set-type public1 localnet \ -+ -- lsp-set-options public1 network_name=phynet -+ -+ovn-nbctl set logical_router_port rp-public options:prefix_delegation=true -+ovn-nbctl set logical_router_port rp-public options:prefix=true -+ovn-nbctl set logical_router_port rp-sw0 options:prefix=true -+ovn-nbctl set logical_router_port rp-sw1 options:prefix=true -+ -+OVN_POPULATE_ARP -+ -+ovn-nbctl --wait=hv sync -+ -+cat > /etc/dhcp/dhcpd.conf < pkt.pcap &]) -+ -+NETNS_DAEMONIZE([server], [dhcpd -6 -f s1 > dhcpd.log 2>&1], [dhcpd.pid]) -+ovn-nbctl --wait=hv sync -+ -+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c4-15)" = ""]) -+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c4-15)" = ""]) -+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c4-15)" = ""]) -+ -+AT_CHECK([ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c3-16], [0], [dnl -+[2001:1db8:3333] -+]) -+AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl -+[2001:1db8:3333] -+]) -+AT_CHECK([ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c3-16], [0], [dnl -+[2001:1db8:3333] -+]) -+ -+prefix=$(ovn-nbctl list logical_router_port rp-public | awk -F/ '/ipv6_prefix/{print substr($ 1,25,9)}' | sed 's/://g') -+ovn-nbctl list logical_router_port rp-public > /tmp/rp-public -+ovn-nbctl set logical_router_port rp-sw0 options:prefix=false -+ovn-nbctl set logical_router_port rp-sw1 options:prefix=false -+# Renew message -+NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x05 and ip6[[113:4]]=0x${prefix} > renew.pcap &]) -+# Reply message with Status OK -+NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x07 and ip6[[81:4]]=0x${prefix} > reply.pcap &]) -+ -+OVS_WAIT_UNTIL([ -+ total_pkts=$(cat renew.pcap | wc -l) -+ test "${total_pkts}" = "1" -+]) -+ -+OVS_WAIT_UNTIL([ -+ total_pkts=$(cat reply.pcap | wc -l) -+ test "${total_pkts}" = "1" -+]) -+ -+kill $(pidof tcpdump) -+ -+ovn-nbctl set logical_router_port rp-sw0 options:prefix=false -+ovn-nbctl clear logical_router_port rp-sw0 ipv6_prefix -+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16)" = "[2001:1db8:3333]"]) -+AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl -+[] -+]) -+ -+kill $(pidof ovn-controller) -+ -+as ovn-sb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as ovn-nb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as northd -+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+ -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d -+/failed to query port patch-.*/d -+/.*terminating with signal 15.*/d"]) -+])) -diff --git a/tests/system-ovn.at b/tests/system-ovn.at -index 8acfb3e39..161c2823e 100644 ---- a/tests/system-ovn.at -+++ b/tests/system-ovn.at -@@ -5272,158 +5272,22 @@ AT_CLEANUP - ]) - - OVN_FOR_EACH_NORTHD([ --AT_SETUP([IPv6 prefix delegation]) -+AT_SETUP([IPv6 prefix delegation - distributed router]) - AT_SKIP_IF([test $HAVE_DHCPD = no]) - AT_SKIP_IF([test $HAVE_TCPDUMP = no]) - AT_KEYWORDS([ovn-ipv6-prefix_d]) - --ovn_start --OVS_TRAFFIC_VSWITCHD_START() -- --ADD_BR([br-int]) --ADD_BR([br-ext]) -- --ovs-ofctl add-flow br-ext action=normal --# Set external-ids in br-int needed for ovn-controller --ovs-vsctl \ -- -- set Open_vSwitch . external-ids:system-id=hv1 \ -- -- set Open_vSwitch . 
external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -- -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -- -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -- -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -- --# Start ovn-controller --start_daemon ovn-controller -- --ovn-nbctl lr-add R1 -- --ovn-nbctl ls-add sw0 --ovn-nbctl ls-add sw1 --ovn-nbctl ls-add public -- --ovn-nbctl lrp-add R1 rp-sw0 00:00:01:01:02:03 192.168.1.1/24 --ovn-nbctl lrp-add R1 rp-sw1 00:00:03:01:02:03 192.168.2.1/24 --ovn-nbctl lrp-add R1 rp-public 00:00:02:01:02:03 172.16.1.1/24 \ -- -- lrp-set-gateway-chassis rp-public hv1 -- --ovn-nbctl lsp-add sw0 sw0-rp -- set Logical_Switch_Port sw0-rp \ -- type=router options:router-port=rp-sw0 \ -- -- lsp-set-addresses sw0-rp router --ovn-nbctl lsp-add sw1 sw1-rp -- set Logical_Switch_Port sw1-rp \ -- type=router options:router-port=rp-sw1 \ -- -- lsp-set-addresses sw1-rp router -- --ovn-nbctl lsp-add public public-rp -- set Logical_Switch_Port public-rp \ -- type=router options:router-port=rp-public \ -- -- lsp-set-addresses public-rp router -- --ADD_NAMESPACES(sw01) --ADD_VETH(sw01, sw01, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \ -- "192.168.1.1") --ovn-nbctl lsp-add sw0 sw01 \ -- -- lsp-set-addresses sw01 "f0:00:00:01:02:03 192.168.1.2" -- --ADD_NAMESPACES(sw11) --ADD_VETH(sw11, sw11, br-int, "192.168.2.2/24", "f0:00:00:02:02:03", \ -- "192.168.2.1") --ovn-nbctl lsp-add sw1 sw11 \ -- -- lsp-set-addresses sw11 "f0:00:00:02:02:03 192.168.2.2" -- --ADD_NAMESPACES(server) --ADD_VETH(s1, server, br-ext, "2001:1db8:3333::2/64", "f0:00:00:01:02:05", \ -- "2001:1db8:3333::1") -- --OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep 2001:1db8:3333::2 | grep tentative)" = ""]) --OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep fe80 | grep tentative)" = ""]) -- --AT_CHECK([ovs-vsctl set Open_vSwitch . 
external-ids:ovn-bridge-mappings=phynet:br-ext]) --ovn-nbctl lsp-add public public1 \ -- -- lsp-set-addresses public1 unknown \ -- -- lsp-set-type public1 localnet \ -- -- lsp-set-options public1 network_name=phynet -- --ovn-nbctl set logical_router_port rp-public options:prefix_delegation=true --ovn-nbctl set logical_router_port rp-public options:prefix=true --ovn-nbctl set logical_router_port rp-sw0 options:prefix=true --ovn-nbctl set logical_router_port rp-sw1 options:prefix=true -- --OVN_POPULATE_ARP -- --ovn-nbctl --wait=hv sync -- --cat > /etc/dhcp/dhcpd.conf < dhcpd.log 2>&1], [dhcpd.pid]) --ovn-nbctl --wait=hv sync -- --OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c4-15)" = ""]) --OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c4-15)" = ""]) --OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c4-15)" = ""]) -- --AT_CHECK([ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c3-16], [0], [dnl --[2001:1db8:3333] --]) --AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl --[2001:1db8:3333] --]) --AT_CHECK([ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c3-16], [0], [dnl --[2001:1db8:3333] --]) -- --prefix=$(ovn-nbctl list logical_router_port rp-public | awk -F/ '/ipv6_prefix/{print substr($1,25,9)}' | sed 's/://g') --ovn-nbctl set logical_router_port rp-sw0 options:prefix=false --ovn-nbctl set logical_router_port rp-sw1 options:prefix=false -- --# Renew message --NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x05 and ip6[[113:4]]=0x${prefix} > renew.pcap &]) --# Reply message with Status OK --NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x07 and ip6[[81:4]]=0x${prefix} > reply.pcap &]) -- --OVS_WAIT_UNTIL([ -- total_pkts=$(cat renew.pcap | wc -l) -- test "${total_pkts}" = "1" --]) -- --OVS_WAIT_UNTIL([ -- total_pkts=$(cat reply.pcap | wc -l) -- test "${total_pkts}" = "1" --]) -- --kill $(pidof tcpdump) -- --ovn-nbctl set logical_router_port rp-sw0 options:prefix=false --ovn-nbctl clear logical_router_port rp-sw0 ipv6_prefix --OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16)" = "[2001:1db8:3333]"]) --AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl --[] -+OVN_TEST_IPV6_PREFIX_DELEGATION(DGP) -+AT_CLEANUP - ]) - --kill $(pidof ovn-controller) -- --as ovn-sb --OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -- --as ovn-nb --OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -- --as northd --OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([IPv6 prefix delegation - gw router]) -+AT_SKIP_IF([test $HAVE_DHCPD = no]) -+AT_SKIP_IF([test $HAVE_TCPDUMP = no]) -+AT_KEYWORDS([ovn-ipv6-prefix_d]) - --as --OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d --/.*terminating with signal 15.*/d"]) -+OVN_TEST_IPV6_PREFIX_DELEGATION(GR) - AT_CLEANUP - ]) - -@@ -6489,8 +6353,12 @@ OVS_WAIT_UNTIL([tc qdisc show | grep -q 'htb 1: dev ovs-public']) - OVS_WAIT_UNTIL([tc class show dev ovs-public | \ - grep -q 'class htb .* rate 200Kbit ceil 300Kbit burst 375000b cburst 375000b']) - --AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_min_rate=200000]) -+ - AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_max_rate=300000]) -+OVS_WAIT_UNTIL([tc class show dev ovs-public | \ -+ grep -q 'class htb .* rate 200Kbit ceil 34359Mbit burst 375000b .*']) -+ -+AT_CHECK([ovn-nbctl remove Logical_Switch_Port public 
options qos_min_rate=200000]) - AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_burst=3000000]) - OVS_WAIT_UNTIL([test "$(tc qdisc show | grep 'htb 1: dev ovs-public')" = ""]) - -@@ -8343,3 +8211,985 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d - - AT_CLEANUP - ]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([SNAT in gateway router mode]) -+AT_KEYWORDS([ovnnat]) -+ -+CHECK_CONNTRACK() -+CHECK_CONNTRACK_NAT() -+ovn_start -+OVS_TRAFFIC_VSWITCHD_START() -+ -+ADD_BR([br-int]) -+check ovs-ofctl add-flow br0 action=normal -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# Start ovn-controller -+start_daemon ovn-controller -+ -+check ip link set br0 up -+check ovs-vsctl set open . external-ids:ovn-bridge-mappings=provider:br0 -+ -+check ovn-nbctl ls-add ls1 -+check ovn-nbctl lsp-add ls1 ls1p1 -+check ovn-nbctl lsp-set-addresses ls1p1 "00:00:00:01:01:01 192.168.1.1 2001::1" -+check ovn-nbctl lsp-add ls1 ls1p2 -+check ovn-nbctl lsp-set-addresses ls1p2 "00:00:00:01:01:02 192.168.1.2 2001::2" -+ -+check ovn-nbctl lr-add lr1 -+check ovn-nbctl lrp-add lr1 lr1-ls1 00:00:00:00:00:01 192.168.1.254/24 2001::a/64 -+check ovn-nbctl lsp-add ls1 ls1-lr1 -+check ovn-nbctl lsp-set-addresses ls1-lr1 "00:00:00:00:00:01 192.168.1.254 2001::a" -+check ovn-nbctl lsp-set-type ls1-lr1 router -+check ovn-nbctl lsp-set-options ls1-lr1 router-port=lr1-ls1 -+ -+check ovn-nbctl set logical_router lr1 options:chassis=hv1 -+ -+check ovn-nbctl lrp-add lr1 lr1-pub 00:00:00:00:0f:01 172.16.1.254/24 1711::a/64 -+check ovn-nbctl ls-add pub -+check ovn-nbctl lsp-add pub pub-lr1 -+check ovn-nbctl lsp-set-type pub-lr1 router -+check ovn-nbctl lsp-set-options pub-lr1 router-port=lr1-pub -+check ovn-nbctl lsp-set-addresses pub-lr1 router -+ -+check ovn-nbctl lsp-add pub ln -- lsp-set-options ln network_name=provider -+check ovn-nbctl lsp-set-type ln localnet -+check ovn-nbctl lsp-set-addresses ln unknown -+ -+check ovn-nbctl lr-nat-add lr1 snat 172.16.1.10 192.168.1.0/24 -+check ovn-nbctl lr-nat-add lr1 snat 1711::10 2001::/64 -+ -+NS_ADD_INT(ls1p1, ls1p1, br-int, "192.168.1.1/24", "00:00:00:01:01:01", "2001::1/64", "192.168.1.254", "2001::a" ) -+NS_ADD_INT(ls1p2, ls1p2, br-int, "192.168.1.2/24", "00:00:00:01:01:02", "2001::2/64", "192.168.1.254", "2001::a" ) -+ -+ADD_NAMESPACES(ext1) -+ADD_INT(ext1, ext1, br0, 172.16.1.1/24, 1711::1/64) -+check ovn-nbctl --wait=hv sync -+wait_for_ports_up -+OVS_WAIT_UNTIL([test "$(ip netns exec ls1p1 ip a | grep 2001::1 | grep tentative)" = ""]) -+OVS_WAIT_UNTIL([test "$(ip netns exec ls1p2 ip a | grep 2002::1 | grep tentative)" = ""]) -+ -+NS_CHECK_EXEC([ls1p1], [ping -q -c 3 -i 0.3 -w 2 172.16.1.1 | FORMAT_PING], \ -+[0], [dnl -+3 packets transmitted, 3 received, 0% packet loss, time 0ms -+]) -+ -+NS_CHECK_EXEC([ls1p1], [ping6 -v -q -c 3 -i 0.3 -w 2 1711::1 | FORMAT_PING], \ -+[0], [dnl -+3 packets transmitted, 3 received, 0% packet loss, time 0ms -+]) -+ -+OVS_APP_EXIT_AND_WAIT([ovn-controller]) -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d -+/connection dropped.*/d -+/removing policing failed: No such device/d"]) -+AT_CLEANUP -+]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([mcast flow count]) 
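-+# Join a large number of IPv4 multicast groups from two ports of a
-+# snooping-enabled logical switch and verify that the corresponding IGMP
-+# flows get installed, without ovn-northd ever logging
-+# "Too many active mcast flows" (checked explicitly at the end of the test).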
-+AT_KEYWORDS([ovnigmp IP-multicast]) -+AT_SKIP_IF([test $HAVE_TCPDUMP = no]) -+ovn_start -+ -+OVS_TRAFFIC_VSWITCHD_START() -+ADD_BR([br-int]) -+ -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# Start ovn-controller -+start_daemon ovn-controller -+ -+check ovn-nbctl ls-add ls -+check ovn-nbctl lsp-add ls vm1 -+check ovn-nbctl lsp-set-addresses vm1 00:00:00:00:00:01 -+check ovn-nbctl lsp-add ls vm2 -+check ovn-nbctl lsp-set-addresses vm2 00:00:00:00:00:02 -+check ovn-nbctl lsp-add ls vm3 -+check ovn-nbctl lsp-set-addresses vm3 00:00:00:00:00:03 -+ -+check ovn-nbctl set logical_switch ls other_config:mcast_querier=false other_config:mcast_snoop=true other_config:mcast_query_interval=30 other_config:mcast_eth_src=00:00:00:00:00:05 other_config:mcast_ip4_src=42.42.42.5 other_config:mcast_ip6_src=fe80::1 other_config:mcast_idle_timeout=3000 -+ovn-sbctl list ip_multicast -+ -+wait_igmp_flows_installed() -+{ -+ OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int table=33 | \ -+ grep 'priority=90' | grep "nw_dst=$1"]) -+} -+ -+ADD_NAMESPACES(vm1) -+ADD_INT([vm1], [vm1], [br-int], [42.42.42.1/24]) -+NS_CHECK_EXEC([vm1], [ip link set vm1 address 00:00:00:00:00:01], [0]) -+NS_CHECK_EXEC([vm1], [ip route add default via 42.42.42.5], [0]) -+check ovs-vsctl set Interface vm1 external_ids:iface-id=vm1 -+ -+ADD_NAMESPACES(vm2) -+ADD_INT([vm2], [vm2], [br-int], [42.42.42.2/24]) -+NS_CHECK_EXEC([vm2], [ip link set vm2 address 00:00:00:00:00:02], [0]) -+NS_CHECK_EXEC([vm2], [ip link set lo up], [0]) -+check ovs-vsctl set Interface vm2 external_ids:iface-id=vm2 -+ -+ADD_NAMESPACES(vm3) -+NETNS_DAEMONIZE([vm3], [tcpdump -n -i any -nnleX > vm3.pcap 2>/dev/null], [tcpdump3.pid]) -+ -+ADD_INT([vm3], [vm3], [br-int], [42.42.42.3/24]) -+NS_CHECK_EXEC([vm3], [ip link set vm3 address 00:00:00:00:00:03], [0]) -+NS_CHECK_EXEC([vm3], [ip link set lo up], [0]) -+NS_CHECK_EXEC([vm3], [ip route add default via 42.42.42.5], [0]) -+check ovs-vsctl set Interface vm3 external_ids:iface-id=vm3 -+ -+NS_CHECK_EXEC([vm2], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore]) -+NS_CHECK_EXEC([vm3], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore]) -+wait_for_ports_up -+ -+NS_CHECK_EXEC([vm3], [ip addr add 228.0.0.1 dev vm3 autojoin], [0]) -+wait_igmp_flows_installed 228.0.0.1 -+ -+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 228.0.0.1], [ignore], [ignore]) -+ -+OVS_WAIT_UNTIL([ -+ requests=`grep "ICMP echo request" -c vm3.pcap` -+ test "${requests}" -ge "3" -+]) -+ -+NETNS_DAEMONIZE([vm2], [tcpdump -n -i any -nnleX > vm2.pcap 2>/dev/null], [tcpdump2.pid]) -+ -+for i in `seq 1 40`;do -+ NS_CHECK_EXEC([vm2], [ip addr add 228.1.$i.1 dev vm2 autojoin &], [0]) -+ NS_CHECK_EXEC([vm3], [ip addr add 229.1.$i.1 dev vm3 autojoin &], [0]) -+ # Do not go too fast. 
If going fast, there is a higher chance of sb being busy, causing full recompute (engine has not run) -+ # In this test, we do not want too many recomputes as they might hide I+I related errors -+ sleep 0.2 -+done -+ -+for i in `seq 1 40`;do -+ wait_igmp_flows_installed 228.1.$i.1 -+ wait_igmp_flows_installed 229.1.$i.1 -+done -+ovn-sbctl list multicast_group -+ -+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 228.1.1.1], [ignore], [ignore]) -+ -+OVS_WAIT_UNTIL([ -+ requests=`grep "ICMP echo request" -c vm2.pcap` -+ test "${requests}" -ge "3" -+]) -+ -+# The test could succeed thanks to a lucky northd recompute...after hitting too any flows -+# Double check we never hit error condition -+AT_CHECK([grep -qE 'Too many active mcast flows' northd/ovn-northd.log], [1]) -+ -+OVS_APP_EXIT_AND_WAIT([ovn-controller]) -+ -+as ovn-sb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as ovn-nb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as northd -+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+ -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d -+/connection dropped.*/d -+/removing policing failed: No such device/d"]) -+AT_CLEANUP -+]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([DVR ping router port]) -+AT_KEYWORDS([dvr]) -+ -+ovn_start -+ -+OVS_TRAFFIC_VSWITCHD_START() -+ADD_BR([br-int]) -+ADD_BR([br-ext]) -+ -+check ovs-ofctl add-flow br-ext action=normal -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# Start ovn-controller -+start_daemon ovn-controller -+ -+check ovs-vsctl set open . external_ids:ovn-bridge-mappings=phys:br-ext -+check ovs-vsctl set open . 
external-ids:ovn-chassis-mac-mappings="phys:ee:00:00:00:00:10" -+ -+ -+check ovn-nbctl ls-add internal -+ -+check ovn-nbctl lsp-add internal ln_internal "" 100 -+check ovn-nbctl lsp-set-addresses ln_internal unknown -+check ovn-nbctl lsp-set-type ln_internal localnet -+check ovn-nbctl lsp-set-options ln_internal network_name=phys -+ -+check ovn-nbctl lsp-add internal internal-gw -+check ovn-nbctl lsp-set-type internal-gw router -+check ovn-nbctl lsp-set-addresses internal-gw router -+check ovn-nbctl lsp-set-options internal-gw router-port=gw-internal -+ -+check ovn-nbctl lsp-add internal vif0 -+# Set address as unknown so that LRP has to generate ARP request -+check ovn-nbctl lsp-set-addresses vif0 unknown -+ -+check ovn-nbctl lr-add gw -+check ovn-nbctl lrp-add gw gw-internal 00:00:00:00:20:00 192.168.20.1/24 -+ -+ADD_NAMESPACES(vif0) -+ADD_VETH(vif0, vif0, br-int, "192.168.20.10/24", "00:00:00:00:20:10", "192.168.20.1") -+ -+check ovn-nbctl --wait=sb sync -+check ovn-nbctl --wait=hv sync -+ -+NS_CHECK_EXEC([vif0], [ping -q -c 3 -i 0.3 -w 1 192.168.20.1 | FORMAT_PING], \ -+[0], [dnl -+3 packets transmitted, 3 received, 0% packet loss, time 0ms -+]) -+ -+OVS_APP_EXIT_AND_WAIT([ovn-controller]) -+ -+as ovn-sb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as ovn-nb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as northd -+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+ -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d -+/connection dropped.*/d"]) -+AT_CLEANUP -+]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([load balancing affinity sessions - IPv4]) -+AT_KEYWORDS([ovnlb]) -+ -+CHECK_CONNTRACK() -+CHECK_CONNTRACK_NAT() -+ovn_start -+OVS_TRAFFIC_VSWITCHD_START() -+ADD_BR([br-int]) -+ -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# Start ovn-controller -+start_daemon ovn-controller -+ -+# Logical network: -+# Two LRs - R1 and R2 that are connected to each other via LS "join" -+# in 20.0.0.0/24 network. R1 has switchess foo (192.168.1.0/24) and -+# bar (192.168.2.0/24) connected to it. R2 has alice (172.16.1.0/24) connected -+# to it. R2 is a gateway router on which we add load-balancing rules. 
-+# -+# foo -- R1 -- join - R2 -- alice -+# | -+# bar ---- -+ -+ovn-nbctl create Logical_Router name=R1 -+ovn-nbctl create Logical_Router name=R2 options:chassis=hv1 -+ -+ovn-nbctl ls-add foo -+ovn-nbctl ls-add bar -+ovn-nbctl ls-add alice -+ovn-nbctl ls-add join -+ -+# Connect foo to R1 -+ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 192.168.1.1/24 -+ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \ -+ type=router options:router-port=foo addresses=\"00:00:01:01:02:03\" -+ -+# Connect bar to R1 -+ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 192.168.2.1/24 -+ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \ -+ type=router options:router-port=bar addresses=\"00:00:01:01:02:04\" -+ -+# Connect alice to R2 -+ovn-nbctl lrp-add R2 alice 00:00:02:01:02:03 172.16.1.1/24 -+ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice \ -+ type=router options:router-port=alice addresses=\"00:00:02:01:02:03\" -+ -+# Connect R1 to join -+ovn-nbctl lrp-add R1 R1_join 00:00:04:01:02:03 20.0.0.1/24 -+ovn-nbctl lsp-add join r1-join -- set Logical_Switch_Port r1-join \ -+ type=router options:router-port=R1_join addresses='"00:00:04:01:02:03"' -+ -+# Connect R2 to join -+ovn-nbctl lrp-add R2 R2_join 00:00:04:01:02:04 20.0.0.2/24 -+ovn-nbctl lsp-add join r2-join -- set Logical_Switch_Port r2-join \ -+ type=router options:router-port=R2_join addresses='"00:00:04:01:02:04"' -+ -+# Static routes. -+ovn-nbctl lr-route-add R1 172.16.1.0/24 20.0.0.2 -+ovn-nbctl lr-route-add R2 192.168.0.0/16 20.0.0.1 -+ -+# Logical port 'foo1' in switch 'foo'. -+ADD_NAMESPACES(foo1) -+ADD_VETH(foo1, foo1, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \ -+ "192.168.1.1") -+ovn-nbctl lsp-add foo foo1 \ -+-- lsp-set-addresses foo1 "f0:00:00:01:02:03 192.168.1.2" -+ -+# Logical port 'alice1' in switch 'alice'. -+ADD_NAMESPACES(alice1) -+ADD_VETH(alice1, alice1, br-int, "172.16.1.2/24", "f0:00:00:01:02:04", \ -+ "172.16.1.1") -+ovn-nbctl lsp-add alice alice1 \ -+-- lsp-set-addresses alice1 "f0:00:00:01:02:04 172.16.1.2" -+ -+# Logical port 'bar1' in switch 'bar'. -+ADD_NAMESPACES(bar1) -+ADD_VETH(bar1, bar1, br-int, "192.168.2.2/24", "f0:00:00:01:02:05", \ -+"192.168.2.1") -+ovn-nbctl lsp-add bar bar1 \ -+-- lsp-set-addresses bar1 "f0:00:00:01:02:05 192.168.2.2" -+ -+ADD_NAMESPACES(bar2) -+ADD_VETH(bar2, bar2, br-int, "192.168.2.3/24", "e0:00:00:01:02:05", \ -+"192.168.2.1") -+ovn-nbctl lsp-add bar bar2 \ -+-- lsp-set-addresses bar2 "e0:00:00:01:02:05 192.168.2.3" -+ -+# Config OVN load-balancer with a VIP. -+ -+ovn-nbctl lb-add lb0 172.16.1.100:8080 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lb-add lb10 172.16.1.110:8080 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lb-add lb0-no-aff 172.16.1.100:8081 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lb-add lb10-no-aff 172.16.1.110:8081 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lr-lb-add R2 lb0 -+ovn-nbctl lr-lb-add R2 lb10 -+ovn-nbctl lr-lb-add R2 lb0-no-aff -+ovn-nbctl lr-lb-add R2 lb10-no-aff -+ -+# Start webservers in 'foo1', 'bar1'. -+NETNS_DAEMONIZE([foo1], [nc -l -k 192.168.1.2 80], [nc-foo1.pid]) -+NETNS_DAEMONIZE([bar1], [nc -l -k 192.168.2.2 80], [nc-bar1.pid]) -+ -+# Wait for ovn-controller to catch up. -+ovn-nbctl --wait=hv sync -+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-groups br-int | \ -+grep 'nat(dst=192.168.2.2:80)']) -+ -+dnl Should work with the virtual IP address through NAT -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8080]) -+done -+ -+dnl Each server should have at least one connection. 
-+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.100) | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+# Enable lb affinity -+ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60 -+ovn-nbctl --wait=sb set load_balancer lb10 options:affinity_timeout=60 -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8080]) -+done -+ -+dnl here we should have just one entry in the ct table -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.100) | -+sed -e 's/zone=[[0-9]]*/zone=/; s/src=192.168.[[0-9]].2/src=192.168..2/'], [0], [dnl -+tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=,dport=),reply=(src=192.168..2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+ -+AT_CHECK([ovs-ofctl dump-flows br-int table=78 |grep cookie |sed -e 's/duration=[[0-9]]*.[[0-9]]*s/duration=/; s/load:0xc0a80[[0-9]]02/load:0xc0a8002/; s/n_packets=[[0-9]]*/n_packets=/; s/n_bytes=[[0-9]]*/n_bytes=/; s/idle_age=[[0-9]]*/idle_age=/; s/hard_age=[[0-9]]*, //'], [0], [dnl -+ cookie=0x0, duration=, table=78, n_packets=, n_bytes=, idle_timeout=60, idle_age=, tcp,metadata=0x2,nw_src=172.16.1.2,nw_dst=172.16.1.100,tp_dst=8080 actions=load:0x1->NXM_NX_REG10[[14]],load:0xc0a8002->NXM_NX_REG4[[]],load:0x50->NXM_NX_REG8[[0..15]] -+]) -+ -+check_affinity_flows () { -+n1=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ip,reg4=0xc0a80102/{print substr($4,11,length($4)-11)}') -+n2=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ip,reg4=0xc0a80202/{print substr($4,11,length($4)-11)}') -+[[ $n1 -gt 0 -a $n2 -eq 0 ]] || [[ $n1 -eq 0 -a $n2 -gt 0 ]] -+echo $? -+} -+AT_CHECK([test $(check_affinity_flows) -eq 0]) -+NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8081]) -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+ovn-nbctl lb-add lb1 172.16.1.101:8080 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lb-add lb11 172.16.1.111:8080 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lb-add lb1-no-aff 172.16.1.101:8081 192.168.1.2:80,192.168.2.2:80 -+ovn-nbctl lb-add lb11-no-aff 172.16.1.111:8081 192.168.1.2:80,192.168.2.2:80 -+# Enable lb affinity -+ovn-nbctl --wait=sb set load_balancer lb1 options:affinity_timeout=3 -+ovn-nbctl --wait=sb set load_balancer lb11 options:affinity_timeout=3 -+ovn-nbctl lr-lb-add R2 lb1 -+ovn-nbctl lr-lb-add R2 lb11 -+ovn-nbctl lr-lb-add R2 lb1-no-aff -+ovn-nbctl lr-lb-add R2 lb11-no-aff -+ -+# check we use both backends -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([alice1], [nc -z 172.16.1.101 8080]) -+ ovs-ofctl del-flows br-int table=78 -+done -+ -+dnl Each server should have at least one connection. 
-+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.101) | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=172.16.1.2,dst=172.16.1.101,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=172.16.1.2,dst=172.16.1.101,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([alice1], [nc -z 172.16.1.101 8081]) -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+NETNS_DAEMONIZE([bar2], [nc -l -k 192.168.2.3 80], [nc-bar2.pid]) -+ -+ovn-nbctl lb-add lb2 192.168.2.100:8080 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl lb-add lb20 192.168.2.120:8080 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl lb-add lb2-no-aff 192.168.2.100:8081 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl lb-add lb20-no-aff 192.168.2.120:8081 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl --wait=sb set load_balancer lb2 options:affinity_timeout=60 -+ovn-nbctl --wait=sb set load_balancer lb20 options:affinity_timeout=60 -+ovn-nbctl ls-lb-add foo lb2 -+ovn-nbctl ls-lb-add foo lb20 -+ovn-nbctl ls-lb-add foo lb2-no-aff -+ovn-nbctl ls-lb-add foo lb20-no-aff -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([foo1], [nc -z 192.168.2.100 8080]) -+done -+ -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.2.100) | -+sed -e 's/zone=[[0-9]]*/zone=/; s/src=192.168.2.[[0-9]]/src=192.168.2./'], [0], [dnl -+tcp,orig=(src=192.168.1.2,dst=192.168.2.100,sport=,dport=),reply=(src=192.168.2.,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([foo1], [nc -z 192.168.2.100 8081]) -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+ovn-nbctl lb-add lb3 192.168.2.101:8080 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl lb-add lb30 192.168.2.131:8080 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl lb-add lb3-no-aff 192.168.2.101:8081 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl lb-add lb30-no-aff 192.168.2.131:8081 192.168.2.2:80,192.168.2.3:80 -+ovn-nbctl --wait=sb set load_balancer lb3 options:affinity_timeout=3 -+ovn-nbctl --wait=sb set load_balancer lb30 options:affinity_timeout=3 -+ovn-nbctl ls-lb-add foo lb3 -+ovn-nbctl ls-lb-add foo lb30 -+ovn-nbctl ls-lb-add foo lb3-no-aff -+ovn-nbctl ls-lb-add foo lb30-no-aff -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([foo1], [nc -z 192.168.2.101 8080]) -+ ovs-ofctl del-flows br-int table=78 -+done -+ -+dnl Each server should have at least one connection. 
-+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.2.101) | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=192.168.1.2,dst=192.168.2.101,sport=,dport=),reply=(src=192.168.2.2,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=192.168.1.2,dst=192.168.2.101,sport=,dport=),reply=(src=192.168.2.3,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([foo1], [nc -z 192.168.2.101 8081]) -+ -+NS_CHECK_EXEC([foo1], [ip neigh add 192.168.1.200 lladdr 00:00:01:01:02:03 dev foo1], [0]) -+ovn-nbctl lb-add lb4 192.168.1.100:8080 192.168.1.2:80 -+ovn-nbctl lb-add lb40 192.168.1.140:8080 192.168.1.2:80 -+ovn-nbctl lb-add lb4-no-aff 192.168.1.100:8081 192.168.1.2:80 -+ovn-nbctl lb-add lb40-no-aff 192.168.1.140:8081 192.168.1.2:80 -+ovn-nbctl --wait=sb set load_balancer lb4 options:affinity_timeout=60 options:hairpin_snat_ip=192.168.1.200 -+ovn-nbctl --wait=sb set load_balancer lb40 options:affinity_timeout=60 options:hairpin_snat_ip=192.168.1.200 -+ovn-nbctl ls-lb-add foo lb4 -+ovn-nbctl ls-lb-add foo lb40 -+ovn-nbctl lr-lb-add R1 lb4 -+ovn-nbctl lr-lb-add R1 lb40 -+ovn-nbctl ls-lb-add foo lb4-no-aff -+ovn-nbctl ls-lb-add foo lb40-no-aff -+ovn-nbctl lr-lb-add R1 lb4-no-aff -+ovn-nbctl lr-lb-add R1 lb40-no-aff -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([foo1], [nc -z 192.168.1.100 8080]) -+done -+ -+dnl Each server should have at least one connection. -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.1.2) | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=192.168.1.2,dst=192.168.1.100,sport=,dport=),reply=(src=192.168.1.2,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=192.168.1.2,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=192.168.1.200,sport=,dport=),zone=,protoinfo=(state=) -+tcp,orig=(src=192.168.1.200,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=192.168.1.200,sport=,dport=),zone=,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([foo1], [nc -z 192.168.1.100 8081]) -+ -+OVS_APP_EXIT_AND_WAIT([ovn-controller]) -+ -+as ovn-sb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as ovn-nb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as northd -+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+ -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d -+/connection dropped.*/d -+/inactivity probe*/d"]) -+AT_CLEANUP -+]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([load balancing affinity sessions - IPv6]) -+AT_KEYWORDS([ovnlb]) -+ -+CHECK_CONNTRACK() -+CHECK_CONNTRACK_NAT() -+ovn_start -+OVS_TRAFFIC_VSWITCHD_START() -+ADD_BR([br-int]) -+ -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# Start ovn-controller -+start_daemon ovn-controller -+ -+# Logical network: -+# Two LRs - R1 and R2 that are connected to each other via LS "join" -+# in fd20::/64 network. R1 has switchess foo (fd11::/64) and -+# bar (fd12::/64) connected to it. R2 has alice (fd72::/64) connected -+# to it. R2 is a gateway router on which we add load-balancing rules. 
-+# -+# foo -- R1 -- join - R2 -- alice -+# | -+# bar ---- -+ -+ovn-nbctl create Logical_Router name=R1 -+ovn-nbctl create Logical_Router name=R2 options:chassis=hv1 -+ -+ovn-nbctl ls-add foo -+ovn-nbctl ls-add bar -+ovn-nbctl ls-add alice -+ovn-nbctl ls-add join -+ -+# Connect foo to R1 -+ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 fd11::1/64 -+ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \ -+ type=router options:router-port=foo addresses=\"00:00:01:01:02:03\" -+ -+# Connect bar to R1 -+ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 fd12::1/64 -+ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \ -+ type=router options:router-port=bar addresses=\"00:00:01:01:02:04\" -+ -+# Connect alice to R2 -+ovn-nbctl lrp-add R2 alice 00:00:02:01:02:03 fd72::1/64 -+ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice \ -+ type=router options:router-port=alice addresses=\"00:00:02:01:02:03\" -+ -+# Connect R1 to join -+ovn-nbctl lrp-add R1 R1_join 00:00:04:01:02:03 fd20::1/64 -+ovn-nbctl lsp-add join r1-join -- set Logical_Switch_Port r1-join \ -+ type=router options:router-port=R1_join addresses='"00:00:04:01:02:03"' -+ -+# Connect R2 to join -+ovn-nbctl lrp-add R2 R2_join 00:00:04:01:02:04 fd20::2/64 -+ovn-nbctl lsp-add join r2-join -- set Logical_Switch_Port r2-join \ -+ type=router options:router-port=R2_join addresses='"00:00:04:01:02:04"' -+ -+# Static routes. -+ovn-nbctl lr-route-add R1 fd72::/64 fd20::2 -+ovn-nbctl lr-route-add R2 fd11::/64 fd20::1 -+ovn-nbctl lr-route-add R2 fd12::/64 fd20::1 -+ -+# Logical port 'foo1' in switch 'foo'. -+ADD_NAMESPACES(foo1) -+ADD_VETH(foo1, foo1, br-int, "fd11::2/64", "f0:00:00:01:02:03", \ -+ "fd11::1") -+OVS_WAIT_UNTIL([test "$(ip -n foo1 a | grep fd11::2 | grep tentative)" = ""]) -+ovn-nbctl lsp-add foo foo1 \ -+-- lsp-set-addresses foo1 "f0:00:00:01:02:03 fd11::2" -+ -+# Logical port 'alice1' in switch 'alice'. -+ADD_NAMESPACES(alice1) -+ADD_VETH(alice1, alice1, br-int, "fd72::2/64", "f0:00:00:01:02:04", \ -+ "fd72::1") -+OVS_WAIT_UNTIL([test "$(ip -n alice1 a | grep fd72::2 | grep tentative)" = ""]) -+ovn-nbctl lsp-add alice alice1 \ -+-- lsp-set-addresses alice1 "f0:00:00:01:02:04 fd72::2" -+ -+# Logical port 'bar1' in switch 'bar'. -+ADD_NAMESPACES(bar1) -+ADD_VETH(bar1, bar1, br-int, "fd12::2/64", "f0:00:00:01:02:05", \ -+"fd12::1") -+OVS_WAIT_UNTIL([test "$(ip -n bar1 a | grep fd12::2 | grep tentative)" = ""]) -+ovn-nbctl lsp-add bar bar1 \ -+-- lsp-set-addresses bar1 "f0:00:00:01:02:05 fd12::2" -+ -+ADD_NAMESPACES(bar2) -+ADD_VETH(bar2, bar2, br-int, "fd12::3/64", "e0:00:00:01:02:05", \ -+"fd12::1") -+OVS_WAIT_UNTIL([test "$(ip -n bar2 a | grep fd12::3 | grep tentative)" = ""]) -+ovn-nbctl lsp-add bar bar2 \ -+-- lsp-set-addresses bar2 "e0:00:00:01:02:05 fd12::3" -+ -+ovn-nbctl lb-add lb0 [[fd30::1]]:8080 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lb-add lb10 [[fd30::10]]:8080 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lb-add lb0-no-aff [[fd30::1]]:8081 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lb-add lb10-no-aff [[fd30::10]]:8081 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lr-lb-add R2 lb0 -+ovn-nbctl lr-lb-add R2 lb10 -+ovn-nbctl lr-lb-add R2 lb0-no-aff -+ovn-nbctl lr-lb-add R2 lb10-no-aff -+ -+# Wait for ovn-controller to catch up. -+ovn-nbctl --wait=hv sync -+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-groups br-int | \ -+grep 'nat(dst=\[[fd11::2\]]:80)']) -+ -+# Start webservers in 'foo1', 'bar1'. 
-+NETNS_DAEMONIZE([foo1], [nc -l -k fd11::2 80], [nc-foo1.pid]) -+NETNS_DAEMONIZE([bar1], [nc -l -k fd12::2 80], [nc-bar1.pid]) -+ -+dnl Should work with the virtual IP address through NAT -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([alice1], [nc -z fd30::1 8080]) -+done -+ -+dnl Each server should have at least one connection. -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=fd72::2,dst=fd30::1,sport=,dport=),reply=(src=fd11::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=fd72::2,dst=fd30::1,sport=,dport=),reply=(src=fd12::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([alice1], [nc -z fd30::1 8081]) -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+# Enable lb affinity -+ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60 -+ovn-nbctl --wait=sb set load_balancer lb10 options:affinity_timeout=60 -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([alice1], [nc -z fd30::1 8080]) -+done -+ -+dnl here we should have just one entry in the ct table -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | -+sed -e 's/zone=[[0-9]]*/zone=/; s/src=fd1[[0-9]]::2/src=fd1::2/'], [0], [dnl -+tcp,orig=(src=fd72::2,dst=fd30::1,sport=,dport=),reply=(src=fd1::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+ -+AT_CHECK([ovs-ofctl dump-flows br-int table=78 |grep cookie |sed -e 's/duration=[[0-9]]*.[[0-9]]*s/duration=/; s/load:0xfd1[[0-9]]000000000000/load:0xfd1000000000000/; s/n_packets=[[0-9]]*/n_packets=/; s/n_bytes=[[0-9]]*/n_bytes=/; s/idle_age=[[0-9]]*/idle_age=/; s/hard_age=[[0-9]]*, //'], [0], [dnl -+ cookie=0x0, duration=, table=78, n_packets=, n_bytes=, idle_timeout=60, idle_age=, tcp6,metadata=0x2,ipv6_src=fd72::2,ipv6_dst=fd30::1,tp_dst=8080 actions=load:0x1->NXM_NX_REG10[[14]],load:0x2->NXM_NX_XXREG1[[0..63]],load:0xfd1000000000000->NXM_NX_XXREG1[[64..127]],load:0x50->NXM_NX_REG8[[0..15]] -+]) -+ -+check_affinity_flows () { -+n1=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ipv6,reg4=0xfd110000/{print substr($4,11,length($4)-11)}') -+n2=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ipv6,reg4=0xfd120000/{print substr($4,11,length($4)-11)}') -+[[ $n1 -gt 0 -a $n2 -eq 0 ]] || [[ $n1 -eq 0 -a $n2 -gt 0 ]] -+echo $? -+} -+AT_CHECK([test $(check_affinity_flows) -eq 0]) -+NS_CHECK_EXEC([alice1], [nc -z fd30::1 8081]) -+ -+# Flush conntrack entries for easier output parsing of next test. 
-+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+ovn-nbctl lb-add lb1 [[fd30::2]]:8080 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lb-add lb11 [[fd30::12]]:8080 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lb-add lb1-no-aff [[fd30::2]]:8081 [[fd11::2]]:80,[[fd12::2]]:80 -+ovn-nbctl lb-add lb11-no-aff [[fd30::12]]:8081 [[fd11::2]]:80,[[fd12::2]]:80 -+# Enable lb affinity -+ovn-nbctl --wait=sb set load_balancer lb1 options:affinity_timeout=3 -+ovn-nbctl --wait=sb set load_balancer lb11 options:affinity_timeout=3 -+ovn-nbctl lr-lb-add R2 lb1 -+ovn-nbctl lr-lb-add R2 lb11 -+ovn-nbctl lr-lb-add R2 lb1-no-aff -+ovn-nbctl lr-lb-add R2 lb11-no-aff -+ -+# check we use both backends -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([alice1], [nc -z fd30::2 8080]) -+ ovs-ofctl del-flows br-int table=78 -+done -+ -+dnl Each server should have at least one connection. -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::2) | grep -v fe80 | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=fd72::2,dst=fd30::2,sport=,dport=),reply=(src=fd11::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=fd72::2,dst=fd30::2,sport=,dport=),reply=(src=fd12::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([alice1], [nc -z fd30::2 8081]) -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+NETNS_DAEMONIZE([bar2], [nc -l -k fd12::3 80], [nc-bar2.pid]) -+ -+ovn-nbctl lb-add lb2 [[fd12::a]]:8080 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl lb-add lb20 [[fd12::2a]]:8080 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl lb-add lb2-no-aff [[fd12::a]]:8081 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl lb-add lb20-no-aff [[fd12::2a]]:8081 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl --wait=sb set load_balancer lb2 options:affinity_timeout=60 -+ovn-nbctl --wait=sb set load_balancer lb20 options:affinity_timeout=60 -+ovn-nbctl ls-lb-add foo lb2 -+ovn-nbctl ls-lb-add foo lb20 -+ovn-nbctl ls-lb-add foo lb2-no-aff -+ovn-nbctl ls-lb-add foo lb20-no-aff -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([foo1], [nc -z fd12::a 8080]) -+done -+ -+dnl here we should have just one entry in the ct table -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd12::a) | grep -v fe80 | -+sed -e 's/zone=[[0-9]]*/zone=/; s/src=fd12::[[0-9]]/src=fd12::/'], [0], [dnl -+tcp,orig=(src=fd11::2,dst=fd12::a,sport=,dport=),reply=(src=fd12::,dst=fd11::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([foo1], [nc -z fd12::a 8081]) -+ -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+ovn-nbctl lb-add lb3 [[fd12::b]]:8080 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl lb-add lb30 [[fd12::3b]]:8080 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl lb-add lb3-no-aff [[fd12::b]]:8081 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl lb-add lb30-no-aff [[fd12::3b]]:8081 [[fd12::2]]:80,[[fd12::3]]:80 -+ovn-nbctl --wait=sb set load_balancer lb3 options:affinity_timeout=3 -+ovn-nbctl --wait=sb set load_balancer lb30 options:affinity_timeout=3 -+ovn-nbctl ls-lb-add foo lb3 -+ovn-nbctl ls-lb-add foo lb30 -+ovn-nbctl ls-lb-add foo lb3-no-aff -+ovn-nbctl ls-lb-add foo lb30-no-aff -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([foo1], [nc -z fd12::b 8080]) -+ ovs-ofctl del-flows br-int table=78 -+done -+ -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd12::b) | grep -v fe80 | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl 
-+tcp,orig=(src=fd11::2,dst=fd12::b,sport=,dport=),reply=(src=fd12::2,dst=fd11::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=fd11::2,dst=fd12::b,sport=,dport=),reply=(src=fd12::3,dst=fd11::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([foo1], [nc -z fd12::b 8081]) -+ -+NS_CHECK_EXEC([foo1], [ip -6 neigh add fd11::b lladdr 00:00:01:01:02:03 dev foo1], [0]) -+ovn-nbctl --wait=sb lb-add lb4 [[fd11::a]]:8080 [[fd11::2]]:80 -+ovn-nbctl --wait=sb lb-add lb40 [[fd11::a]]:8080 [[fd11::2]]:80 -+ovn-nbctl --wait=sb lb-add lb4-no-aff [[fd11::a]]:8081 [[fd11::2]]:80 -+ovn-nbctl --wait=sb lb-add lb40-no-aff [[fd11::a]]:8081 [[fd11::2]]:80 -+ovn-nbctl --wait=sb set load_balancer lb4 options:affinity_timeout=60 options:hairpin_snat_ip="fd11::b" -+ovn-nbctl --wait=sb set load_balancer lb40 options:affinity_timeout=60 options:hairpin_snat_ip="fd11::b" -+ovn-nbctl ls-lb-add foo lb4 -+ovn-nbctl ls-lb-add foo lb40 -+ovn-nbctl lr-lb-add R1 lb4 -+ovn-nbctl lr-lb-add R1 lb40 -+ovn-nbctl ls-lb-add foo lb4-no-aff -+ovn-nbctl ls-lb-add foo lb40-no-aff -+ovn-nbctl lr-lb-add R1 lb4-no-aff -+ovn-nbctl lr-lb-add R1 lb40-no-aff -+ -+# Flush conntrack entries for easier output parsing of next test. -+AT_CHECK([ovs-appctl dpctl/flush-conntrack]) -+ -+for i in $(seq 1 15); do -+ echo Request $i -+ NS_CHECK_EXEC([foo1], [nc -z fd11::a 8080]) -+done -+ -+dnl Each server should have at least one connection. -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd11::2) | grep -v fe80 | -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+tcp,orig=(src=fd11::2,dst=fd11::2,sport=,dport=),reply=(src=fd11::2,dst=fd11::b,sport=,dport=),zone=,protoinfo=(state=) -+tcp,orig=(src=fd11::2,dst=fd11::a,sport=,dport=),reply=(src=fd11::2,dst=fd11::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) -+tcp,orig=(src=fd11::b,dst=fd11::2,sport=,dport=),reply=(src=fd11::2,dst=fd11::b,sport=,dport=),zone=,protoinfo=(state=) -+]) -+NS_CHECK_EXEC([foo1], [nc -z fd11::a 8081]) -+ -+OVS_APP_EXIT_AND_WAIT([ovn-controller]) -+ -+as ovn-sb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as ovn-nb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as northd -+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+ -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d -+/connection dropped.*/d -+/inactivity probe*/d"]) -+AT_CLEANUP -+]) -+ -+OVN_FOR_EACH_NORTHD([ -+AT_SETUP([SNAT in separate zone from DNAT]) -+ -+CHECK_CONNTRACK() -+CHECK_CONNTRACK_NAT() -+ovn_start -+OVS_TRAFFIC_VSWITCHD_START() -+ADD_BR([br-int]) -+ -+# Set external-ids in br-int needed for ovn-controller -+ovs-vsctl \ -+ -- set Open_vSwitch . external-ids:system-id=hv1 \ -+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ -+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ -+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ -+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true -+ -+# The goal of this test is to ensure that when traffic is first DNATted -+# (by way of a load balancer), and then SNATted, the SNAT happens in a -+# separate conntrack zone from the DNAT. 
-+ -+start_daemon ovn-controller -+ -+check ovn-nbctl ls-add public -+ -+check ovn-nbctl lr-add r1 -+check ovn-nbctl lrp-add r1 r1_public 00:de:ad:ff:00:01 172.16.0.1/16 -+check ovn-nbctl lrp-add r1 r1_s1 00:de:ad:fe:00:01 173.0.1.1/24 -+check ovn-nbctl lrp-set-gateway-chassis r1_public hv1 -+ -+check ovn-nbctl lb-add r1_lb 30.0.0.1 172.16.0.102 -+check ovn-nbctl lr-lb-add r1 r1_lb -+ -+check ovn-nbctl ls-add s1 -+check ovn-nbctl lsp-add s1 s1_r1 -+check ovn-nbctl lsp-set-type s1_r1 router -+check ovn-nbctl lsp-set-addresses s1_r1 router -+check ovn-nbctl lsp-set-options s1_r1 router-port=r1_s1 -+ -+check ovn-nbctl lsp-add s1 vm1 -+check ovn-nbctl lsp-set-addresses vm1 "00:de:ad:01:00:01 173.0.1.2" -+ -+check ovn-nbctl lsp-add public public_r1 -+check ovn-nbctl lsp-set-type public_r1 router -+check ovn-nbctl lsp-set-addresses public_r1 router -+check ovn-nbctl lsp-set-options public_r1 router-port=r1_public nat-addresses=router -+ -+check ovn-nbctl lr-add r2 -+check ovn-nbctl lrp-add r2 r2_public 00:de:ad:ff:00:02 172.16.0.2/16 -+check ovn-nbctl lrp-add r2 r2_s2 00:de:ad:fe:00:02 173.0.2.1/24 -+check ovn-nbctl lr-nat-add r2 dnat_and_snat 172.16.0.102 173.0.2.2 -+check ovn-nbctl lrp-set-gateway-chassis r2_public hv1 -+ -+check ovn-nbctl ls-add s2 -+check ovn-nbctl lsp-add s2 s2_r2 -+check ovn-nbctl lsp-set-type s2_r2 router -+check ovn-nbctl lsp-set-addresses s2_r2 router -+check ovn-nbctl lsp-set-options s2_r2 router-port=r2_s2 -+ -+check ovn-nbctl lsp-add s2 vm2 -+check ovn-nbctl lsp-set-addresses vm2 "00:de:ad:01:00:02 173.0.2.2" -+ -+check ovn-nbctl lsp-add public public_r2 -+check ovn-nbctl lsp-set-type public_r2 router -+check ovn-nbctl lsp-set-addresses public_r2 router -+check ovn-nbctl lsp-set-options public_r2 router-port=r2_public nat-addresses=router -+ -+ADD_NAMESPACES(vm1) -+ADD_VETH(vm1, vm1, br-int, "173.0.1.2/24", "00:de:ad:01:00:01", \ -+ "173.0.1.1") -+ADD_NAMESPACES(vm2) -+ADD_VETH(vm2, vm2, br-int, "173.0.2.2/24", "00:de:ad:01:00:02", \ -+ "173.0.2.1") -+ -+check ovn-nbctl lr-nat-add r1 dnat_and_snat 172.16.0.101 173.0.1.2 vm1 00:00:00:01:02:03 -+check ovn-nbctl --wait=hv sync -+ -+# Next, make sure that a ping works as expected -+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 30.0.0.1 | FORMAT_PING], \ -+[0], [dnl -+3 packets transmitted, 3 received, 0% packet loss, time 0ms -+]) -+ -+# Finally, make sure that conntrack shows two separate zones being used for -+# DNAT and SNAT -+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \ -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=,type=8,code=0),reply=(src=172.16.0.102,dst=173.0.1.2,id=,type=0,code=0),zone=,mark=2 -+]) -+ -+# The final two entries appear identical here. That is because FORMAT_CT -+# scrubs the zone numbers. In actuality, the zone numbers are different, -+# which is why there are two entries. 
-+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.102) | \ -+sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl -+icmp,orig=(src=172.16.0.101,dst=172.16.0.102,id=,type=8,code=0),reply=(src=173.0.2.2,dst=172.16.0.101,id=,type=0,code=0),zone= -+icmp,orig=(src=173.0.1.2,dst=172.16.0.102,id=,type=8,code=0),reply=(src=172.16.0.102,dst=172.16.0.101,id=,type=0,code=0),zone= -+icmp,orig=(src=173.0.1.2,dst=172.16.0.102,id=,type=8,code=0),reply=(src=172.16.0.102,dst=172.16.0.101,id=,type=0,code=0),zone= -+]) -+ -+OVS_APP_EXIT_AND_WAIT([ovn-controller]) -+ -+as ovn-sb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as ovn-nb -+OVS_APP_EXIT_AND_WAIT([ovsdb-server]) -+ -+as northd -+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) -+ -+as -+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d -+/connection dropped.*/d"]) -+AT_CLEANUP -+]) -diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c -index 3bbdbd998..2f8ec4348 100644 ---- a/utilities/ovn-nbctl.c -+++ b/utilities/ovn-nbctl.c -@@ -4421,6 +4421,8 @@ nbctl_pre_lr_route_del(struct ctl_context *ctx) - - ovsdb_idl_add_column(ctx->idl, - &nbrec_logical_router_static_route_col_policy); -+ ovsdb_idl_add_column(ctx->idl, -+ &nbrec_logical_router_static_route_col_bfd); - ovsdb_idl_add_column(ctx->idl, - &nbrec_logical_router_static_route_col_ip_prefix); - ovsdb_idl_add_column(ctx->idl, -@@ -4433,7 +4435,7 @@ nbctl_pre_lr_route_del(struct ctl_context *ctx) - } - - static void --nbctl_lr_route_del(struct ctl_context *ctx) -+ nbctl_lr_route_del(struct ctl_context *ctx) - { - const struct nbrec_logical_router *lr; - char *error = lr_by_name_or_uuid(ctx, ctx->argv[1], true, &lr); -@@ -4550,6 +4552,10 @@ nbctl_lr_route_del(struct ctl_context *ctx) - } - - /* Everything matched. Removing. */ -+ if (lr->static_routes[i]->bfd) { -+ nbrec_bfd_delete(lr->static_routes[i]->bfd); -+ } -+ - nbrec_logical_router_update_static_routes_delvalue( - lr, lr->static_routes[i]); - n_removed++; -diff --git a/utilities/ovn-trace.c b/utilities/ovn-trace.c -index d9e7129d9..858f481fc 100644 ---- a/utilities/ovn-trace.c -+++ b/utilities/ovn-trace.c -@@ -60,6 +60,9 @@ static char *unixctl_path; - /* The southbound database. */ - static struct ovsdb_idl *ovnsb_idl; - -+/* --leader-only, --no-leader-only: Only accept the leader in a cluster. */ -+static int leader_only = true; -+ - /* --detailed: Show a detailed, table-by-table trace. 
*/ - static bool detailed; - -@@ -138,6 +141,7 @@ main(int argc, char *argv[]) - 1, INT_MAX, ovntrace_trace, NULL); - } - ovnsb_idl = ovsdb_idl_create(db, &sbrec_idl_class, true, false); -+ ovsdb_idl_set_leader_only(ovnsb_idl, leader_only); - - bool already_read = false; - for (;;) { -@@ -243,6 +247,8 @@ parse_options(int argc, char *argv[]) - { - enum { - OPT_DB = UCHAR_MAX + 1, -+ OPT_LEADER_ONLY, -+ OPT_NO_LEADER_ONLY, - OPT_UNIXCTL, - OPT_DETAILED, - OPT_SUMMARY, -@@ -260,6 +266,8 @@ parse_options(int argc, char *argv[]) - }; - static const struct option long_options[] = { - {"db", required_argument, NULL, OPT_DB}, -+ {"leader-only", no_argument, NULL, OPT_LEADER_ONLY}, -+ {"no-leader-only", no_argument, NULL, OPT_NO_LEADER_ONLY}, - {"unixctl", required_argument, NULL, OPT_UNIXCTL}, - {"detailed", no_argument, NULL, OPT_DETAILED}, - {"summary", no_argument, NULL, OPT_SUMMARY}, -@@ -294,6 +302,14 @@ parse_options(int argc, char *argv[]) - db = optarg; - break; - -+ case OPT_LEADER_ONLY: -+ leader_only = true; -+ break; -+ -+ case OPT_NO_LEADER_ONLY: -+ leader_only = false; -+ break; -+ - case OPT_UNIXCTL: - unixctl_path = optarg; - break; -@@ -390,6 +406,7 @@ Output style options:\n\ - Other options:\n\ - --db=DATABASE connect to DATABASE\n\ - (default: %s)\n\ -+ --no-leader-only accept any cluster member, not just the leader\n\ - --ovs[=REMOTE] obtain corresponding OpenFlow flows from REMOTE\n\ - (default: %s)\n\ - --unixctl=SOCKET set control socket name\n\ -@@ -3298,6 +3315,10 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len, - break; - case OVNACT_CHK_ECMP_NH: - break; -+ case OVNACT_COMMIT_LB_AFF: -+ break; -+ case OVNACT_CHK_LB_AFF: -+ break; - } - } - ofpbuf_uninit(&stack); diff --git a/SOURCES/ovn22.12.patch b/SOURCES/ovn22.12.patch new file mode 100644 index 0000000..6277eee --- /dev/null +++ b/SOURCES/ovn22.12.patch @@ -0,0 +1,3404 @@ +diff --git a/.ci/ovn-kubernetes/Dockerfile b/.ci/ovn-kubernetes/Dockerfile +index e74b620be..1884724ec 100644 +--- a/.ci/ovn-kubernetes/Dockerfile ++++ b/.ci/ovn-kubernetes/Dockerfile +@@ -1,5 +1,5 @@ + ARG OVNKUBE_COMMIT=master +-ARG LIBOVSDB_COMMIT=8081fe24e48f ++ARG LIBOVSDB_COMMIT=a6a173993830 + + FROM fedora:35 AS ovnbuilder + +@@ -47,9 +47,17 @@ RUN GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@${LIBOVSD + # Clone OVN Kubernetes and build the binary based on the commit passed as argument + WORKDIR /root + RUN git clone https://github.com/ovn-org/ovn-kubernetes.git +-WORKDIR /root/ovn-kubernetes/go-controller ++WORKDIR /root/ovn-kubernetes + RUN git checkout ${OVNKUBE_COMMIT} && git log -n 1 + ++# Copy the ovn-kubernetes scripts from the OVN sources and apply any ++# custom changes if needed. ++RUN mkdir -p /tmp/ovn/.ci/ovn-kubernetes ++COPY .ci/ovn-kubernetes /tmp/ovn/.ci/ovn-kubernetes ++WORKDIR /tmp/ovn ++RUN .ci/ovn-kubernetes/prepare.sh /root/ovn-kubernetes ++ ++WORKDIR /root/ovn-kubernetes/go-controller + # Make sure we use the OVN NB/SB schema from the local code. 
+ COPY --from=ovnbuilder /tmp/ovn/ovn-nb.ovsschema pkg/nbdb/ovn-nb.ovsschema + COPY --from=ovnbuilder /tmp/ovn/ovn-sb.ovsschema pkg/sbdb/ovn-sb.ovsschema +diff --git a/.ci/ovn-kubernetes/custom.patch b/.ci/ovn-kubernetes/custom.patch +new file mode 100644 +index 000000000..e69de29bb +diff --git a/.ci/ovn-kubernetes/prepare.sh b/.ci/ovn-kubernetes/prepare.sh +new file mode 100755 +index 000000000..8fc9652af +--- /dev/null ++++ b/.ci/ovn-kubernetes/prepare.sh +@@ -0,0 +1,20 @@ ++#!/bin/bash ++ ++set -ev ++ ++ovnk8s_path=$1 ++topdir=$PWD ++ ++pushd ${ovnk8s_path} ++ ++# Add here any custom operations that need to performed on the ++# ovn-kubernetes cloned repo, e.g., custom patches. ++ ++# git apply --allow-empty is too new so not all git versions from major ++# distros support it, just check if the custom patch file is not empty ++# before applying it. ++[ -s ${topdir}/.ci/ovn-kubernetes/custom.patch ] && \ ++ git apply -v ${topdir}/.ci/ovn-kubernetes/custom.patch ++ ++popd # ${ovnk8s_path} ++exit 0 +diff --git a/.github/workflows/ovn-kubernetes.yml b/.github/workflows/ovn-kubernetes.yml +index 344937e53..840ecd1e0 100644 +--- a/.github/workflows/ovn-kubernetes.yml ++++ b/.github/workflows/ovn-kubernetes.yml +@@ -16,7 +16,7 @@ env: + GO_VERSION: "1.18.4" + K8S_VERSION: v1.24.0 + OVNKUBE_COMMIT: "master" +- LIBOVSDB_COMMIT: "98c0bad3cff1" ++ LIBOVSDB_COMMIT: "a6a173993830" + KIND_CLUSTER_NAME: ovn + KIND_INSTALL_INGRESS: true + KIND_ALLOW_SYSTEM_WRITES: true +@@ -73,6 +73,7 @@ jobs: + env: + JOB_NAME: "${{ matrix.target }}-${{ matrix.ha }}-${{ matrix.gateway-mode }}-${{ matrix.ipfamily }}-${{ matrix.disable-snat-multiple-gws }}-${{ matrix.second-bridge }}" + OVN_HYBRID_OVERLAY_ENABLE: "${{ matrix.target == 'control-plane' }}" ++ KIND_INSTALL_METALLB: "${{ matrix.target == 'control-plane' }}" + OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' }}" + OVN_EMPTY_LB_EVENTS: "${{ matrix.target == 'control-plane' }}" + OVN_HA: "true" +@@ -91,12 +92,19 @@ jobs: + go-version: ${{ env.GO_VERSION }} + id: go + ++ - name: Check out ovn ++ uses: actions/checkout@v3 ++ + - name: Check out ovn-kubernetes + uses: actions/checkout@v3 + with: + path: src/github.com/ovn-org/ovn-kubernetes + repository: ovn-org/ovn-kubernetes + ++ - name: Prepare ++ run: | ++ .ci/ovn-kubernetes/prepare.sh src/github.com/ovn-org/ovn-kubernetes ++ + - name: Set up environment + run: | + export GOPATH=$(go env GOPATH) +diff --git a/Makefile.am b/Makefile.am +index c8f770146..f7758d114 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -90,6 +90,8 @@ EXTRA_DIST = \ + .ci/osx-build.sh \ + .ci/osx-prepare.sh \ + .ci/ovn-kubernetes/Dockerfile \ ++ .ci/ovn-kubernetes/prepare.sh \ ++ .ci/ovn-kubernetes/custom.patch \ + .github/workflows/test.yml \ + .github/workflows/ovn-kubernetes.yml \ + boot.sh \ +diff --git a/NEWS b/NEWS +index 0920b44e2..acb8065bc 100644 +--- a/NEWS ++++ b/NEWS +@@ -1,3 +1,6 @@ ++OVN v22.12.1 - xx xxx xxxx ++-------------------------- ++ + OVN v22.12.0 - 16 Dec 2022 + -------------------------- + - Add load balancer "affinity_timeout" option to configure load balancing +diff --git a/build-aux/sodepends.py b/build-aux/sodepends.py +index 343fda1af..7b1f9c840 100755 +--- a/build-aux/sodepends.py ++++ b/build-aux/sodepends.py +@@ -63,7 +63,8 @@ def sodepends(include_info, filenames, dst): + continue + + # Open file. 
+- include_dirs = [info[0] for info in include_info] ++ include_dirs = [info[1] if len(info) == 2 else info[0] ++ for info in include_info] + fn = soutil.find_file(include_dirs, toplevel) + if not fn: + ok = False +diff --git a/configure.ac b/configure.ac +index 101467253..357758e0c 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -13,7 +13,7 @@ + # limitations under the License. + + AC_PREREQ(2.63) +-AC_INIT(ovn, 22.12.0, bugs@openvswitch.org) ++AC_INIT(ovn, 22.12.1, bugs@openvswitch.org) + AC_CONFIG_MACRO_DIR([m4]) + AC_CONFIG_AUX_DIR([build-aux]) + AC_CONFIG_HEADERS([config.h]) +diff --git a/controller/chassis.c b/controller/chassis.c +index 685d9b2ae..98f8da2be 100644 +--- a/controller/chassis.c ++++ b/controller/chassis.c +@@ -352,6 +352,7 @@ chassis_build_other_config(const struct ovs_chassis_cfg *ovs_cfg, + smap_replace(config, OVN_FEATURE_PORT_UP_NOTIF, "true"); + smap_replace(config, OVN_FEATURE_CT_NO_MASKED_LABEL, "true"); + smap_replace(config, OVN_FEATURE_MAC_BINDING_TIMESTAMP, "true"); ++ smap_replace(config, OVN_FEATURE_CT_LB_RELATED, "true"); + } + + /* +@@ -469,6 +470,12 @@ chassis_other_config_changed(const struct ovs_chassis_cfg *ovs_cfg, + return true; + } + ++ if (!smap_get_bool(&chassis_rec->other_config, ++ OVN_FEATURE_CT_LB_RELATED, ++ false)) { ++ return true; ++ } ++ + return false; + } + +diff --git a/controller/lflow.c b/controller/lflow.c +index bb47bb0c7..4b1cfe318 100644 +--- a/controller/lflow.c ++++ b/controller/lflow.c +@@ -1567,9 +1567,6 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip, + /* Hairpin replies have the same nw_proto as packets that created the + * session. + */ +- union mf_value imm_proto = { +- .u8 = lb_proto, +- }; + ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); + ol_spec->dst.field = mf_from_id(MFF_IP_PROTO); + ol_spec->src.field = mf_from_id(MFF_IP_PROTO); +@@ -1577,16 +1574,21 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip, + ol_spec->dst.n_bits = ol_spec->dst.field->n_bits; + ol_spec->n_bits = ol_spec->dst.n_bits; + ol_spec->dst_type = NX_LEARN_DST_MATCH; +- ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; +- mf_write_subfield_value(&ol_spec->dst, &imm_proto, &match); +- +- /* Push value last, as this may reallocate 'ol_spec' */ +- imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); +- src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); +- memcpy(src_imm, &imm_proto, imm_bytes); + + /* Hairpin replies have source port == . */ + if (has_l4_port) { ++ union mf_value imm_proto = { ++ .u8 = lb_proto, ++ }; ++ ++ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE; ++ mf_write_subfield_value(&ol_spec->dst, &imm_proto, &match); ++ ++ /* Push value last, as this may reallocate 'ol_spec' */ ++ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8); ++ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes)); ++ memcpy(src_imm, &imm_proto, imm_bytes); ++ + ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec); + switch (lb_proto) { + case IPPROTO_TCP: +@@ -1610,6 +1612,8 @@ add_lb_vip_hairpin_reply_action(struct in6_addr *vip6, ovs_be32 vip, + ol_spec->n_bits = ol_spec->dst.n_bits; + ol_spec->dst_type = NX_LEARN_DST_MATCH; + ol_spec->src_type = NX_LEARN_SRC_FIELD; ++ } else { ++ ol_spec->src_type = NX_LEARN_SRC_FIELD; + } + + /* Set MLF_LOOKUP_LB_HAIRPIN_BIT for hairpin replies. 
*/ +diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c +index 73c33a6bf..c899283dc 100644 +--- a/controller/ovn-controller.c ++++ b/controller/ovn-controller.c +@@ -755,6 +755,11 @@ update_ct_zones(const struct shash *binding_lports, + } + bitmap_set1(ct_zone_bitmap, snat_req_node->data); + node->data = snat_req_node->data; ++ } else { ++ add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_OF_QUEUED, ++ snat_req_node->data, true, snat_req_node->name); ++ bitmap_set1(ct_zone_bitmap, snat_req_node->data); ++ simap_put(ct_zones, snat_req_node->name, snat_req_node->data); + } + } + +diff --git a/controller/pinctrl.c b/controller/pinctrl.c +index 82da6ae73..e4d530138 100644 +--- a/controller/pinctrl.c ++++ b/controller/pinctrl.c +@@ -1419,7 +1419,6 @@ prepare_ipv6_prefixd(struct ovsdb_idl_txn *ovnsb_idl_txn, + + struct buffer_info { + struct ofpbuf ofpacts; +- ofp_port_t ofp_port; + struct dp_packet *p; + }; + +@@ -1495,7 +1494,6 @@ buffered_push_packet(struct buffered_packets *bp, + union mf_value pkt_mark_value; + mf_get_value(pkt_mark_field, &md->flow, &pkt_mark_value); + ofpact_put_set_field(&bi->ofpacts, pkt_mark_field, &pkt_mark_value, NULL); +- bi->ofp_port = md->flow.in_port.ofp_port; + + struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(&bi->ofpacts); + resubmit->in_port = OFPP_CONTROLLER; +@@ -1531,7 +1529,7 @@ buffered_send_packets(struct rconn *swconn, struct buffered_packets *bp, + .ofpacts = bi->ofpacts.data, + .ofpacts_len = bi->ofpacts.size, + }; +- match_set_in_port(&po.flow_metadata, bi->ofp_port); ++ match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER); + queue_msg(swconn, ofputil_encode_packet_out(&po, proto)); + + ofpbuf_uninit(&bi->ofpacts); +diff --git a/debian/changelog b/debian/changelog +index 6f5a9ac2a..d658774f6 100644 +--- a/debian/changelog ++++ b/debian/changelog +@@ -1,3 +1,9 @@ ++OVN (22.12.1-1) unstable; urgency=low ++ [ OVN team ] ++ * New upstream version ++ ++ -- OVN team Fri, 16 Dec 2022 09:52:44 -0500 ++ + ovn (22.12.0-1) unstable; urgency=low + + * New upstream version +diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c +index 73ce77e5c..9a80a7f68 100644 +--- a/ic/ovn-ic.c ++++ b/ic/ovn-ic.c +@@ -1911,13 +1911,112 @@ main(int argc, char *argv[]) + struct ovsdb_idl_loop ovnisb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( + ovsdb_idl_create(ovn_ic_sb_db, &icsbrec_idl_class, true, true)); + +- /* ovn-nb db. XXX: add only needed tables and columns */ ++ /* ovn-nb db. */ + struct ovsdb_idl_loop ovnnb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( +- ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, true, true)); +- +- /* ovn-sb db. 
XXX: add only needed tables and columns */ ++ ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, false, true)); ++ ++ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_nb_global); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, &nbrec_nb_global_col_name); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, &nbrec_nb_global_col_options); ++ ++ ovsdb_idl_add_table(ovnnb_idl_loop.idl, ++ &nbrec_table_logical_router_static_route); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_static_route_col_route_table); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_static_route_col_ip_prefix); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_static_route_col_nexthop); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_static_route_col_external_ids); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_static_route_col_options); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_static_route_col_policy); ++ ++ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_router); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_col_name); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_col_static_routes); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_col_ports); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_col_options); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_col_external_ids); ++ ++ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_router_port); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_port_col_name); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_port_col_networks); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_port_col_external_ids); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_router_port_col_options); ++ ++ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_switch); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_col_name); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_col_ports); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_col_other_config); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_col_external_ids); ++ ++ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_switch_port); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_name); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_addresses); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_options); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_type); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_up); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_addresses); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_enabled); ++ ovsdb_idl_add_column(ovnnb_idl_loop.idl, ++ &nbrec_logical_switch_port_col_external_ids); ++ ++ /* ovn-sb db. 
*/ + struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( +- ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, true, true)); ++ ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true)); ++ ++ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_chassis); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_encaps); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_name); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_hostname); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_other_config); ++ ++ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_encap); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_chassis_name); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_type); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_ip); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_options); ++ ++ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_datapath_binding_col_external_ids); ++ ++ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_port_binding); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_port_binding_col_datapath); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_port_binding_col_mac); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_port_binding_col_options); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_port_binding_col_logical_port); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_port_binding_col_external_ids); ++ ovsdb_idl_add_column(ovnsb_idl_loop.idl, ++ &sbrec_port_binding_col_chassis); + + /* Create IDL indexes */ + struct ovsdb_idl_index *nbrec_ls_by_name +diff --git a/include/ovn/features.h b/include/ovn/features.h +index 679f67457..5bcd68739 100644 +--- a/include/ovn/features.h ++++ b/include/ovn/features.h +@@ -24,6 +24,7 @@ + #define OVN_FEATURE_PORT_UP_NOTIF "port-up-notif" + #define OVN_FEATURE_CT_NO_MASKED_LABEL "ct-no-masked-label" + #define OVN_FEATURE_MAC_BINDING_TIMESTAMP "mac-binding-timestamp" ++#define OVN_FEATURE_CT_LB_RELATED "ovn-ct-lb-related" + + /* OVS datapath supported features. Based on availability OVN might generate + * different types of openflows. +diff --git a/include/ovn/lex.h b/include/ovn/lex.h +index 9159b7a26..64d33361f 100644 +--- a/include/ovn/lex.h ++++ b/include/ovn/lex.h +@@ -29,6 +29,8 @@ + + struct ds; + ++#define LEX_TEMPLATE_PREFIX '^' ++ + /* Token type. */ + enum lex_type { + LEX_T_END, /* end of input */ +diff --git a/lib/lb.c b/lib/lb.c +index 43628bba7..c13d07b99 100644 +--- a/lib/lb.c ++++ b/lib/lb.c +@@ -314,11 +314,10 @@ ovn_lb_vip_destroy(struct ovn_lb_vip *vip) + free(vip->backends); + } + +-void +-ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s, bool template) ++static void ++ovn_lb_vip_format__(const struct ovn_lb_vip *vip, struct ds *s, ++ bool needs_brackets) + { +- bool needs_brackets = vip->address_family == AF_INET6 && vip->port_str +- && !template; + if (needs_brackets) { + ds_put_char(s, '['); + } +@@ -331,6 +330,30 @@ ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s, bool template) + } + } + ++/* Formats the VIP in the way the ovn-controller expects it, that is, ++ * template IPv6 variables need to be between brackets too. 
++ */ ++static char * ++ovn_lb_vip6_template_format_internal(const struct ovn_lb_vip *vip) ++{ ++ struct ds s = DS_EMPTY_INITIALIZER; ++ ++ if (vip->vip_str && *vip->vip_str == LEX_TEMPLATE_PREFIX) { ++ ovn_lb_vip_format__(vip, &s, true); ++ } else { ++ ovn_lb_vip_format(vip, &s, !!vip->port_str); ++ } ++ return ds_steal_cstr(&s); ++} ++ ++void ++ovn_lb_vip_format(const struct ovn_lb_vip *vip, struct ds *s, bool template) ++{ ++ bool needs_brackets = vip->address_family == AF_INET6 && vip->port_str ++ && !template; ++ ovn_lb_vip_format__(vip, s, needs_brackets); ++} ++ + void + ovn_lb_vip_backends_format(const struct ovn_lb_vip *vip, struct ds *s, + bool template) +@@ -512,6 +535,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb) + lb->n_vips = smap_count(&nbrec_lb->vips); + lb->vips = xcalloc(lb->n_vips, sizeof *lb->vips); + lb->vips_nb = xcalloc(lb->n_vips, sizeof *lb->vips_nb); ++ smap_init(&lb->template_vips); + lb->controller_event = smap_get_bool(&nbrec_lb->options, "event", false); + + bool routable = smap_get_bool(&nbrec_lb->options, "add_route", false); +@@ -560,6 +584,12 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb) + } else { + sset_add(&lb->ips_v6, lb_vip->vip_str); + } ++ ++ if (lb->template && address_family == AF_INET6) { ++ smap_add_nocopy(&lb->template_vips, ++ ovn_lb_vip6_template_format_internal(lb_vip), ++ xstrdup(node->value)); ++ } + n_vips++; + } + +@@ -604,6 +634,15 @@ ovn_northd_lb_find(const struct hmap *lbs, const struct uuid *uuid) + return NULL; + } + ++const struct smap * ++ovn_northd_lb_get_vips(const struct ovn_northd_lb *lb) ++{ ++ if (!smap_is_empty(&lb->template_vips)) { ++ return &lb->template_vips; ++ } ++ return &lb->nlb->vips; ++} ++ + void + ovn_northd_lb_add_lr(struct ovn_northd_lb *lb, size_t n, + struct ovn_datapath **ods) +@@ -637,6 +676,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb) + } + free(lb->vips); + free(lb->vips_nb); ++ smap_destroy(&lb->template_vips); + sset_destroy(&lb->ips_v4); + sset_destroy(&lb->ips_v6); + free(lb->selection_fields); +diff --git a/lib/lb.h b/lib/lb.h +index 55a41ae0b..55becc1bf 100644 +--- a/lib/lb.h ++++ b/lib/lb.h +@@ -19,6 +19,7 @@ + + #include + #include ++#include "lib/smap.h" + #include "openvswitch/hmap.h" + #include "ovn-util.h" + #include "sset.h" +@@ -62,6 +63,9 @@ struct ovn_northd_lb { + char *selection_fields; + struct ovn_lb_vip *vips; + struct ovn_northd_lb_vip *vips_nb; ++ struct smap template_vips; /* Slightly changed template VIPs, populated ++ * if needed. Until now it's only required ++ * for IPv6 template load balancers. 
*/ + size_t n_vips; + + enum lb_neighbor_responder_mode neigh_mode; +@@ -130,6 +134,7 @@ struct ovn_northd_lb_backend { + struct ovn_northd_lb *ovn_northd_lb_create(const struct nbrec_load_balancer *); + struct ovn_northd_lb *ovn_northd_lb_find(const struct hmap *, + const struct uuid *); ++const struct smap *ovn_northd_lb_get_vips(const struct ovn_northd_lb *); + void ovn_northd_lb_destroy(struct ovn_northd_lb *); + void ovn_northd_lb_add_lr(struct ovn_northd_lb *lb, size_t n, + struct ovn_datapath **ods); +diff --git a/lib/lex.c b/lib/lex.c +index 5251868b5..a8b9812bb 100644 +--- a/lib/lex.c ++++ b/lib/lex.c +@@ -782,7 +782,7 @@ next: + p = lex_parse_port_group(p, token); + break; + +- case '^': ++ case LEX_TEMPLATE_PREFIX: + p = lex_parse_template(p, token); + break; + +@@ -1061,7 +1061,7 @@ lexer_parse_template_string(const char *s, const struct smap *template_vars, + struct sset *template_vars_ref) + { + /* No '^' means no templates. */ +- if (!strchr(s, '^')) { ++ if (!strchr(s, LEX_TEMPLATE_PREFIX)) { + return lex_str_use(s); + } + +diff --git a/lib/ovn-util.c b/lib/ovn-util.c +index 86b98acf7..69ab56423 100644 +--- a/lib/ovn-util.c ++++ b/lib/ovn-util.c +@@ -825,24 +825,6 @@ ovn_get_internal_version(void) + N_OVNACTS, OVN_INTERNAL_MINOR_VER); + } + +-unsigned int +-ovn_parse_internal_version_minor(const char *ver) +-{ +- const char *p = ver + strlen(ver); +- for (int i = 0; i < strlen(ver); i++) { +- if (*p == '.') { +- break; +- } +- p--; +- } +- +- unsigned int minor; +- if (ovs_scan(p, ".%u", &minor)) { +- return minor; +- } +- return 0; +-} +- + #ifdef DDLOG + /* Callbacks used by the ddlog northd code to print warnings and errors. */ + void +diff --git a/lib/ovn-util.h b/lib/ovn-util.h +index 809ff1d36..48dc846ad 100644 +--- a/lib/ovn-util.h ++++ b/lib/ovn-util.h +@@ -70,6 +70,23 @@ struct lport_addresses { + struct ipv6_netaddr *ipv6_addrs; + }; + ++static inline bool ++ipv6_is_all_router(const struct in6_addr *addr) ++{ ++ return ipv6_addr_equals(addr, &in6addr_all_routers); ++} ++ ++static const struct in6_addr in6addr_all_site_routers = {{{ ++ 0xff,0x05,0x00,0x00,0x00,0x00,0x00,0x00, ++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02 ++}}}; ++ ++static inline bool ++ipv6_is_all_site_router(const struct in6_addr *addr) ++{ ++ return ipv6_addr_equals(addr, &in6addr_all_site_routers); ++} ++ + bool is_dynamic_lsp_address(const char *address); + bool extract_addresses(const char *address, struct lport_addresses *, + int *ofs); +@@ -248,11 +265,6 @@ bool ip_address_and_port_from_lb_key(const char *key, char **ip_address, + * value. */ + char *ovn_get_internal_version(void); + +-/* Parse the provided internal version string and return the "minor" part which +- * is expected to be an unsigned integer followed by the last "." in the +- * string. Returns 0 if the string can't be parsed. */ +-unsigned int ovn_parse_internal_version_minor(const char *ver); +- + /* OVN Packet definitions. These may eventually find a home in OVS's + * packets.h file. For the time being, they live here because OVN uses them + * and OVS does not. 
+diff --git a/northd/inc-proc-northd.c b/northd/inc-proc-northd.c +index 363e384bd..a7b735333 100644 +--- a/northd/inc-proc-northd.c ++++ b/northd/inc-proc-northd.c +@@ -34,10 +34,13 @@ + #include "en-lflow.h" + #include "en-northd-output.h" + #include "en-sync-sb.h" ++#include "unixctl.h" + #include "util.h" + + VLOG_DEFINE_THIS_MODULE(inc_proc_northd); + ++static unixctl_cb_func chassis_features_list; ++ + #define NB_NODES \ + NB_NODE(nb_global, "nb_global") \ + NB_NODE(copp, "copp") \ +@@ -306,6 +309,12 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb, + engine_ovsdb_node_add_index(&en_sb_address_set, + "sbrec_address_set_by_name", + sbrec_address_set_by_name); ++ ++ struct northd_data *northd_data = ++ engine_get_internal_data(&en_northd); ++ unixctl_command_register("debug/chassis-features-list", "", 0, 0, ++ chassis_features_list, ++ &northd_data->features); + } + + void inc_proc_northd_run(struct ovsdb_idl_txn *ovnnb_txn, +@@ -354,3 +363,20 @@ void inc_proc_northd_cleanup(void) + engine_cleanup(); + engine_set_context(NULL); + } ++ ++static void ++chassis_features_list(struct unixctl_conn *conn, int argc OVS_UNUSED, ++ const char *argv[] OVS_UNUSED, void *features_) ++{ ++ struct chassis_features *features = features_; ++ struct ds ds = DS_EMPTY_INITIALIZER; ++ ++ ds_put_format(&ds, "ct_no_masked_label: %s\n", ++ features->ct_no_masked_label ? "true" : "false"); ++ ds_put_format(&ds, "ct_lb_related: %s\n", ++ features->ct_lb_related ? "true" : "false"); ++ ds_put_format(&ds, "mac_binding_timestamp: %s\n", ++ features->mac_binding_timestamp ? "true" : "false"); ++ unixctl_command_reply(conn, ds_cstr(&ds)); ++ ds_destroy(&ds); ++} +diff --git a/northd/northd.c b/northd/northd.c +index 841ae9cc5..9cedec909 100644 +--- a/northd/northd.c ++++ b/northd/northd.c +@@ -125,11 +125,11 @@ enum ovn_stage { + PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 11, "ls_in_lb_aff_check") \ + PIPELINE_STAGE(SWITCH, IN, LB, 12, "ls_in_lb") \ + PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 13, "ls_in_lb_aff_learn") \ +- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB, 14, "ls_in_acl_after_lb") \ +- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 15, "ls_in_stateful") \ +- PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 16, "ls_in_pre_hairpin") \ +- PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 17, "ls_in_nat_hairpin") \ +- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 18, "ls_in_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 14, "ls_in_pre_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 15, "ls_in_nat_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 16, "ls_in_hairpin") \ ++ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB, 17, "ls_in_acl_after_lb") \ ++ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 18, "ls_in_stateful") \ + PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 19, "ls_in_arp_rsp") \ + PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 20, "ls_in_dhcp_options") \ + PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 21, "ls_in_dhcp_response") \ +@@ -215,6 +215,7 @@ enum ovn_stage { + #define REGBIT_ACL_LABEL "reg0[13]" + #define REGBIT_FROM_RAMP "reg0[14]" + #define REGBIT_PORT_SEC_DROP "reg0[15]" ++#define REGBIT_ACL_HINT_ALLOW_REL "reg0[17]" + + #define REG_ORIG_DIP_IPV4 "reg1" + #define REG_ORIG_DIP_IPV6 "xxreg1" +@@ -430,6 +431,13 @@ build_chassis_features(const struct northd_input *input_data, + const struct sbrec_chassis *chassis; + + SBREC_CHASSIS_TABLE_FOR_EACH (chassis, input_data->sbrec_chassis) { ++ /* Only consider local AZ chassis. Remote ones don't install ++ * flows generated by the local northd. 
++ */ ++ if (smap_get_bool(&chassis->other_config, "is-remote", false)) { ++ continue; ++ } ++ + bool ct_no_masked_label = + smap_get_bool(&chassis->other_config, + OVN_FEATURE_CT_NO_MASKED_LABEL, +@@ -446,6 +454,15 @@ build_chassis_features(const struct northd_input *input_data, + chassis_features->mac_binding_timestamp) { + chassis_features->mac_binding_timestamp = false; + } ++ ++ bool ct_lb_related = ++ smap_get_bool(&chassis->other_config, ++ OVN_FEATURE_CT_LB_RELATED, ++ false); ++ if (!ct_lb_related && ++ chassis_features->ct_lb_related) { ++ chassis_features->ct_lb_related = false; ++ } + } + } + +@@ -4410,7 +4427,7 @@ sync_lbs(struct northd_input *input_data, struct ovsdb_idl_txn *ovnsb_txn, + + /* Update columns. */ + sbrec_load_balancer_set_name(lb->slb, lb->nlb->name); +- sbrec_load_balancer_set_vips(lb->slb, &lb->nlb->vips); ++ sbrec_load_balancer_set_vips(lb->slb, ovn_northd_lb_get_vips(lb)); + sbrec_load_balancer_set_protocol(lb->slb, lb->nlb->protocol); + sbrec_load_balancer_set_datapath_group(lb->slb, dpg->dp_group); + sbrec_load_balancer_set_options(lb->slb, &options); +@@ -4849,7 +4866,7 @@ ovn_igmp_group_get_ports(const struct sbrec_igmp_group *sb_igmp_group, + struct ovn_port *port = + ovn_port_find(ovn_ports, sb_igmp_group->ports[i]->logical_port); + +- if (!port) { ++ if (!port || !port->nbsp) { + continue; + } + +@@ -6758,7 +6775,8 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, + ct_blocked_match); + ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3, + ds_cstr(&match), REGBIT_ACL_HINT_DROP" = 0; " +- REGBIT_ACL_HINT_BLOCK" = 0; next;"); ++ REGBIT_ACL_HINT_BLOCK" = 0; " ++ REGBIT_ACL_HINT_ALLOW_REL" = 1; next;"); + ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, + ds_cstr(&match), "next;"); + +@@ -6774,14 +6792,21 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, + * a dynamically negotiated FTP data channel), but will allow + * related traffic such as an ICMP Port Unreachable through + * that's generated from a non-listening UDP port. */ ++ const char *ct_in_acl_action = ++ features->ct_lb_related ++ ? REGBIT_ACL_HINT_ALLOW_REL" = 1; ct_commit_nat;" ++ : REGBIT_ACL_HINT_ALLOW_REL" = 1; next;"; ++ const char *ct_out_acl_action = features->ct_lb_related ++ ? "ct_commit_nat;" ++ : "next;"; + ds_clear(&match); + ds_put_format(&match, "!ct.est && ct.rel && !ct.new%s && %s == 0", + use_ct_inv_match ? " && !ct.inv" : "", + ct_blocked_match); + ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX - 3, +- ds_cstr(&match), "ct_commit_nat;"); ++ ds_cstr(&match), ct_in_acl_action); + ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, +- ds_cstr(&match), "ct_commit_nat;"); ++ ds_cstr(&match), ct_out_acl_action); + + /* Ingress and Egress ACL Table (Priority 65532). + * +@@ -6790,6 +6815,11 @@ build_acls(struct ovn_datapath *od, const struct chassis_features *features, + "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;"); + ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX - 3, + "nd || nd_ra || nd_rs || mldv1 || mldv2", "next;"); ++ ++ /* Reply and related traffic matched by an "allow-related" ACL ++ * should be allowed in the ls_in_acl_after_lb stage too. */ ++ ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL_AFTER_LB, UINT16_MAX - 3, ++ REGBIT_ACL_HINT_ALLOW_REL" == 1", "next;"); + } + + /* Ingress or Egress ACL Table (Various priorities). 
*/ +@@ -7838,7 +7868,7 @@ build_lrouter_groups(struct hmap *ports, struct ovs_list *lr_list) + } + + /* +- * Ingress table 24: Flows that flood self originated ARP/RARP/ND packets in ++ * Ingress table 25: Flows that flood self originated ARP/RARP/ND packets in + * the switching domain. + */ + static void +@@ -7952,7 +7982,7 @@ lrouter_port_ipv6_reachable(const struct ovn_port *op, + } + + /* +- * Ingress table 24: Flows that forward ARP/ND requests only to the routers ++ * Ingress table 25: Flows that forward ARP/ND requests only to the routers + * that own the addresses. Other ARP/ND packets are still flooded in the + * switching domain as regular broadcast. + */ +@@ -7989,7 +8019,7 @@ build_lswitch_rport_arp_req_flow(const char *ips, + } + + /* +- * Ingress table 24: Flows that forward ARP/ND requests only to the routers ++ * Ingress table 25: Flows that forward ARP/ND requests only to the routers + * that own the addresses. + * Priorities: + * - 80: self originated GARPs that need to follow regular processing. +@@ -8318,7 +8348,8 @@ build_lswitch_flows(const struct hmap *datapaths, + + struct ovn_datapath *od; + +- /* Ingress table 25: Destination lookup for unknown MACs (priority 0). */ ++ /* Ingress table 25/26: Destination lookup for unknown MACs ++ * (priority 0). */ + HMAP_FOR_EACH (od, key_node, datapaths) { + if (!od->nbs) { + continue; +@@ -8393,7 +8424,7 @@ build_lswitch_lflows_admission_control(struct ovn_datapath *od, + } + } + +-/* Ingress table 18: ARP/ND responder, skip requests coming from localnet ++/* Ingress table 19: ARP/ND responder, skip requests coming from localnet + * ports. (priority 100); see ovn-northd.8.xml for the rationale. */ + + static void +@@ -8411,7 +8442,7 @@ build_lswitch_arp_nd_responder_skip_local(struct ovn_port *op, + } + } + +-/* Ingress table 18: ARP/ND responder, reply for known IPs. ++/* Ingress table 19: ARP/ND responder, reply for known IPs. + * (priority 50). */ + static void + build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op, +@@ -8671,7 +8702,7 @@ build_lswitch_arp_nd_responder_known_ips(struct ovn_port *op, + } + } + +-/* Ingress table 18: ARP/ND responder, by default goto next. ++/* Ingress table 19: ARP/ND responder, by default goto next. + * (priority 0)*/ + static void + build_lswitch_arp_nd_responder_default(struct ovn_datapath *od, +@@ -8682,7 +8713,7 @@ build_lswitch_arp_nd_responder_default(struct ovn_datapath *od, + } + } + +-/* Ingress table 18: ARP/ND responder for service monitor source ip. ++/* Ingress table 19: ARP/ND responder for service monitor source ip. + * (priority 110)*/ + static void + build_lswitch_arp_nd_service_monitor(struct ovn_northd_lb *lb, +@@ -8730,7 +8761,7 @@ build_lswitch_arp_nd_service_monitor(struct ovn_northd_lb *lb, + } + + +-/* Logical switch ingress table 19 and 20: DHCP options and response ++/* Logical switch ingress table 20 and 21: DHCP options and response + * priority 100 flows. */ + static void + build_lswitch_dhcp_options_and_response(struct ovn_port *op, +@@ -8782,11 +8813,11 @@ build_lswitch_dhcp_options_and_response(struct ovn_port *op, + } + } + +-/* Ingress table 19 and 20: DHCP options and response, by default goto ++/* Ingress table 20 and 21: DHCP options and response, by default goto + * next. (priority 0). +- * Ingress table 21 and 22: DNS lookup and response, by default goto next. ++ * Ingress table 22 and 23: DNS lookup and response, by default goto next. + * (priority 0). +- * Ingress table 23 - External port handling, by default goto next. 
++ * Ingress table 24 - External port handling, by default goto next. + * (priority 0). */ + static void + build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od, +@@ -8801,7 +8832,7 @@ build_lswitch_dhcp_and_dns_defaults(struct ovn_datapath *od, + } + } + +-/* Logical switch ingress table 21 and 22: DNS lookup and response ++/* Logical switch ingress table 22 and 23: DNS lookup and response + * priority 100 flows. + */ + static void +@@ -8829,7 +8860,7 @@ build_lswitch_dns_lookup_and_response(struct ovn_datapath *od, + } + } + +-/* Table 23: External port. Drop ARP request for router ips from ++/* Table 24: External port. Drop ARP request for router ips from + * external ports on chassis not binding those ports. + * This makes the router pipeline to be run only on the chassis + * binding the external ports. */ +@@ -8846,7 +8877,7 @@ build_lswitch_external_port(struct ovn_port *op, + } + } + +-/* Ingress table 24: Destination lookup, broadcast and multicast handling ++/* Ingress table 25: Destination lookup, broadcast and multicast handling + * (priority 70 - 100). */ + static void + build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od, +@@ -8931,7 +8962,7 @@ build_lswitch_destination_lookup_bmcast(struct ovn_datapath *od, + } + + +-/* Ingress table 24: Add IP multicast flows learnt from IGMP/MLD ++/* Ingress table 25: Add IP multicast flows learnt from IGMP/MLD + * (priority 90). */ + static void + build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group, +@@ -8973,9 +9004,11 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group, + igmp_group->mcgroup.name); + } else { + /* RFC 4291, section 2.7.1: Skip groups that correspond to all +- * hosts. ++ * hosts, all link-local routers and all site routers. + */ +- if (ipv6_is_all_hosts(&igmp_group->address)) { ++ if (ipv6_is_all_hosts(&igmp_group->address) || ++ ipv6_is_all_router(&igmp_group->address) || ++ ipv6_is_all_site_router(&igmp_group->address)) { + return; + } + if (atomic_compare_exchange_strong( +@@ -9013,7 +9046,7 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group, + + static struct ovs_mutex mcgroup_mutex = OVS_MUTEX_INITIALIZER; + +-/* Ingress table 24: Destination lookup, unicast handling (priority 50), */ ++/* Ingress table 25: Destination lookup, unicast handling (priority 50), */ + static void + build_lswitch_ip_unicast_lookup(struct ovn_port *op, + struct hmap *lflows, +@@ -10471,9 +10504,11 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, + struct hmap *lflows, + struct ds *match, struct ds *action, + const struct shash *meter_groups, +- bool ct_lb_mark) ++ const struct chassis_features *features) + { +- const char *ct_natted = ct_lb_mark ? "ct_mark.natted" : "ct_label.natted"; ++ const char *ct_natted = features->ct_no_masked_label ++ ? "ct_mark.natted" ++ : "ct_label.natted"; + char *skip_snat_new_action = NULL; + char *skip_snat_est_action = NULL; + char *new_match; +@@ -10484,7 +10519,7 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, + + bool reject = build_lb_vip_actions(lb_vip, vips_nb, action, + lb->selection_fields, false, +- ct_lb_mark); ++ features->ct_no_masked_label); + bool drop = !!strncmp(ds_cstr(action), "ct_lb", strlen("ct_lb")); + if (!drop) { + /* Remove the trailing ");". */ +@@ -10506,9 +10541,11 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, + } + + if (lb->skip_snat) { +- skip_snat_new_action = xasprintf("flags.skip_snat_for_lb = 1; %s%s", +- ds_cstr(action), +- drop ? 
"" : "; skip_snat);"); ++ const char *skip_snat = features->ct_lb_related && !drop ++ ? "; skip_snat" ++ : ""; ++ skip_snat_new_action = xasprintf("flags.skip_snat_for_lb = 1; %s%s);", ++ ds_cstr(action), skip_snat); + skip_snat_est_action = xasprintf("flags.skip_snat_for_lb = 1; " + "next;"); + } +@@ -10641,9 +10678,11 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip, + skip_snat_new_action, est_match, + skip_snat_est_action, lflows, prio, meter_groups); + +- char *new_actions = xasprintf("flags.force_snat_for_lb = 1; %s%s", +- ds_cstr(action), +- drop ? "" : "; force_snat);"); ++ const char *force_snat = features->ct_lb_related && !drop ++ ? "; force_snat" ++ : ""; ++ char *new_actions = xasprintf("flags.force_snat_for_lb = 1; %s%s);", ++ ds_cstr(action), force_snat); + build_gw_lrouter_nat_flows_for_lb(lb, gw_router_force_snat, + n_gw_router_force_snat, reject, new_match, + new_actions, est_match, +@@ -10898,7 +10937,7 @@ build_lrouter_flows_for_lb(struct ovn_northd_lb *lb, struct hmap *lflows, + + build_lrouter_nat_flows_for_lb(lb_vip, lb, &lb->vips_nb[i], + lflows, match, action, meter_groups, +- features->ct_no_masked_label); ++ features); + + if (!build_empty_lb_event_flow(lb_vip, lb, match, action)) { + continue; +@@ -14208,7 +14247,7 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows, + const struct hmap *ports, struct ds *match, + struct ds *actions, + const struct shash *meter_groups, +- bool ct_lb_mark) ++ const struct chassis_features *features) + { + if (!od->nbr) { + return; +@@ -14239,9 +14278,11 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows, + * a dynamically negotiated FTP data channel), but will allow + * related traffic such as an ICMP Port Unreachable through + * that's generated from a non-listening UDP port. */ +- if (od->has_lb_vip) { ++ if (od->has_lb_vip && features->ct_lb_related) { + ds_clear(match); +- const char *ct_flag_reg = ct_lb_mark ? "ct_mark" : "ct_label"; ++ const char *ct_flag_reg = features->ct_no_masked_label ++ ? "ct_mark" ++ : "ct_label"; + + ds_put_cstr(match, "ct.rel && !ct.est && !ct.new"); + size_t match_len = match->length; +@@ -14328,6 +14369,23 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows, + sset_add(&nat_entries, nat->external_ip); + } else { + if (!sset_contains(&nat_entries, nat->external_ip)) { ++ /* Drop packets coming in from external that still has ++ * destination IP equals to the NAT external IP, to avoid loop. ++ * The packets must have gone through DNAT/unSNAT stage but ++ * failed to convert the destination. */ ++ ds_clear(match); ++ ds_put_format( ++ match, "inport == %s && outport == %s && ip%s.dst == %s", ++ l3dgw_port->json_key, l3dgw_port->json_key, ++ is_v6 ? "6" : "4", nat->external_ip); ++ ovn_lflow_add_with_hint(lflows, od, ++ S_ROUTER_IN_ARP_RESOLVE, ++ 150, ds_cstr(match), ++ debug_drop_action(), ++ &nat->header_); ++ /* Now for packets coming from other (downlink) LRPs, allow ARP ++ * resolve for the NAT IP, so that such packets can be ++ * forwarded for E/W NAT. */ + ds_clear(match); + ds_put_format( + match, "outport == %s && %s == %s", +@@ -14464,7 +14522,7 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows, + + if (od->nbr->n_nat) { + ds_clear(match); +- const char *ct_natted = ct_lb_mark ? ++ const char *ct_natted = features->ct_no_masked_label ? 
+ "ct_mark.natted" : + "ct_label.natted"; + ds_put_format(match, "ip && %s == 1", ct_natted); +@@ -14581,7 +14639,7 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od, + build_lrouter_arp_nd_for_datapath(od, lsi->lflows, lsi->meter_groups); + build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->ports, &lsi->match, + &lsi->actions, lsi->meter_groups, +- lsi->features->ct_no_masked_label); ++ lsi->features); + build_lb_affinity_default_flows(od, lsi->lflows); + } + +@@ -16073,6 +16131,7 @@ northd_init(struct northd_data *data) + data->features = (struct chassis_features) { + .ct_no_masked_label = true, + .mac_binding_timestamp = true, ++ .ct_lb_related = true, + }; + data->ovn_internal_version_changed = false; + } +diff --git a/northd/northd.h b/northd/northd.h +index ff8727cb7..4d9055296 100644 +--- a/northd/northd.h ++++ b/northd/northd.h +@@ -71,6 +71,7 @@ struct northd_input { + struct chassis_features { + bool ct_no_masked_label; + bool mac_binding_timestamp; ++ bool ct_lb_related; + }; + + struct northd_data { +diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml +index 058cbf71a..4de015e40 100644 +--- a/northd/ovn-northd.8.xml ++++ b/northd/ovn-northd.8.xml +@@ -790,8 +790,9 @@ + policy, ct_mark.blocked will get set and packets in the + reply direction will no longer be allowed, either. This flow also + clears the register bits reg0[9] and +- reg0[10]. If ACL logging and logging of related packets +- is enabled, then a companion priority-65533 flow will be installed that ++ reg0[10] and sets register bit reg0[17]. ++ If ACL logging and logging of related packets is enabled, then a ++ companion priority-65533 flow will be installed that + accomplishes the same thing but also logs the traffic. + + +@@ -1028,92 +1029,7 @@ + + + +-

Ingress table 14: from-lport ACLs after LB

+- +-

+- Logical flows in this table closely reproduce those in the +- ACL table in the OVN_Northbound database +- for the from-lport direction with the option +- apply-after-lb set to true. +- The priority values from the ACL table have a +- limited range and have 1000 added to them to leave room for OVN default +- flows at both higher and lower priorities. +-

+- +-
    +-
  • +- allow apply-after-lb ACLs translate into logical flows +- with the next; action. If there are any stateful ACLs +- (including both before-lb and after-lb ACLs) +- on this datapath, then allow ACLs translate to +- ct_commit; next; (which acts as a hint for the next tables +- to commit the connection to conntrack). In case the ACL +- has a label then reg3 is loaded with the label value and +- reg0[13] bit is set to 1 (which acts as a hint for the +- next tables to commit the label to conntrack). +-
  • +-
  • +- allow-related apply-after-lb ACLs translate into logical +- flows with the ct_commit(ct_label=0/1); next; actions +- for new connections and reg0[1] = 1; next; for existing +- connections. In case the ACL has a label then +- reg3 is loaded with the label value and +- reg0[13] bit is set to 1 (which acts as a hint for the +- next tables to commit the label to conntrack). +-
  • +-
  • +- allow-stateless apply-after-lb ACLs translate into logical +- flows with the next; action. +-
  • +-
  • +- reject apply-after-lb ACLs translate into logical +- flows with the +- tcp_reset { output <-> inport; +- next(pipeline=egress,table=5);} +- action for TCP connections,icmp4/icmp6 action +- for UDP connections, and sctp_abort {output <-%gt; inport; +- next(pipeline=egress,table=5);} action for SCTP associations. +-
  • +-
  • +- Other apply-after-lb ACLs translate to drop; for new +- or untracked connections and ct_commit(ct_label=1/1); for +- known connections. Setting ct_label marks a connection +- as one that was previously allowed, but should no longer be +- allowed due to a policy change. +-
  • +-
+- +-
    +-
  • +- One priority-0 fallback flow that matches all packets and advances to +- the next table. +-
  • +-
+- +-

Ingress Table 15: Stateful

+- +-
    +-
  • +- A priority 100 flow is added which commits the packet to the conntrack +- and sets the most significant 32-bits of ct_label with the +- reg3 value based on the hint provided by previous tables +- (with a match for reg0[1] == 1 && reg0[13] == 1). +- This is used by the ACLs with label to commit the label +- value to conntrack. +-
  • +- +-
  • +- For ACLs without label, a second priority-100 flow commits +- packets to connection tracker using ct_commit; next; +- action based on a hint provided by the previous tables (with a match +- for reg0[1] == 1 && reg0[13] == 0). +-
  • +-
  • +- A priority-0 flow that simply moves traffic to the next table. +-
  • +-
+- +-

Ingress Table 16: Pre-Hairpin

++

Ingress Table 14: Pre-Hairpin

+
    +
  • + If the logical switch has load balancer(s) configured, then a +@@ -1131,7 +1047,7 @@ +
  • +
+ +-

Ingress Table 17: Nat-Hairpin

++

Ingress Table 15: Nat-Hairpin

+
    +
  • + If the logical switch has load balancer(s) configured, then a +@@ -1166,7 +1082,7 @@ +
  • +
+ +-

Ingress Table 18: Hairpin

++

Ingress Table 16: Hairpin

+
    +
  • +

    +@@ -1200,6 +1116,100 @@ +

  • +
+ ++

Ingress table 17: from-lport ACLs after LB

++ ++

++ Logical flows in this table closely reproduce those in the ++ ACL table in the OVN_Northbound database ++ for the from-lport direction with the option ++ apply-after-lb set to true. ++ The priority values from the ACL table have a ++ limited range and have 1000 added to them to leave room for OVN default ++ flows at both higher and lower priorities. ++

++ ++
    ++
  • ++ allow apply-after-lb ACLs translate into logical flows ++ with the next; action. If there are any stateful ACLs ++ (including both before-lb and after-lb ACLs) ++ on this datapath, then allow ACLs translate to ++ ct_commit; next; (which acts as a hint for the next tables ++ to commit the connection to conntrack). In case the ACL ++ has a label then reg3 is loaded with the label value and ++ reg0[13] bit is set to 1 (which acts as a hint for the ++ next tables to commit the label to conntrack). ++
  • ++
  • ++ allow-related apply-after-lb ACLs translate into logical ++ flows with the ct_commit(ct_label=0/1); next; actions ++ for new connections and reg0[1] = 1; next; for existing ++ connections. In case the ACL has a label then ++ reg3 is loaded with the label value and ++ reg0[13] bit is set to 1 (which acts as a hint for the ++ next tables to commit the label to conntrack). ++
  • ++
  • ++ allow-stateless apply-after-lb ACLs translate into logical ++ flows with the next; action. ++
  • ++
  • ++ reject apply-after-lb ACLs translate into logical ++ flows with the ++ tcp_reset { output <-> inport; ++ next(pipeline=egress,table=5);} ++ action for TCP connections,icmp4/icmp6 action ++ for UDP connections, and sctp_abort {output <-%gt; inport; ++ next(pipeline=egress,table=5);} action for SCTP associations. ++
  • ++
  • ++ Other apply-after-lb ACLs translate to drop; for new ++ or untracked connections and ct_commit(ct_label=1/1); for ++ known connections. Setting ct_label marks a connection ++ as one that was previously allowed, but should no longer be ++ allowed due to a policy change. ++
  • ++
++ ++
    ++
  • ++ One priority-65532 flow matching packets with reg0[17] ++ set (either replies to existing sessions or traffic related to ++ existing sessions) and allows these by advancing to the next ++ table. ++
  • ++
++ ++
    ++
  • ++ One priority-0 fallback flow that matches all packets and advances to ++ the next table. ++
  • ++
++ ++

Ingress Table 18: Stateful

++ ++
    ++
  • ++ A priority 100 flow is added which commits the packet to the conntrack ++ and sets the most significant 32-bits of ct_label with the ++ reg3 value based on the hint provided by previous tables ++ (with a match for reg0[1] == 1 && reg0[13] == 1). ++ This is used by the ACLs with label to commit the label ++ value to conntrack. ++
  • ++ ++
  • ++ For ACLs without label, a second priority-100 flow commits ++ packets to connection tracker using ct_commit; next; ++ action based on a hint provided by the previous tables (with a match ++ for reg0[1] == 1 && reg0[13] == 0). ++
  • ++
  • ++ A priority-0 flow that simply moves traffic to the next table. ++
  • ++
++ +

Ingress Table 19: ARP/ND responder

+ +

+@@ -4257,13 +4267,17 @@ outport = P + For each row in the NAT table with IPv4 address + A in the column of +- table, a priority-100 +- flow with the match outport === P && +- reg0 == A has actions eth.dst = E; +- next;, where P is the distributed logical router +- port, E is the Ethernet address if set in the +- column +- of table for of type ++ table, below two flows are ++ programmed: ++

++ ++

++ A priority-100 flow with the match outport == P ++ && reg0 == A has actions eth.dst = ++ E; next;, where P is the distributed ++ logical router port, E is the Ethernet address if set in ++ the ++ column of table for of type + dnat_and_snat, otherwise the Ethernet address of the + distributed logical router port. Note that if the + is not +@@ -4273,9 +4287,18 @@ outport = P + will be added. +

+ ++

++ Corresponding to the above flow, a priority-150 flow with the match ++ inport == P && outport == P ++ && ip4.dst == A has actions ++ drop; to exclude packets that have gone through ++ DNAT/unSNAT stage but failed to convert the destination, to avoid ++ loop. ++

++ +

+ For IPv6 NAT entries, same flows are added, but using the register +- xxreg0 for the match. ++ xxreg0 and field ip6 for the match. +

+ + +diff --git a/ovn-architecture.7.xml b/ovn-architecture.7.xml +index b2e00d6e4..cb1064f71 100644 +--- a/ovn-architecture.7.xml ++++ b/ovn-architecture.7.xml +@@ -2832,8 +2832,7 @@ + The maximum number of networks is reduced to 4096. + +
  • +- The maximum number of ports per network is reduced to 4096. (Including +- multicast group ports.) ++ The maximum number of ports per network is reduced to 2048. +
  • +
  • + ACLs matching against logical ingress port identifiers are not supported. +diff --git a/tests/atlocal.in b/tests/atlocal.in +index 0b9a31276..02e9ce9bb 100644 +--- a/tests/atlocal.in ++++ b/tests/atlocal.in +@@ -166,6 +166,9 @@ fi + # Set HAVE_TCPDUMP + find_command tcpdump + ++# Set HAVE_XXD ++find_command xxd ++ + # Set HAVE_LFTP + find_command lftp + +diff --git a/tests/network-functions.at b/tests/network-functions.at +index c583bc31e..a2481c55c 100644 +--- a/tests/network-functions.at ++++ b/tests/network-functions.at +@@ -128,12 +128,18 @@ OVS_START_SHELL_HELPERS + # hex_to_binary HEXDIGITS + # + # Converts the pairs of HEXDIGITS into bytes and prints them on stdout. +-hex_to_binary() { +- printf $(while test -n "$1"; do +- printf '\\%03o' 0x$(expr "$1" : '\(..\)') +- set -- "${1##??}" +- done) +-} ++if test x$HAVE_XXD = xno; then ++ hex_to_binary() { ++ printf $(while test -n "$1"; do ++ printf '\\%03o' 0x$(expr "$1" : '\(..\)') ++ set -- "${1##??}" ++ done) ++ } ++else ++ hex_to_binary() { ++ echo $1 | xxd -r -p ++ } ++fi + + # tcpdump_hex TITLE PACKET + # +diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at +index 6bc9ba75d..e2f4fc85c 100644 +--- a/tests/ovn-controller.at ++++ b/tests/ovn-controller.at +@@ -2499,3 +2499,30 @@ AT_CHECK([GET_LOCAL_TEMPLATE_VARS], [1], []) + + AT_CLEANUP + ]) ++ ++OVN_FOR_EACH_NORTHD([ ++AT_SETUP([ovn-controller - Requested SNAT Zone in router creation transaction]) ++ovn_start ++ ++net_add n1 ++sim_add hv1 ++as hv1 ++check ovs-vsctl add-br br-phys ++ovn_attach n1 br-phys 192.168.0.1 ++ ++dnl This is key. Add the snat-ct-zone when creating the logical router and then ++dnl do not make any further changes to the logical router settings. ++check ovn-nbctl lr-add lr0 -- set Logical_Router lr0 options:snat-ct-zone=666 ++check ovn-nbctl lrp-add lr0 lrp-gw 01:00:00:00:00:01 172.16.0.1 ++check ovn-nbctl lrp-set-gateway-chassis lrp-gw hv1 ++ ++check ovn-nbctl --wait=hv sync ++ ++lr_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=lr0) ++ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list) ++zone_num=$(printf "$ct_zones" | grep ${lr_uuid}_snat | cut -d ' ' -f 2) ++ ++check test "$zone_num" -eq 666 ++ ++AT_CLEANUP ++]) +diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at +index c25d1122c..072102f36 100644 +--- a/tests/ovn-northd.at ++++ b/tests/ovn-northd.at +@@ -2232,9 +2232,9 @@ check ovn-nbctl acl-add sw0 to-lport 1002 'outport == "sw0-p1" && ip4.src == 10. 
+ check ovn-nbctl acl-add sw0 to-lport 1002 'outport == "sw0-p1" && ip4.src == 10.0.0.13' allow + check ovn-nbctl acl-add pg0 to-lport 1002 'outport == "pg0" && ip4.src == 10.0.0.11' drop + +-acl1=$(ovn-nbctl --bare --column _uuid,match find acl | grep -B1 '10.0.0.12' | head -1) +-acl2=$(ovn-nbctl --bare --column _uuid,match find acl | grep -B1 '10.0.0.13' | head -1) +-acl3=$(ovn-nbctl --bare --column _uuid,match find acl | grep -B1 '10.0.0.11' | head -1) ++acl1=$(ovn-nbctl --bare --column _uuid,match find acl | grep -F -B1 '10.0.0.12' | head -1) ++acl2=$(ovn-nbctl --bare --column _uuid,match find acl | grep -F -B1 '10.0.0.13' | head -1) ++acl3=$(ovn-nbctl --bare --column _uuid,match find acl | grep -F -B1 '10.0.0.11' | head -1) + check ovn-nbctl set acl $acl1 log=true severity=alert meter=meter_me name=acl_one + check ovn-nbctl set acl $acl2 log=true severity=info meter=meter_me name=acl_two + check ovn-nbctl set acl $acl3 log=true severity=info meter=meter_me name=acl_three +@@ -2472,8 +2472,8 @@ AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e + table=7 (ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) + table=8 (ls_in_acl ), priority=1 , match=(ip && !ct.est), action=(reg0[[1]] = 1; next;) + table=8 (ls_in_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) +- table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=8 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + ]) + +@@ -2485,7 +2485,8 @@ check ovn-nbctl --wait=sb \ + -- ls-lb-add ls lb + + AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl +- table=14(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) ++ table=17(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) ++ table=17(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=3 (ls_out_acl_hint ), priority=0 , match=(1), action=(next;) + table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -2518,8 +2519,8 @@ AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e + table=8 (ls_in_acl ), priority=1001 , match=(reg0[[7]] == 1 && (ip)), action=(reg0[[1]] = 1; next;) + table=8 (ls_in_acl ), priority=1001 , match=(reg0[[8]] == 1 && (ip)), action=(next;) + table=8 (ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && 
ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=8 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=8 (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + ]) +@@ -2528,7 +2529,7 @@ ovn-nbctl --wait=sb clear logical_switch ls acls + ovn-nbctl --wait=sb clear logical_switch ls load_balancer + + AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl +- table=14(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) ++ table=17(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) + table=3 (ls_out_acl_hint ), priority=65535, match=(1), action=(next;) + table=4 (ls_out_acl ), priority=65535, match=(1), action=(next;) + table=7 (ls_in_acl_hint ), priority=65535, match=(1), action=(next;) +@@ -4360,8 +4361,8 @@ ovn-sbctl dump-flows sw0 > sw0flows + AT_CAPTURE_FILE([sw0flows]) + + AT_CHECK([grep -w "ls_in_acl" sw0flows | grep 6553 | sort | sed 's/table=./table=?/'], [0], [dnl +- table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=? (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=? (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + ]) +@@ -4380,9 +4381,9 @@ ovn-sbctl dump-flows sw0 > sw0flows + AT_CAPTURE_FILE([sw0flows]) + + AT_CHECK([grep -w "ls_in_acl" sw0flows | grep 6553 | sort | sed 's/table=./table=?/'], [0], [dnl +- table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && ct_mark.blocked == 0), action=(ct_commit_nat;) ++ table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) + table=? (ls_in_acl ), priority=65532, match=((ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) +- table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=? (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + ]) + +@@ -4404,8 +4405,8 @@ ovn-sbctl dump-flows sw0 > sw0flows + AT_CAPTURE_FILE([sw0flows]) + + AT_CHECK([grep -w "ls_in_acl" sw0flows | grep 6553 | sort | sed 's/table=./table=?/'], [0], [dnl +- table=? 
(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=? (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=? (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + ]) +@@ -5139,7 +5140,8 @@ AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [ + ]) + + check ovn-sbctl chassis-add gw1 geneve 127.0.0.1 \ +- -- set chassis gw1 other_config:ct-no-masked-label="true" ++ -- set chassis gw1 other_config:ct-no-masked-label="true" \ ++ -- set chassis gw1 other_config:ovn-ct-lb-related="true" + + # Create a distributed gw port on lr0 + check ovn-nbctl ls-add public +@@ -6685,11 +6687,12 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], + table=??(ls_in_acl ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */) + table=??(ls_in_acl ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(/* drop */) + table=??(ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=??(ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=??(ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + table=??(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) ++ table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=??(ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=??(ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=??(ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -6730,8 +6733,8 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], + table=??(ls_in_acl ), priority=1 , match=(ip && !ct.est), action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && 
!ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=??(ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=??(ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + table=??(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) +@@ -6743,6 +6746,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], + table=??(ls_in_acl_after_lb ), priority=2003 , match=(reg0[[8]] == 1 && (ip4 && icmp)), action=(next;) + table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */) + table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(/* drop */) ++ table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=??(ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=??(ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=??(ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -6787,8 +6791,8 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], + table=??(ls_in_acl ), priority=2003 , match=(reg0[[7]] == 1 && (ip4 && icmp)), action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl ), priority=2003 , match=(reg0[[8]] == 1 && (ip4 && icmp)), action=(next;) + table=??(ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=??(ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=??(ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + table=??(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;) +@@ -6796,6 +6800,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0], + table=??(ls_in_acl_after_lb ), priority=2001 , match=(reg0[[9]] == 1 && (ip4)), action=(/* drop */) + table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[10]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), action=(ct_commit { ct_mark.blocked = 1; }; /* drop */) + table=??(ls_in_acl_after_lb ), priority=2004 , match=(reg0[[9]] == 1 && (ip4 && ip4.dst == 10.0.0.2)), 
action=(/* drop */) ++ table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=??(ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=??(ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=??(ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -7219,11 +7224,12 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/ + table=??(ls_in_acl ), priority=1001 , match=(reg0[[7]] == 1 && (ip4 && tcp)), action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl ), priority=1001 , match=(reg0[[8]] == 1 && (ip4 && tcp)), action=(next;) + table=??(ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=??(ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=??(ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + table=??(ls_in_acl_after_lb ), priority=0 , match=(1), action=(drop;) ++ table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=??(ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=??(ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=??(ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -7342,13 +7348,14 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/ + table=??(ls_in_acl ), priority=1 , match=(ip && !ct.est), action=(drop;) + table=??(ls_in_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=??(ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=??(ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + table=??(ls_in_acl_after_lb ), priority=0 , match=(1), action=(drop;) + table=??(ls_in_acl_after_lb ), priority=1001 , match=(reg0[[7]] == 1 && (ip4 && tcp)), 
action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl_after_lb ), priority=1001 , match=(reg0[[8]] == 1 && (ip4 && tcp)), action=(next;) ++ table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=??(ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=??(ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=??(ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -7467,11 +7474,12 @@ AT_CHECK([ovn-sbctl dump-flows | grep -E "ls_.*_acl" | sed 's/table=../table=??/ + table=??(ls_in_acl ), priority=1 , match=(ip && !ct.est), action=(drop;) + table=??(ls_in_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) + table=??(ls_in_acl ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;) +- table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=??(ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=??(ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=??(ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=??(ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) + table=??(ls_in_acl_after_lb ), priority=0 , match=(1), action=(drop;) ++ table=??(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) + table=??(ls_in_acl_hint ), priority=0 , match=(1), action=(next;) + table=??(ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) + table=??(ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) +@@ -7775,7 +7783,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl + table=??(ls_in_check_port_sec), priority=100 , match=(vlan.present), action=(drop;) + table=??(ls_in_check_port_sec), priority=50 , match=(1), action=(reg0[[15]] = check_in_port_sec(); next;) + table=??(ls_in_check_port_sec), priority=70 , match=(inport == "localnetport"), action=(set_queue(10); reg0[[15]] = check_in_port_sec(); next;) +- table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p1"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=18);) ++ table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p1"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=16);) + table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p2"), action=(set_queue(10); reg0[[15]] = check_in_port_sec(); next;) + table=??(ls_in_apply_port_sec), priority=0 , match=(1), action=(next;) + table=??(ls_in_apply_port_sec), priority=50 , match=(reg0[[15]] == 1), action=(drop;) +@@ -7832,6 +7840,22 @@ AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl + table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;) + ]) + ++check ovn-nbctl --wait=sb set logical_router lr options:lb_force_snat_ip="42.42.42.1" ++AT_CHECK([ovn-sbctl lflow-list | grep lr_in_dnat], [0], [dnl ++ 
table=7 (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;) ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(flags.force_snat_for_lb = 1; ct_lb(backends=42.42.42.2);) ++ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++]) ++check ovn-nbctl remove logical_router lr options lb_force_snat_ip ++ ++check ovn-nbctl --wait=sb set load_balancer lb-test options:skip_snat="true" ++AT_CHECK([ovn-sbctl lflow-list | grep lr_in_dnat], [0], [dnl ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(flags.skip_snat_for_lb = 1; next;) ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 66.66.66.66), action=(flags.skip_snat_for_lb = 1; ct_lb(backends=42.42.42.2);) ++ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++]) ++check ovn-nbctl remove load_balancer lb-test options skip_snat ++ + AS_BOX([Chassis upgrades and supports ct_lb_mark - use ct_lb_mark and ct_mark.natted]) + check ovn-sbctl set chassis hv other_config:ct-no-masked-label=true + check ovn-nbctl --wait=sb sync +@@ -7865,8 +7889,8 @@ AT_CHECK([ovn-sbctl lflow-list | grep 'ls.*acl.*blocked' ], [0], [dnl + table=7 (ls_in_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) + table=7 (ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) + table=7 (ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=8 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=8 (ls_in_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) +@@ -7887,15 +7911,15 @@ AT_CHECK([ovn-sbctl lflow-list | grep 'ls.*acl.*blocked' ], [0], [dnl + table=7 (ls_in_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) + table=7 (ls_in_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) + table=7 (ls_in_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(ct_commit_nat;) +- table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ 
table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(reg0[[17]] = 1; next;) ++ table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=8 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) + table=8 (ls_in_acl ), priority=1 , match=(ip && ct.est && ct_label.blocked == 1), action=(reg0[[1]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_label.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_label.blocked == 1), action=(reg0[[9]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_label.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(ct_commit_nat;) ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) + table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) + table=4 (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) + table=4 (ls_out_acl ), priority=1 , match=(ip && ct.est && ct_label.blocked == 1), action=(reg0[[1]] = 1; next;) +@@ -7909,15 +7933,15 @@ AT_CHECK([ovn-sbctl lflow-list | grep 'ls.*acl.*blocked' ], [0], [dnl + table=7 (ls_in_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) + table=7 (ls_in_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) + table=7 (ls_in_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) +- table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; next;) ++ table=8 (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; next;) ++ table=8 (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) + table=8 (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=8 (ls_in_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=6 , match=(!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 1), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=4 , match=(!ct.new && ct.est && !ct.rpl && ct_mark.blocked == 0), action=(reg0[[8]] = 1; reg0[[10]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;) + table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && 
ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;) +- table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) ++ table=4 (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(next;) + table=4 (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;) + table=4 (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) + table=4 (ls_out_acl ), priority=1 , match=(ip && ct.est && ct_mark.blocked == 1), action=(reg0[[1]] = 1; next;) +@@ -8437,3 +8461,156 @@ check_row_count sb:Chassis_Template_Var 0 + + AT_CLEANUP + ]) ++ ++OVN_FOR_EACH_NORTHD_NO_HV([ ++AT_SETUP([Load balancer CT related backwards compatibility]) ++AT_KEYWORDS([lb]) ++ovn_start ++ ++check ovn-nbctl \ ++ -- ls-add ls \ ++ -- lr-add lr -- set logical_router lr options:chassis=local \ ++ -- lb-add lb-test 192.168.0.1 192.168.1.10 \ ++ -- ls-lb-add ls lb-test \ ++ -- lr-lb-add lr lb-test ++ ++m4_define([DUMP_FLOWS_SORTED], [sed 's/table=[[0-9]]\{1,2\}/table=?/' | sort]) ++ ++AS_BOX([No chassis registered - CT related flows should be installed]) ++check ovn-nbctl --wait=sb sync ++ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows0 ++ ++AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows0], [0], [dnl ++ table=? (lr_in_defrag ), priority=0 , match=(1), action=(next;) ++ table=? (lr_in_defrag ), priority=100 , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;) ++ table=? (lr_in_defrag ), priority=50 , match=(icmp || icmp6), action=(ct_dnat;) ++ table=? (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++ table=? (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_mark.natted == 1), action=(next;) ++ table=? (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);) ++ table=? (lr_in_dnat ), priority=50 , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;) ++ table=? (lr_in_dnat ), priority=70 , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;) ++ table=? (lr_in_dnat ), priority=70 , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;) ++]) ++ ++AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows0 | grep "priority=65532"], [0], [dnl ++ table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) ++ table=? (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) ++ table=? (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) ++ table=? 
(ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) ++]) ++ ++ ++AS_BOX([Chassis registered that doesn't support CT related]) ++check ovn-sbctl chassis-add hv geneve 127.0.0.1 ++check ovn-nbctl --wait=sb sync ++ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows1 ++ ++AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows1], [0], [dnl ++ table=? (lr_in_defrag ), priority=0 , match=(1), action=(next;) ++ table=? (lr_in_defrag ), priority=100 , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;) ++ table=? (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++ table=? (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_label.natted == 1), action=(next;) ++ table=? (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb(backends=192.168.1.10);) ++]) ++ ++check ovn-nbctl --wait=sb set logical_router lr options:lb_force_snat_ip="192.168.1.1" ++AT_CHECK([ovn-sbctl lflow-list | grep lr_in_dnat], [0], [dnl ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;) ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(flags.force_snat_for_lb = 1; ct_lb(backends=192.168.1.10);) ++ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++]) ++check ovn-nbctl remove logical_router lr options lb_force_snat_ip ++ ++check ovn-nbctl --wait=sb set load_balancer lb-test options:skip_snat="true" ++AT_CHECK([ovn-sbctl lflow-list | grep lr_in_dnat], [0], [dnl ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_label.natted == 1), action=(flags.skip_snat_for_lb = 1; next;) ++ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(flags.skip_snat_for_lb = 1; ct_lb(backends=192.168.1.10);) ++ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++]) ++check ovn-nbctl remove load_balancer lb-test options skip_snat ++ ++AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows1 | grep "priority=65532"], [0], [dnl ++ table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(reg0[[17]] = 1; next;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=? (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_label.blocked == 0), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_label.blocked == 0), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_label.blocked == 1)), action=(drop;) ++ table=? 
(ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) ++]) ++ ++AS_BOX([Chassis upgrades and supports CT related]) ++check ovn-sbctl set chassis hv other_config:ct-no-masked-label=true ++check ovn-sbctl set chassis hv other_config:ovn-ct-lb-related=true ++check ovn-nbctl --wait=sb sync ++ovn-sbctl dump-flows | DUMP_FLOWS_SORTED > lflows2 ++ ++AT_CHECK([grep -e "lr_in_defrag" -e "lr_in_dnat" lflows2], [0], [dnl ++ table=? (lr_in_defrag ), priority=0 , match=(1), action=(next;) ++ table=? (lr_in_defrag ), priority=100 , match=(ip && ip4.dst == 192.168.0.1), action=(reg0 = 192.168.0.1; ct_dnat;) ++ table=? (lr_in_defrag ), priority=50 , match=(icmp || icmp6), action=(ct_dnat;) ++ table=? (lr_in_dnat ), priority=0 , match=(1), action=(next;) ++ table=? (lr_in_dnat ), priority=110 , match=(ct.est && !ct.rel && ip4 && reg0 == 192.168.0.1 && ct_mark.natted == 1), action=(next;) ++ table=? (lr_in_dnat ), priority=110 , match=(ct.new && !ct.rel && ip4 && reg0 == 192.168.0.1), action=(ct_lb_mark(backends=192.168.1.10);) ++ table=? (lr_in_dnat ), priority=50 , match=(ct.rel && !ct.est && !ct.new), action=(ct_commit_nat;) ++ table=? (lr_in_dnat ), priority=70 , match=(ct.rel && !ct.est && !ct.new && ct_mark.force_snat == 1), action=(flags.force_snat_for_lb = 1; ct_commit_nat;) ++ table=? (lr_in_dnat ), priority=70 , match=(ct.rel && !ct.est && !ct.new && ct_mark.skip_snat == 1), action=(flags.skip_snat_for_lb = 1; ct_commit_nat;) ++]) ++ ++AT_CHECK([grep -e "ls_in_acl" -e "ls_out_acl" lflows2 | grep "priority=65532"], [0], [dnl ++ table=? (ls_in_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(reg0[[17]] = 1; ct_commit_nat;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(reg0[[9]] = 0; reg0[[10]] = 0; reg0[[17]] = 1; next;) ++ table=? (ls_in_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) ++ table=? (ls_in_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(!ct.est && ct.rel && !ct.new && !ct.inv && ct_mark.blocked == 0), action=(ct_commit_nat;) ++ table=? (ls_out_acl ), priority=65532, match=(ct.est && !ct.rel && !ct.new && !ct.inv && ct.rpl && ct_mark.blocked == 0), action=(next;) ++ table=? (ls_out_acl ), priority=65532, match=(ct.inv || (ct.est && ct.rpl && ct_mark.blocked == 1)), action=(drop;) ++ table=? 
(ls_out_acl ), priority=65532, match=(nd || nd_ra || nd_rs || mldv1 || mldv2), action=(next;) ++ table=?(ls_in_acl_after_lb ), priority=65532, match=(reg0[[17]] == 1), action=(next;) ++]) ++ ++AT_CLEANUP ++]) ++ ++OVN_FOR_EACH_NORTHD_NO_HV([ ++AT_SETUP([Chassis-feature compatibitility - remote chassis]) ++ovn_start ++ ++AS_BOX([Local chassis]) ++check ovn-sbctl chassis-add hv1 geneve 127.0.0.1 \ ++ -- set chassis hv1 other_config:ct-no-masked-label=true \ ++ -- set chassis hv1 other_config:ovn-ct-lb-related=true \ ++ -- set chassis hv1 other_config:mac-binding-timestamp=true ++ ++check ovn-nbctl --wait=sb sync ++ ++AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE debug/chassis-features-list], [0], [dnl ++ct_no_masked_label: true ++ct_lb_related: true ++mac_binding_timestamp: true ++]) ++ ++AS_BOX([Remote chassis]) ++check ovn-sbctl chassis-add hv2 geneve 127.0.0.2 \ ++ -- set chassis hv2 other_config:is-remote=true \ ++ -- set chassis hv2 other_config:ct-no-masked-label=false \ ++ -- set chassis hv2 other_config:ovn-ct-lb-related=false \ ++ -- set chassis hv2 other_config:mac-binding-timestamp=false ++ ++check ovn-nbctl --wait=sb sync ++ ++AT_CHECK([as northd ovn-appctl -t NORTHD_TYPE debug/chassis-features-list], [0], [dnl ++ct_no_masked_label: true ++ct_lb_related: true ++mac_binding_timestamp: true ++]) ++ ++AT_CLEANUP ++]) +diff --git a/tests/ovn.at b/tests/ovn.at +index ad2014de6..f77a4983d 100644 +--- a/tests/ovn.at ++++ b/tests/ovn.at +@@ -4461,7 +4461,12 @@ for i in 1 2 3; do + done + + # Gracefully terminate daemons +-OVN_CLEANUP([hv1],[hv2],[vtep]) ++ ++OVN_CLEANUP_SBOX([hv1]) ++OVN_CLEANUP_SBOX([hv2]) ++OVS_WAIT_UNTIL([test `as vtep ovs-vsctl list-ports vtep_bfd | wc -l` -eq 0]) ++OVN_CLEANUP([vtep]) ++ + OVN_CLEANUP_VSWITCH([hv3]) + + AT_CLEANUP +@@ -25064,8 +25069,10 @@ OVN_FOR_EACH_NORTHD([ + AT_SETUP([interconnection]) + + ovn_init_ic_db +-n_az=5 +-n_ts=5 ++# The number needs to stay relatively low due to high memory consumption ++# with address sanitizers enabled. ++n_az=3 ++n_ts=3 + for i in `seq 1 $n_az`; do + ovn_start az$i + done +@@ -28416,24 +28423,39 @@ wait_row_count Port_Binding 1 logical_port=lsp-cont1 chassis=$ch + OVN_CLEANUP([hv1]) + AT_CLEANUP + ++# TEST_LR_DROP_TRAFFIC_FOR_OWN_IPS [ DGP | GR ] + # Test dropping traffic destined to router owned IPs. +-OVN_FOR_EACH_NORTHD([ +-AT_SETUP([gateway router drop traffic for own IPs]) ++m4_define([TEST_LR_DROP_TRAFFIC_FOR_OWN_IPS], [ + ovn_start + +-ovn-nbctl lr-add r1 -- set logical_router r1 options:chassis=hv1 +-ovn-nbctl ls-add s1 +- +-# Connnect r1 to s1. +-ovn-nbctl lrp-add r1 lrp-r1-s1 00:00:00:00:01:01 10.0.1.1/24 +-ovn-nbctl lsp-add s1 lsp-s1-r1 -- set Logical_Switch_Port lsp-s1-r1 type=router \ +- options:router-port=lrp-r1-s1 addresses=router +- +-# Create logical port p1 in s1 +-ovn-nbctl lsp-add s1 p1 \ ++ovn-nbctl lr-add r1 # Gateway router or LR with DGP on the ext side ++ovn-nbctl ls-add ext # simulate external LS ++ovn-nbctl ls-add s2 # simulate internal LS ++ ++# Connnect r1 to ext. ++ovn-nbctl lrp-add r1 lrp-r1-ext 00:00:00:00:01:01 10.0.1.1/24 ++if test X"$1" = X"DGP"; then ++ ovn-nbctl lrp-set-gateway-chassis lrp-r1-ext hv1 1 ++else ++ ovn-nbctl set logical_router r1 options:chassis=hv1 ++fi ++ovn-nbctl lsp-add ext lsp-ext-r1 -- set Logical_Switch_Port lsp-ext-r1 type=router \ ++ options:router-port=lrp-r1-ext addresses=router ++ ++# Connnect r1 to s2. 
++ovn-nbctl lrp-add r1 lrp-r1-s2 00:00:00:00:02:01 10.0.2.1/24 ++ovn-nbctl lsp-add s2 lsp-s2-r1 -- set Logical_Switch_Port lsp-s2-r1 type=router \ ++ options:router-port=lrp-r1-s2 addresses=router ++ ++# Create logical port p1 in ext ++ovn-nbctl lsp-add ext p1 \ + -- lsp-set-addresses p1 "f0:00:00:00:01:02 10.0.1.2" \ + -- lsp-set-port-security p1 "f0:00:00:00:01:02 10.0.1.2" + ++# Create logical port p2 in s2 ++ovn-nbctl lsp-add s2 p2 \ ++-- lsp-set-addresses p2 "f0:00:00:00:02:02 10.0.2.2" ++ + # Create two hypervisor and create OVS ports corresponding to logical ports. + net_add n1 + +@@ -28447,6 +28469,12 @@ ovs-vsctl -- add-port br-int hv1-vif1 -- \ + options:rxq_pcap=hv1/vif1-rx.pcap \ + ofport-request=1 + ++ovs-vsctl -- add-port br-int hv1-vif2 -- \ ++ set interface hv1-vif2 external-ids:iface-id=p2 \ ++ options:tx_pcap=hv1/vif2-tx.pcap \ ++ options:rxq_pcap=hv1/vif2-rx.pcap \ ++ ofport-request=2 ++ + # Pre-populate the hypervisors' ARP tables so that we don't lose any + # packets for ARP resolution (native tunneling doesn't queue packets + # for ARP resolution). +@@ -28457,9 +28485,10 @@ ovn-nbctl --wait=hv sync + + sw_key=$(ovn-sbctl --bare --columns tunnel_key list datapath_binding r1) + ++echo sw_key: $sw_key + AT_CHECK([ovn-sbctl lflow-list | grep lr_in_arp_resolve | grep 10.0.1.1], [1], []) + +-# Send ip packets from p1 to lrp-r1-s1 ++# Send ip packets from p1 to lrp-r1-ext + src_mac="f00000000102" + dst_mac="000000000101" + src_ip=`ip_to_hex 10 0 1 2` +@@ -28478,10 +28507,10 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=11, n_packets=1,.* + ]) + + # Use the router IP as SNAT IP. +-ovn-nbctl set logical_router r1 options:lb_force_snat_ip=10.0.1.1 ++ovn-nbctl lr-nat-add r1 snat 10.0.1.1 10.8.8.0/24 + ovn-nbctl --wait=hv sync + +-# Send ip packets from p1 to lrp-r1-s1 ++# Send ip packets from p1 to lrp-r1-ext + src_mac="f00000000102" + dst_mac="000000000101" + src_ip=`ip_to_hex 10 0 1 2` +@@ -28496,11 +28525,53 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep "actions=controller" | grep + ]) + + # The packet should've been dropped in the lr_in_arp_resolve stage. +-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=25, n_packets=1,.* priority=2,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl ++if test X"$1" = X"DGP"; then ++ prio=150 ++ inport=reg14 ++ outport=reg15 ++else ++ prio=2 ++fi ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=25, n_packets=1,.* priority=$prio,ip,$inport.*$outport.*metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl + 1 + ]) + ++# Send ip packets from p2 to lrp-r1-ext ++src_mac="f00000000202" ++dst_mac="000000000201" ++src_ip=`ip_to_hex 10 0 2 2` ++dst_ip=`ip_to_hex 10 0 1 1` ++packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000 ++as hv1 ovs-appctl netdev-dummy/receive hv1-vif2 $packet ++ ++# Still no packet-ins should reach ovn-controller. ++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep "actions=controller" | grep -v n_packets=0 -c], [1], [dnl ++0 ++]) ++ ++if test X"$1" = X"DGP"; then ++ # The packet dst should be resolved once for E/W centralized NAT purpose. ++ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=25, n_packets=1,.* priority=100,reg0=0xa000101,reg15=.*metadata=0x${sw_key} actions=mod_dl_dst:00:00:00:00:01:01,resubmit" -c], [0], [dnl ++1 ++]) ++fi ++ ++# The packet should've been finally dropped in the lr_in_arp_resolve stage. 
++AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=25, n_packets=2,.* priority=$prio,ip,$inport.*$outport.*metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl ++1 ++]) + OVN_CLEANUP([hv1]) ++]) ++ ++OVN_FOR_EACH_NORTHD([ ++AT_SETUP([gateway router drop traffic for own IPs]) ++TEST_LR_DROP_TRAFFIC_FOR_OWN_IPS(GR) ++AT_CLEANUP ++]) ++ ++OVN_FOR_EACH_NORTHD([ ++AT_SETUP([distributed gateway port drop traffic for own IPs]) ++TEST_LR_DROP_TRAFFIC_FOR_OWN_IPS(DGP) + AT_CLEANUP + ]) + +diff --git a/tests/system-ovn.at b/tests/system-ovn.at +index 99ad14aa5..1e6767846 100644 +--- a/tests/system-ovn.at ++++ b/tests/system-ovn.at +@@ -1618,8 +1618,8 @@ OVS_WAIT_UNTIL([ + ovn-nbctl --reject lb-add lb3 30.0.0.10:80 "" + ovn-nbctl ls-lb-add foo lb3 + # Filter reset segments +-NS_CHECK_EXEC([foo1], [tcpdump -c 1 -neei foo1 ip[[33:1]]=0x14 > rst.pcap 2>/dev/null &]) +-sleep 1 ++NS_CHECK_EXEC([foo1], [tcpdump -l -c 1 -neei foo1 ip[[33:1]]=0x14 > rst.pcap 2>tcpdump_err &]) ++OVS_WAIT_UNTIL([grep "listening" tcpdump_err]) + NS_CHECK_EXEC([foo1], [wget -q 30.0.0.10],[4]) + + OVS_WAIT_UNTIL([ +@@ -1734,13 +1734,11 @@ OVS_START_L7([bar2], [http6]) + OVS_START_L7([bar3], [http6]) + + dnl Should work with the virtual IP fd03::1 address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget http://[[fd03::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log || (ovs-ofctl -O OpenFlow13 dump-flows br-int && false)]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget http://[[fd03::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log || (ovs-ofctl -O OpenFlow13 dump-flows br-int && false)]) + done +- +-dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::1) | grep -v fe80 | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::1) | grep -v fe80 | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd01::2,dst=fd03::1,sport=,dport=),reply=(src=fd02::2,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::1,sport=,dport=),reply=(src=fd02::3,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -1748,27 +1746,25 @@ tcp,orig=(src=fd01::2,dst=fd03::1,sport=,dport=),reply=(src=fd + ]) + + dnl Should work with the virtual IP fd03::3 address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget http://[[fd03::3]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget http://[[fd03::3]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done +- + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::3) | grep -v fe80 | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::3) | grep -v fe80 | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd01::2,dst=fd03::3,sport=,dport=),reply=(src=fd02::2,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::3,sport=,dport=),reply=(src=fd02::3,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::3,sport=,dport=),reply=(src=fd02::4,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + ++OVS_WAIT_FOR_OUTPUT([ + dnl Test load-balancing that includes L4 ports in NAT. 
+-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget http://[[fd03::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget http://[[fd03::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done +- + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::2) | grep -v fe80 | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::2) | grep -v fe80 | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd02::2,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd02::3,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -1784,14 +1780,14 @@ OVS_WAIT_UNTIL([ + + AT_CHECK([ovs-appctl dpctl/flush-conntrack]) + ++OVS_WAIT_FOR_OUTPUT([ + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget http://[[fd03::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget http://[[fd03::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::2) | grep -v fe80 | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::2) | grep -v fe80 | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd02::2,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd02::3,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -1933,13 +1929,13 @@ OVS_START_L7([foo3], [http]) + OVS_START_L7([foo4], [http]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.3,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.4,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -1947,20 +1943,19 @@ tcp,orig=(src=192.168.1.2,dst=30.0.0.1,sport=,dport=),reply=(s + ]) + + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=192.168.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.3,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=192.168.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.4,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=192.168.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.5,dst=192.168.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + +- + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -2044,13 +2039,13 @@ OVS_START_L7([foo3], [http6]) + OVS_START_L7([foo4], [http6]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget http://[[fd03::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget http://[[fd03::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::1) | grep -v fe80 | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::1) | grep -v fe80 | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd01::2,dst=fd03::1,sport=,dport=),reply=(src=fd01::3,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::1,sport=,dport=),reply=(src=fd01::4,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -2058,20 +2053,19 @@ tcp,orig=(src=fd01::2,dst=fd03::1,sport=,dport=),reply=(src=fd + ]) + + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([foo1], [wget http://[[fd03::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([foo1], [wget http://[[fd03::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::2) | grep -v fe80 | \ ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd03::2) | grep -v fe80 | \ + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd01::3,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd01::4,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd01::2,dst=fd03::2,sport=,dport=),reply=(src=fd01::5,dst=fd01::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + +- + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -2199,27 +2193,27 @@ OVS_START_L7([bar1], [http]) + + check ovs-appctl dpctl/flush-conntrack + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=172.16.1.2,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=172.16.1.2,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + + check ovs-appctl dpctl/flush-conntrack ++OVS_WAIT_FOR_OUTPUT([ + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -2256,23 +2250,23 @@ OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-flows br-int table=43 | \ + grep 'nat(src=20.0.0.2)']) + + check ovs-appctl dpctl/flush-conntrack ++exp_ct1="tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=)" ++exp_ct2="tcp,orig=(src=172.16.1.2,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++tcp,orig=(src=172.16.1.2,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=)" ++ + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-]) ++ct1=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | sed -e 's/zone=[[0-9]]*/zone=/') ++ct2=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0.2) | sed -e 's/zone=[[0-9]]*/zone=/') + +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0.2) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.2,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) +-tcp,orig=(src=172.16.1.2,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++test "x$ct1 = x$exp_ct1" && test "x$ct2 = x$exp_ct2" ++], [0], [dnl + ]) + + OVS_WAIT_UNTIL([check_est_flows], [check established flows]) +@@ -2298,22 +2292,21 @@ rm -f wget*.log + + check ovs-appctl dpctl/flush-conntrack + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++exp_ct1="tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=)" ++exp_ct2="tcp,orig=(src=172.16.1.2,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++tcp,orig=(src=172.16.1.2,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=)" ++ ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 30.0.0.2:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-tcp,orig=(src=172.16.1.2,dst=30.0.0.2,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-]) +- +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0.2) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.2,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) +-tcp,orig=(src=172.16.1.2,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++ct1=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.2) | sed -e 's/zone=[[0-9]]*/zone=/') ++ct2=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0.2) | sed -e 's/zone=[[0-9]]*/zone=/') ++test "x$ct1 = x$exp_ct1" && test "x$ct2 = x$exp_ct2" ++], [0], [dnl + ]) + + OVS_WAIT_UNTIL([check_est_flows], [check established flows]) +@@ -2549,26 +2542,26 @@ OVS_START_L7([foo1], [http6]) + OVS_START_L7([bar1], [http6]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget http://[[fd30::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget http://[[fd30::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd72::2,dst=fd30::1,sport=,dport=),reply=(src=fd11::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd72::2,dst=fd30::1,sport=,dport=),reply=(src=fd12::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget http://[[fd30::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget http://[[fd30::2]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::2) | grep -v fe80 | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::2) | grep -v fe80 | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd72::2,dst=fd30::2,sport=,dport=),reply=(src=fd11::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd72::2,dst=fd30::2,sport=,dport=),reply=(src=fd12::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -2727,24 +2720,24 @@ OVS_START_L7([foo1], [http]) + OVS_START_L7([bar1], [http]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++exp_ct1="tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=)" ++exp_ct2="tcp,orig=(src=172.16.1.3,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++tcp,orig=(src=172.16.1.3,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=)" ++ ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-]) +- ++ct1=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=/') + dnl Force SNAT should have worked. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.3,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) +-tcp,orig=(src=172.16.1.3,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++ct2=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0) | sed -e 's/zone=[[0-9]]*/zone=/') ++test "x$ct1 = x$exp_ct1" && test "x$ct2 = x$exp_ct2" ++], [0], [dnl + ]) ++ + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -2900,24 +2893,24 @@ OVS_START_L7([foo1], [http6]) + OVS_START_L7([bar1], [http6]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget http://[[fd30::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++exp_ct1="tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd11::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd12::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++exp_ct2=tcp,orig=(src=fd72::3,dst=fd11::2,sport=,dport=),reply=(src=fd11::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=) ++tcp,orig=(src=fd72::3,dst=fd12::2,sport=,dport=),reply=(src=fd12::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=)" ++ ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget http://[[fd30::1]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd11::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd12::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-]) +- ++ct1=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | sed -e 's/zone=[[0-9]]*/zone=/') + dnl Force SNAT should have worked. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd20::2) | grep -v fe80 | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=fd72::3,dst=fd11::2,sport=,dport=),reply=(src=fd11::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=) +-tcp,orig=(src=fd72::3,dst=fd12::2,sport=,dport=),reply=(src=fd12::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=) ++ct2=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd20::2) | grep -v fe80 | sed -e 's/zone=[[0-9]]*/zone=/') ++test "x$ct1 = x$exp_ct1" && test "x$ct2 = x$exp_ct2" ++], [0], [dnl + ]) ++ + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -3111,39 +3104,32 @@ OVS_START_L7([foo16], [http6]) + OVS_START_L7([bar16], [http6]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) +-done +- +-for i in `seq 1 20`; do +- echo Request ${i}_6 +- NS_CHECK_EXEC([alice16], [wget http://[[fd30::1]] -t 5 -T 1 --retry-connrefused -v -o wget${i}_6.log]) ++exp_ct1="tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=)" ++exp_ct2="tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd11::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) ++tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd12::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=)" ++exp_ct3="tcp,orig=(src=172.16.1.3,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) ++tcp,orig=(src=172.16.1.3,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=)" ++exp_ct4="tcp,orig=(src=fd72::3,dst=fd11::2,sport=,dport=),reply=(src=fd11::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=) ++tcp,orig=(src=fd72::3,dst=fd12::2,sport=,dport=),reply=(src=fd12::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=)" ++ ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 30.0.0.1 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++ NS_EXEC([alice16], [wget http://[[fd30::1]] -t 5 -T 1 --retry-connrefused -v -o wget${i}_6.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-tcp,orig=(src=172.16.1.3,dst=30.0.0.1,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-]) +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd11::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-tcp,orig=(src=fd72::3,dst=fd30::1,sport=,dport=),reply=(src=fd12::2,dst=fd72::3,sport=,dport=),zone=,mark=10,protoinfo=(state=) +-]) ++ct1=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | sed -e 's/zone=[[0-9]]*/zone=/') ++ct2=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | sed -e 's/zone=[[0-9]]*/zone=/') + + dnl Force SNAT should have worked. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0) | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=172.16.1.3,dst=192.168.1.2,sport=,dport=),reply=(src=192.168.1.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) +-tcp,orig=(src=172.16.1.3,dst=192.168.2.2,sport=,dport=),reply=(src=192.168.2.2,dst=20.0.0.2,sport=,dport=),zone=,protoinfo=(state=) +-]) +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd20::2) | grep -v fe80 | +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl +-tcp,orig=(src=fd72::3,dst=fd11::2,sport=,dport=),reply=(src=fd11::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=) +-tcp,orig=(src=fd72::3,dst=fd12::2,sport=,dport=),reply=(src=fd12::2,dst=fd20::2,sport=,dport=),zone=,protoinfo=(state=) ++ct3=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(20.0.0) | sed -e 's/zone=[[0-9]]*/zone=/') ++ct4=$(ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd20::2) | grep -v fe80 | sed -e 's/zone=[[0-9]]*/zone=/') ++test "x$ct1 = x$exp_ct1" && test "x$ct2 = x$exp_ct2" && test "x$ct3 = x$exp_ct3" && test "x$ct4 = x$exp_ct4" ++], [0], [dnl + ]) ++ + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -3262,26 +3248,26 @@ OVS_START_L7([foo1], [http]) + OVS_START_L7([bar1], [http]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 172.16.1.10 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 172.16.1.10 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.10) | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.10) | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=172.16.1.2,dst=172.16.1.10,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=172.16.1.2,dst=172.16.1.10,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget 172.16.1.11:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget 172.16.1.11:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.11) | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.11) | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=172.16.1.2,dst=172.16.1.11,sport=,dport=),reply=(src=192.168.1.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=172.16.1.2,dst=172.16.1.11,sport=,dport=),reply=(src=192.168.2.2,dst=172.16.1.2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -3405,26 +3391,26 @@ OVS_START_L7([foo1], [http6]) + OVS_START_L7([bar1], [http6]) + + dnl Should work with the virtual IP address through NAT +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget http://[[fd72::10]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget http://[[fd72::10]] -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd72::10) | grep -v fe80 | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd72::10) | grep -v fe80 | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd72::2,dst=fd72::10,sport=,dport=),reply=(src=fd01::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd72::2,dst=fd72::10,sport=,dport=),reply=(src=fd02::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) + + dnl Test load-balancing that includes L4 ports in NAT. +-for i in `seq 1 20`; do +- echo Request $i +- NS_CHECK_EXEC([alice1], [wget http://[[fd72::11]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++OVS_WAIT_FOR_OUTPUT_UNQUOTED([ ++for i in `seq 1 10`; do ++ NS_EXEC([alice1], [wget http://[[fd72::11]]:8000 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) + done + + dnl Each server should have at least one connection. +-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd72::11) | grep -v fe80 | ++ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd72::11) | grep -v fe80 | + sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=fd72::2,dst=fd72::11,sport=,dport=),reply=(src=fd01::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=fd72::2,dst=fd72::11,sport=,dport=),reply=(src=fd02::2,dst=fd72::2,sport=,dport=),zone=,mark=2,protoinfo=(state=) +@@ -3598,8 +3584,8 @@ icmp,orig=(src=192.168.2.2,dst=172.16.1.2,id=,type=8,code=0),reply=(src + ]) + + # Try to ping external network +-NS_CHECK_EXEC([ext-net], [tcpdump -n -c 3 -i ext-veth dst 172.16.1.3 and icmp > ext-net.pcap &]) +-sleep 1 ++NS_CHECK_EXEC([ext-net], [tcpdump -l -n -c 3 -i ext-veth dst 172.16.1.3 and icmp > ext-net.pcap 2>tcpdump_err &]) ++OVS_WAIT_UNTIL([grep "listening" tcpdump_err]) + AT_CHECK([ovn-nbctl lr-nat-del R1 snat]) + NS_CHECK_EXEC([foo1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.1 | FORMAT_PING], \ + [0], [dnl +@@ -4507,17 +4493,15 @@ OVS_WAIT_UNTIL( + [ovn-sbctl dump-flows sw0 | grep ct_lb_mark | grep priority=120 | grep "ip4.dst == 10.0.0.10" > lflows.txt + test 1 = `cat lflows.txt | grep "ct_lb_mark(backends=10.0.0.3:80,20.0.0.3:80)" | wc -l`] + ) +- + # From sw0-p2 send traffic to vip - 10.0.0.10 +-for i in `seq 1 20`; do +- echo Request $i +- ovn-sbctl list service_monitor +- NS_CHECK_EXEC([sw0-p2], [wget 10.0.0.10 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) +-done ++#dnl Each server should have at least one connection. ++OVS_WAIT_FOR_OUTPUT([ ++ for i in `seq 1 10`; do ++ NS_EXEC([sw0-p2], [wget 10.0.0.10 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) ++ done + +-dnl Each server should have at least one connection. 
+-AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.0.0.10) | \ +-sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl ++ ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.0.0.10) | \ ++ sed -e 's/zone=[[0-9]]*/zone=/'], [0], [dnl + tcp,orig=(src=10.0.0.4,dst=10.0.0.10,sport=,dport=),reply=(src=10.0.0.3,dst=10.0.0.4,sport=,dport=),zone=,mark=2,protoinfo=(state=) + tcp,orig=(src=10.0.0.4,dst=10.0.0.10,sport=,dport=),reply=(src=20.0.0.3,dst=10.0.0.4,sport=,dport=),zone=,mark=2,protoinfo=(state=) + ]) +@@ -4649,10 +4633,12 @@ ovn-nbctl lb-add lb-ipv4-tcp 88.88.88.88:8080 42.42.42.1:4041 tcp + ovn-nbctl lb-add lb-ipv4-tcp-dup 88.88.88.89:8080 42.42.42.1:4041 tcp + ovn-nbctl lb-add lb-ipv4-udp 88.88.88.88:4040 42.42.42.1:2021 udp + ovn-nbctl lb-add lb-ipv4-udp-dup 88.88.88.89:4040 42.42.42.1:2021 udp ++ovn-nbctl lb-add lb-ipv4 88.88.88.90 42.42.42.1 + ovn-nbctl ls-lb-add sw lb-ipv4-tcp + ovn-nbctl ls-lb-add sw lb-ipv4-tcp-dup + ovn-nbctl ls-lb-add sw lb-ipv4-udp + ovn-nbctl ls-lb-add sw lb-ipv4-udp-dup ++ovn-nbctl ls-lb-add sw lb-ipv4 + + ovn-nbctl lr-add rtr + ovn-nbctl lrp-add rtr rtr-sw 00:00:00:00:01:00 42.42.42.254/24 +@@ -4668,28 +4654,39 @@ ADD_VETH(lsp, lsp, br-int, "42.42.42.1/24", "00:00:00:00:00:01", \ + ovn-nbctl --wait=hv -t 3 sync + + # Start IPv4 TCP server on lsp. +-NS_CHECK_EXEC([lsp], [timeout 2s nc -k -l 42.42.42.1 4041 &], [0]) ++NETNS_DAEMONIZE([lsp], [nc -l -k 42.42.42.1 4041], [lsp0.pid]) + + # Check that IPv4 TCP hairpin connection succeeds on both VIPs. + NS_CHECK_EXEC([lsp], [nc 88.88.88.88 8080 -z], [0], [ignore], [ignore]) + NS_CHECK_EXEC([lsp], [nc 88.88.88.89 8080 -z], [0], [ignore], [ignore]) ++NS_CHECK_EXEC([lsp], [nc 88.88.88.90 4041 -z], [0], [ignore], [ignore]) + + # Capture IPv4 UDP hairpinned packets. + filter="dst 42.42.42.1 and dst port 2021 and udp" +-NS_CHECK_EXEC([lsp], [tcpdump -nn -c 2 -i lsp ${filter} > lsp.pcap &]) +- +-sleep 1 ++NS_CHECK_EXEC([lsp], [tcpdump -l -nn -c 3 -i lsp ${filter} > lsp.pcap 2>tcpdump_err &]) ++OVS_WAIT_UNTIL([grep "listening" tcpdump_err]) + + # Generate IPv4 UDP hairpin traffic. + NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.88 4040 &], [0]) + NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.89 4040 &], [0]) ++NS_CHECK_EXEC([lsp], [echo a | nc -u 88.88.88.90 2021 &], [0]) + + # Check hairpin traffic. + OVS_WAIT_UNTIL([ + total_pkts=$(cat lsp.pcap | wc -l) +- test "${total_pkts}" = "2" ++ test "${total_pkts}" = "3" + ]) + ++ovn-nbctl pg-add pg0 lsp ++ovn-nbctl --apply-after-lb acl-add pg0 from-lport 1004 "ip4 && ip4.dst == 10.0.0.2" drop ++ovn-nbctl --apply-after-lb acl-add pg0 from-lport 1002 "ip4 && tcp" allow-related ++ovn-nbctl --apply-after-lb acl-add pg0 from-lport 1002 "ip4 && udp" allow ++ovn-nbctl --wait=hv sync ++ ++## Check that IPv4 TCP hairpin connection succeeds on both VIPs. 
++NS_CHECK_EXEC([lsp], [nc 88.88.88.88 8080 -z], [0], [ignore], [ignore]) ++NS_CHECK_EXEC([lsp], [nc 88.88.88.89 8080 -z], [0], [ignore], [ignore]) ++ + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -4736,10 +4733,12 @@ ovn-nbctl lb-add lb-ipv6-tcp [[8800::0088]]:8080 [[4200::1]]:4041 tcp + ovn-nbctl lb-add lb-ipv6-tcp-dup [[8800::0089]]:8080 [[4200::1]]:4041 tcp + ovn-nbctl lb-add lb-ipv6-udp [[8800::0088]]:4040 [[4200::1]]:2021 udp + ovn-nbctl lb-add lb-ipv6-udp-dup [[8800::0089]]:4040 [[4200::1]]:2021 udp ++ovn-nbctl lb-add lb-ipv6 8800::0090 4200::1 + ovn-nbctl ls-lb-add sw lb-ipv6-tcp + ovn-nbctl ls-lb-add sw lb-ipv6-tcp-dup + ovn-nbctl ls-lb-add sw lb-ipv6-udp + ovn-nbctl ls-lb-add sw lb-ipv6-udp-dup ++ovn-nbctl ls-lb-add sw lb-ipv6 + + ovn-nbctl lr-add rtr + ovn-nbctl lrp-add rtr rtr-sw 00:00:00:00:01:00 4200::00ff/64 +@@ -4754,28 +4753,39 @@ OVS_WAIT_UNTIL([test "$(ip netns exec lsp ip a | grep 4200::1 | grep tentative)" + ovn-nbctl --wait=hv -t 3 sync + + # Start IPv6 TCP server on lsp. +-NS_CHECK_EXEC([lsp], [timeout 2s nc -k -l 4200::1 4041 &], [0]) ++NETNS_DAEMONIZE([lsp], [nc -l -k 4200::1 4041], [lsp0.pid]) + + # Check that IPv6 TCP hairpin connection succeeds on both VIPs. + NS_CHECK_EXEC([lsp], [nc 8800::0088 8080 -z], [0], [ignore], [ignore]) + NS_CHECK_EXEC([lsp], [nc 8800::0089 8080 -z], [0], [ignore], [ignore]) ++NS_CHECK_EXEC([lsp], [nc 8800::0090 4041 -z], [0], [ignore], [ignore]) + + # Capture IPv6 UDP hairpinned packets. + filter="dst 4200::1 and dst port 2021 and udp" +-NS_CHECK_EXEC([lsp], [tcpdump -nn -c 2 -i lsp $filter > lsp.pcap &]) +- +-sleep 1 ++NS_CHECK_EXEC([lsp], [tcpdump -l -nn -c 3 -i lsp $filter > lsp.pcap 2>tcpdump_err &]) ++OVS_WAIT_UNTIL([grep "listening" tcpdump_err]) + + # Generate IPv6 UDP hairpin traffic. + NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0088 4040 &], [0]) + NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0089 4040 &], [0]) ++NS_CHECK_EXEC([lsp], [echo a | nc -u 8800::0090 2021 &], [0]) + + # Check hairpin traffic. + OVS_WAIT_UNTIL([ + total_pkts=$(cat lsp.pcap | wc -l) +- test "${total_pkts}" = "2" ++ test "${total_pkts}" = "3" + ]) + ++ovn-nbctl pg-add pg0 lsp ++ovn-nbctl --apply-after-lb acl-add pg0 from-lport 1002 "ip6 && tcp" allow-related ++ovn-nbctl --apply-after-lb acl-add pg0 from-lport 1002 "ip6 && udp" allow ++ovn-nbctl --apply-after-lb acl-add pg0 from-lport 1000 "ip6" drop ++ovn-nbctl --wait=hv sync ++ ++# Check that IPv6 TCP hairpin connection succeeds on both VIPs. ++NS_CHECK_EXEC([lsp], [nc 8800::0088 8080 -z], [0], [ignore], [ignore]) ++NS_CHECK_EXEC([lsp], [nc 8800::0089 8080 -z], [0], [ignore], [ignore]) ++ + OVS_APP_EXIT_AND_WAIT([ovn-controller]) + + as ovn-sb +@@ -4938,7 +4948,7 @@ OVS_WAIT_UNTIL([ + ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo + c=$(cat sw0-p1-rej-icmp.pcap | grep \ + "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + rm -f *.pcap +@@ -4950,7 +4960,7 @@ OVS_WAIT_UNTIL([ + ip netns exec sw0-p1-rej nc -u 10.0.0.4 94 < foo + c=$(cat sw0-p1-rej-icmp.pcap | grep \ + "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 94 unreachable" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + # Now test for IPv6 UDP. 
+@@ -4962,7 +4972,7 @@ OVS_WAIT_UNTIL([ + c=$(cat sw0-p2-rej-icmp6.pcap | grep \ + "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \ + aef0::3 udp port 90" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + rm -f *.pcap +@@ -4975,7 +4985,7 @@ OVS_WAIT_UNTIL([ + c=$(cat sw0-p2-rej-icmp6.pcap | grep \ + "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \ + aef0::3 udp port 94" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + # Delete all the ACLs of pg0 and add the ACL with a generic match with reject action. +@@ -5000,7 +5010,7 @@ OVS_WAIT_UNTIL([ + ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo + c=$(cat sw0-p1-rej-icmp.pcap | grep \ + "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + rm -f *.pcap +@@ -5012,7 +5022,7 @@ OVS_WAIT_UNTIL([ + c=$(cat sw0-p2-rej-icmp6.pcap | grep \ + "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \ + aef0::3 udp port 90" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + +@@ -5179,7 +5189,7 @@ OVS_WAIT_UNTIL([ + ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo + c=$(cat sw0-p1-rej-icmp.pcap | grep \ + "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + rm -f *.pcap +@@ -5191,7 +5201,7 @@ OVS_WAIT_UNTIL([ + ip netns exec sw0-p1-rej nc -u 10.0.0.4 94 < foo + c=$(cat sw0-p1-rej-icmp.pcap | grep \ + "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 94 unreachable" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + # Now test for IPv6 UDP. +@@ -5203,7 +5213,7 @@ OVS_WAIT_UNTIL([ + c=$(cat sw0-p2-rej-icmp6.pcap | grep \ + "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \ + aef0::3 udp port 90" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + rm -f *.pcap +@@ -5216,7 +5226,7 @@ OVS_WAIT_UNTIL([ + c=$(cat sw0-p2-rej-icmp6.pcap | grep \ + "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \ + aef0::3 udp port 94" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + # Delete all the ACLs of pg0 and add the ACL with a generic match with reject action. +@@ -5241,7 +5251,7 @@ OVS_WAIT_UNTIL([ + ip netns exec sw0-p1-rej nc -u 10.0.0.4 90 < foo + c=$(cat sw0-p1-rej-icmp.pcap | grep \ + "10.0.0.4 > 10.0.0.3: ICMP 10.0.0.4 udp port 90 unreachable" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + rm -f *.pcap +@@ -5253,7 +5263,7 @@ OVS_WAIT_UNTIL([ + c=$(cat sw0-p2-rej-icmp6.pcap | grep \ + "IP6 aef0::3 > aef0::4: ICMP6, destination unreachable, unreachable port, \ + aef0::3 udp port 90" | uniq | wc -l) +- test $c -eq 1 ++ test $c -ge 1 + ]) + + +@@ -9277,13 +9287,15 @@ test_related_traffic() { + + check ovs-appctl dpctl/flush-conntrack + +- NETNS_DAEMONIZE([client], [tcpdump -U -i client -w client.pcap], [tcpdump0.pid]) +- NETNS_DAEMONIZE([server], [tcpdump -U -i server -w server.pcap], [tcpdump1.pid]) ++ NETNS_DAEMONIZE([client], [tcpdump -l -U -i client -w client.pcap 2>client_err], [tcpdump0.pid]) ++ NETNS_DAEMONIZE([server], [tcpdump -l -U -i server -w server.pcap 2>server_err], [tcpdump1.pid]) + + # Setup a dummy UDP listeners so we don't get "port unreachable". 
+ NETNS_DAEMONIZE([client], [nc -l -u 1], [nc0.pid]) + NETNS_DAEMONIZE([server], [nc -l -u 2], [nc1.pid]) +- sleep 1 ++ ++ OVS_WAIT_UNTIL([grep "listening" client_err]) ++ OVS_WAIT_UNTIL([grep "listening" server_err]) + + # Send UDP client -> server + check ovs-ofctl packet-out br-int "in_port=ovs-client,packet=$client_udp,actions=resubmit(,0)" +@@ -9479,7 +9491,8 @@ name: 'vport' value: '666' + # Start IPv4 TCP server on vm1. + NETNS_DAEMONIZE([vm1], [nc -k -l 42.42.42.2 4242], [nc-vm1.pid]) + +-# Make sure connecting to the VIP works. ++# Make sure connecting to the VIP works (hairpin, via ls and via lr). ++NS_CHECK_EXEC([vm1], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore]) + NS_CHECK_EXEC([vm2], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore]) + NS_CHECK_EXEC([vm3], [nc 66.66.66.66 666 -z], [0], [ignore], [ignore]) + +@@ -9572,9 +9585,263 @@ name: 'vport' value: '666' + # Start IPv6 TCP server on vm1. + NETNS_DAEMONIZE([vm1], [nc -k -l 4242::2 4242], [nc-vm1.pid]) + +-# Make sure connecting to the VIP works. ++# Make sure connecting to the VIP works (hairpin, via ls and via lr). ++NS_CHECK_EXEC([vm1], [nc 6666::1 666 -z], [0], [ignore], [ignore]) + NS_CHECK_EXEC([vm2], [nc 6666::1 666 -z], [0], [ignore], [ignore]) + NS_CHECK_EXEC([vm3], [nc 6666::1 666 -z], [0], [ignore], [ignore]) + + AT_CLEANUP + ]) ++ ++########################################################### ++## ls1 -- cluster-router -- join - gr1 -- public1 -- ln1 ## ++########################################################### ++OVN_FOR_EACH_NORTHD([ ++AT_SETUP([Gateway router with dynamic_neigh_routers]) ++ ++CHECK_CONNTRACK() ++CHECK_CONNTRACK_NAT() ++ovn_start ++OVS_TRAFFIC_VSWITCHD_START() ++ADD_BR([br-int]) ++ADD_BR([br-ex], [set Bridge br-ex fail-mode=standalone]) ++ ++check ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=provider:br-ex ++ ++# Set external-ids in br-int needed for ovn-controller ++ovs-vsctl \ ++ -- set Open_vSwitch . external-ids:system-id=hv1 \ ++ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ ++ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ ++ -- set Open_vSwitch . 
external-ids:ovn-encap-ip=169.0.0.1 \ ++ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true ++ ++# Start ovn-controller ++start_daemon ovn-controller ++ ++# Add routers ++check ovn-nbctl lr-add gr1 ++check ovn-nbctl lr-add cluster-router ++ ++# Add switches ++check ovn-nbctl ls-add join ++check ovn-nbctl ls-add public1 ++check ovn-nbctl ls-add ls1 ++ ++# Add ls1 ports ++check ovn-nbctl lsp-add ls1 ls1p1 \ ++ -- lsp-set-addresses ls1p1 "00:00:00:00:01:11 10.244.2.11" ++ ++check ovn-nbctl lsp-add ls1 ls1-to-cluster-router \ ++ -- lsp-set-type ls1-to-cluster-router router \ ++ -- lsp-set-options ls1-to-cluster-router router-port=cluster-router-to-ls1 \ ++ -- lsp-set-addresses ls1-to-cluster-router router ++ ++# Add cluster-router ports ++check ovn-nbctl lrp-add cluster-router cluster-router-to-ls1 "00:00:00:0f:01:01" 10.244.2.1/24 \ ++ -- lrp-add cluster-router cluster-router-to-join "00:00:00:0f:02:01" 100.64.0.1/16 \ ++ -- lrp-set-gateway-chassis cluster-router-to-ls1 hv1 10 \ ++ -- --policy=src-ip lr-route-add cluster-router 10.244.2.0/24 100.64.0.3 ++ ++# Add join ports ++check ovn-nbctl lsp-add join join-to-cluster-router \ ++ -- lsp-set-type join-to-cluster-router router \ ++ -- lsp-set-options join-to-cluster-router router-port=cluster-router-to-join \ ++ -- lsp-set-addresses join-to-cluster-router router \ ++ -- lsp-add join join-to-gr1 \ ++ -- lsp-set-type join-to-gr1 router \ ++ -- lsp-set-options join-to-gr1 router-port=gr1-to-join \ ++ -- lsp-set-addresses join-to-gr1 router ++ ++check ovn-nbctl set logical_router gr1 options:lb_force_snat_ip=router_ip \ ++ -- set logical_router gr1 options:snat-ct-zone=0 \ ++ -- set logical_router gr1 options:dynamic_neigh_routers=true ++ ++# Add gr1 ports and set natting ++check ovn-nbctl lrp-add gr1 gr1-to-join "00:00:00:0f:02:03" 100.64.0.3/16 \ ++ -- lr-route-add gr1 10.244.0.0/16 100.64.0.1 \ ++ -- lr-nat-add gr1 snat 10.89.189.12 10.244.0.0/16 \ ++ -- lrp-add gr1 gr1-to-public1 "0a:0a:b6:fc:03:12" 10.89.189.12/24 \ ++ -- set logical_router gr1 options:chassis=hv1 ++ ++# Add public1 ports ++check ovn-nbctl lsp-add public1 public1-to-gr1 \ ++ -- lsp-set-type public1-to-gr1 router \ ++ -- lsp-set-options public1-to-gr1 router-port=gr1-to-public1 \ ++ -- lsp-set-addresses public1-to-gr1 router \ ++ -- lsp-add public1 ln1 \ ++ -- lsp-set-type ln1 localnet \ ++ -- lsp-set-options ln1 network_name=provider \ ++ -- lsp-set-addresses ln1 unknown ++ ++check ovn-nbctl --wait=hv sync ++ ++ADD_NAMESPACES(ns_ls1p1) ++ADD_VETH(ls1p1, ns_ls1p1, br-int, "10.244.2.11/24", "00:00:00:00:01:11", "10.244.2.1") ++ ++ADD_NAMESPACES(ns_ext1) ++ADD_VETH(ln1, ns_ext1, br-ex, "10.89.189.1/24", "0a:0a:b6:fc:03:01") ++ ++NS_CHECK_EXEC([ns_ls1p1], [ping -q -c 3 -i 0.3 -w 2 10.89.189.1 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++OVS_APP_EXIT_AND_WAIT([ovn-controller]) ++ ++as ovn-sb ++OVS_APP_EXIT_AND_WAIT([ovsdb-server]) ++ ++as ovn-nb ++OVS_APP_EXIT_AND_WAIT([ovsdb-server]) ++ ++as northd ++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) ++ ++as ++OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d ++/connection dropped.*/d"]) ++AT_CLEANUP ++]) ++ ++OVN_FOR_EACH_NORTHD([ ++AT_SETUP([ACL default_acl_drop]) ++AT_KEYWORDS([acl default_acl_drop]) ++ ++CHECK_CONNTRACK() ++ovn_start ++ ++OVS_TRAFFIC_VSWITCHD_START() ++ADD_BR([br-int]) ++ ++# Set external-ids in br-int needed for ovn-controller ++ovs-vsctl \ ++ -- set Open_vSwitch . external-ids:system-id=hv1 \ ++ -- set Open_vSwitch . 
external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ ++ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ ++ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \ ++ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true ++ ++# Start ovn-controller ++start_daemon ovn-controller ++ ++ovn-nbctl ls-add sw ++ ++# Logical port 'vm1' in switch 'sw'. ++ADD_NAMESPACES(vm1) ++ADD_VETH(vm1, vm1, br-int, "10.0.0.1/24", "f0:00:00:01:02:03", \ ++ "10.0.0.254") ++check ovn-nbctl lsp-add sw vm1 \ ++-- lsp-set-addresses vm1 "f0:00:00:01:02:03 10.0.0.1" ++ ++# Logical port 'vm2' in switch 'sw'. ++ADD_NAMESPACES(vm2) ++ADD_VETH(vm2, vm2, br-int, "10.0.0.2/24", "f0:00:00:01:02:05", \ ++"10.0.0.254") ++check ovn-nbctl lsp-add sw vm2 \ ++-- lsp-set-addresses vm2 "f0:00:00:01:02:05 10.0.0.2" ++ ++# Wait for ovn-controller to catch up. ++wait_for_ports_up ++check ovn-nbctl --wait=hv sync ++ ++AS_BOX([from-lport acl, default_acl_drop false]) ++check ovn-nbctl acl-del sw ++check ovn-nbctl set NB_Global . options:default_acl_drop=false \ ++ -- acl-add sw from-lport 20 "ip4 && icmp" allow-related \ ++ -- acl-add sw from-lport 10 "ip4" drop ++check ovn-nbctl --wait=hv sync ++ ++# 'vm1' should be able to ping 'vm2' directly. ++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.2 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++AS_BOX([from-lport acl, default_acl_drop true]) ++check ovn-nbctl acl-del sw ++check ovn-nbctl set NB_Global . options:default_acl_drop=true \ ++ -- acl-add sw from-lport 20 "ip4 && icmp" allow-related \ ++ -- acl-add sw from-lport 10 "arp" allow \ ++ -- --apply-after-lb acl-add sw from-lport 1 1 allow \ ++ -- acl-add sw to-lport 1 1 allow ++check ovn-nbctl --wait=hv sync ++ ++# 'vm1' should be able to ping 'vm2' directly. ++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.2 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++AS_BOX([from-lport acl, after LB, default_acl_drop false]) ++check ovn-nbctl acl-del sw ++check ovn-nbctl set NB_Global . options:default_acl_drop=false \ ++ -- --apply-after-lb acl-add sw from-lport 20 "ip4 && icmp" allow-related \ ++ -- --apply-after-lb acl-add sw from-lport 10 "ip4" drop ++check ovn-nbctl --wait=hv sync ++ ++# 'vm1' should be able to ping 'vm2' directly. ++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.2 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++AS_BOX([from-lport acl, after LB, default_acl_drop true]) ++check ovn-nbctl acl-del sw ++check ovn-nbctl set NB_Global . options:default_acl_drop=true \ ++ -- acl-add sw from-lport 1 1 allow \ ++ -- --apply-after-lb acl-add sw from-lport 20 "ip4 && icmp" allow-related \ ++ -- --apply-after-lb acl-add sw from-lport 20 "arp" allow-related \ ++ -- acl-add sw to-lport 1 1 allow ++check ovn-nbctl --wait=hv sync ++ ++# 'vm1' should be able to ping 'vm2' directly. ++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.2 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++AS_BOX([to-lport acl, default_acl_drop false]) ++check ovn-nbctl acl-del sw ++check ovn-nbctl set NB_Global . options:default_acl_drop=false \ ++ -- acl-add sw to-lport 20 "ip4 && icmp" allow-related \ ++ -- acl-add sw to-lport 10 "ip4" drop ++check ovn-nbctl --wait=hv sync ++ ++# 'vm1' should be able to ping 'vm2' directly. 
++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.2 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++AS_BOX([to-lport acl, default_acl_drop true]) ++check ovn-nbctl acl-del sw ++check ovn-nbctl set NB_Global . options:default_acl_drop=true \ ++ -- acl-add sw from-lport 1 1 allow \ ++ -- --apply-after-lb acl-add sw from-lport 1 1 allow \ ++ -- acl-add sw to-lport 20 "ip4 && icmp" allow-related \ ++ -- acl-add sw to-lport 20 "arp" allow ++check ovn-nbctl --wait=hv sync ++ ++# 'vm1' should be able to ping 'vm2' directly. ++NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.2 | FORMAT_PING], \ ++[0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ ++OVS_APP_EXIT_AND_WAIT([ovn-controller]) ++ ++as ovn-sb ++OVS_APP_EXIT_AND_WAIT([ovsdb-server]) ++ ++as ovn-nb ++OVS_APP_EXIT_AND_WAIT([ovsdb-server]) ++ ++as northd ++OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) ++ ++as ++OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d ++/connection dropped.*/d"]) ++AT_CLEANUP ++]) +diff --git a/utilities/ovn-dbctl.c b/utilities/ovn-dbctl.c +index a850c2f31..5edb82e7f 100644 +--- a/utilities/ovn-dbctl.c ++++ b/utilities/ovn-dbctl.c +@@ -109,6 +109,15 @@ static void server_loop(const struct ovn_dbctl_options *dbctl_options, + struct ovsdb_idl *idl, int argc, char *argv[]); + static void ovn_dbctl_exit(int status); + ++static void ++destroy_argv(int argc, char **argv) ++{ ++ for (int i = 0; i < argc; i++) { ++ free(argv[i]); ++ } ++ free(argv); ++} ++ + int + ovn_dbctl_main(int argc, char *argv[], + const struct ovn_dbctl_options *dbctl_options) +@@ -151,6 +160,7 @@ ovn_dbctl_main(int argc, char *argv[], + char *error_s = ovs_cmdl_parse_all(argc, argv_, get_all_options(), + &parsed_options, &n_parsed_options); + if (error_s) { ++ destroy_argv(argc, argv_); + ctl_fatal("%s", error_s); + } + +@@ -179,6 +189,7 @@ ovn_dbctl_main(int argc, char *argv[], + bool daemon_mode = false; + if (get_detach()) { + if (argc != optind) { ++ destroy_argv(argc, argv_); + ctl_fatal("non-option arguments not supported with --detach " + "(use --help for help)"); + } +@@ -204,11 +215,8 @@ ovn_dbctl_main(int argc, char *argv[], + if (error) { + ovsdb_idl_destroy(idl); + idl = the_idl = NULL; ++ destroy_argv(argc, argv_); + +- for (int i = 0; i < argc; i++) { +- free(argv_[i]); +- } +- free(argv_); + ctl_fatal("%s", error); + } + +@@ -237,21 +245,15 @@ cleanup: + } + free(commands); + if (error) { +- for (int i = 0; i < argc; i++) { +- free(argv_[i]); +- } +- free(argv_); ++ destroy_argv(argc, argv_); + ctl_fatal("%s", error); + } + } + + ovsdb_idl_destroy(idl); + idl = the_idl = NULL; ++ destroy_argv(argc, argv_); + +- for (int i = 0; i < argc; i++) { +- free(argv_[i]); +- } +- free(argv_); + exit(EXIT_SUCCESS); + } + +@@ -1238,40 +1240,53 @@ dbctl_client(const struct ovn_dbctl_options *dbctl_options, + + ctl_timeout_setup(timeout); + ++ char *cmd_result = NULL; ++ char *cmd_error = NULL; + struct jsonrpc *client; ++ int exit_status; ++ char *error_str; ++ + int error = unixctl_client_create(socket_name, &client); + if (error) { +- ctl_fatal("%s: could not connect to %s daemon (%s); " +- "unset %s to avoid using daemon", +- socket_name, program_name, ovs_strerror(error), +- dbctl_options->daemon_env_var_name); ++ error_str = xasprintf("%s: could not connect to %s daemon (%s); " ++ "unset %s to avoid using daemon", ++ socket_name, program_name, ovs_strerror(error), ++ dbctl_options->daemon_env_var_name); ++ goto log_error; + } + +- char *cmd_result; 
+- char *cmd_error; + error = unixctl_client_transact(client, "run", + args.n, args.names, + &cmd_result, &cmd_error); + if (error) { +- ctl_fatal("%s: transaction error (%s)", +- socket_name, ovs_strerror(error)); ++ error_str = xasprintf("%s: transaction error (%s)", ++ socket_name, ovs_strerror(error)); ++ goto log_error; + } +- svec_destroy(&args); + +- int exit_status; + if (cmd_error) { +- exit_status = EXIT_FAILURE; + fprintf(stderr, "%s: %s", program_name, cmd_error); +- } else { +- exit_status = EXIT_SUCCESS; +- fputs(cmd_result, stdout); ++ goto error; + } ++ ++ exit_status = EXIT_SUCCESS; ++ fputs(cmd_result, stdout); ++ goto cleanup; ++ ++log_error: ++ VLOG_ERR("%s", error_str); ++ ovs_error(0, "%s", error_str); ++ free(error_str); ++ ++error: ++ exit_status = EXIT_FAILURE; ++ ++cleanup: + free(cmd_result); + free(cmd_error); + jsonrpc_close(client); +- for (int i = 0; i < argc; i++) { +- free(argv[i]); +- } +- free(argv); ++ svec_destroy(&args); ++ destroy_argv(argc, argv); ++ + exit(exit_status); + } +diff --git a/utilities/ovn-nbctl.8.xml b/utilities/ovn-nbctl.8.xml +index 92e10c012..72d4088f0 100644 +--- a/utilities/ovn-nbctl.8.xml ++++ b/utilities/ovn-nbctl.8.xml +@@ -814,7 +814,7 @@ + Attaches the mirror m to the logical port port. + + +-
lsp-dettach-mirror port m
++      lsp-detach-mirror port m
+
+      Detaches the mirror m from the logical port port.
+
    +diff --git a/utilities/ovn-trace.c b/utilities/ovn-trace.c +index 07ebac5e5..e5766ed67 100644 +--- a/utilities/ovn-trace.c ++++ b/utilities/ovn-trace.c +@@ -1486,9 +1486,8 @@ ovntrace_node_prune_hard(struct ovs_list *nodes) + } + + static void +-execute_load(const struct ovnact_load *load, +- const struct ovntrace_datapath *dp, struct flow *uflow, +- struct ovs_list *super OVS_UNUSED) ++execute_load(const struct ovnact *ovnact, const struct ovntrace_datapath *dp, ++ struct flow *uflow, struct ovs_list *super OVS_UNUSED) + { + const struct ovnact_encode_params ep = { + .lookup_port = ovntrace_lookup_port, +@@ -1498,7 +1497,7 @@ execute_load(const struct ovnact_load *load, + uint64_t stub[512 / 8]; + struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub); + +- ovnacts_encode(&load->ovnact, sizeof *load, &ep, &ofpacts); ++ ovnacts_encode(ovnact, OVNACT_ALIGN(ovnact->len), &ep, &ofpacts); + + struct ofpact *a; + OFPACT_FOR_EACH (a, ofpacts.data, ofpacts.size) { +@@ -1506,12 +1505,11 @@ execute_load(const struct ovnact_load *load, + + if (!mf_is_register(sf->field->id)) { + struct ds s = DS_EMPTY_INITIALIZER; +- ovnacts_format(&load->ovnact, OVNACT_LOAD_SIZE, &s); +- ds_chomp(&s, ';'); + +- char *friendly = ovntrace_make_names_friendly(ds_cstr(&s)); +- ovntrace_node_append(super, OVNTRACE_NODE_MODIFY, "%s", friendly); +- free(friendly); ++ ovnacts_format(ovnact, OVNACT_ALIGN(ovnact->len), &s); ++ ds_chomp(&s, ';'); ++ ovntrace_node_append(super, OVNTRACE_NODE_MODIFY, "%s", ++ ds_cstr(&s)); + + ds_destroy(&s); + } +@@ -3057,7 +3055,7 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len, + const struct ovnact *a; + OVNACT_FOR_EACH (a, ovnacts, ovnacts_len) { + ds_clear(&s); +- ovnacts_format(a, sizeof *a * (ovnact_next(a) - a), &s); ++ ovnacts_format(a, OVNACT_ALIGN(a->len), &s); + char *friendly = ovntrace_make_names_friendly(ds_cstr(&s)); + ovntrace_node_append(super, OVNTRACE_NODE_ACTION, "%s", friendly); + free(friendly); +@@ -3072,7 +3070,7 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len, + break; + + case OVNACT_LOAD: +- execute_load(ovnact_get_LOAD(a), dp, uflow, super); ++ execute_load(a, dp, uflow, super); + break; + + case OVNACT_MOVE: diff --git a/SPECS/ovn22.09.spec b/SPECS/ovn22.09.spec deleted file mode 100644 index cc34c55..0000000 --- a/SPECS/ovn22.09.spec +++ /dev/null @@ -1,638 +0,0 @@ -# Copyright (C) 2009, 2010, 2013, 2014 Nicira Networks, Inc. -# -# Copying and distribution of this file, with or without modification, -# are permitted in any medium without royalty provided the copyright -# notice and this notice are preserved. This file is offered as-is, -# without warranty of any kind. -# -# If tests have to be skipped while building, specify the '--without check' -# option. For example: -# rpmbuild -bb --without check rhel/openvswitch-fedora.spec - -# This defines the base package name's version. 
- -%define pkgver 2.13 -%define pkgname ovn22.09 - -# If libcap-ng isn't available and there is no need for running OVS -# as regular user, specify the '--without libcapng' -%bcond_without libcapng - -# Enable PIE, bz#955181 -%global _hardened_build 1 - -# RHEL-7 doesn't define _rundir macro yet -# Fedora 15 onwards uses /run as _rundir -%if 0%{!?_rundir:1} -%define _rundir /run -%endif - -# Build python2 (that provides python) and python3 subpackages on Fedora -# Build only python3 (that provides python) subpackage on RHEL8 -# Build only python subpackage on RHEL7 -%if 0%{?rhel} > 7 || 0%{?fedora} -# On RHEL8 Sphinx is included in buildroot -%global external_sphinx 1 -%else -# Don't use external sphinx (RHV doesn't have optional repositories enabled) -%global external_sphinx 0 -%endif - -# We would see rpmlinit error - E: hardcoded-library-path in '% {_prefix}/lib'. -# But there is no solution to fix this. Using {_lib} macro will solve the -# rpmlink error, but will install the files in /usr/lib64/. -# OVN pacemaker ocf script file is copied in /usr/lib/ocf/resource.d/ovn/ -# and we are not sure if pacemaker looks into this path to find the -# OVN resource agent script. -%global ovnlibdir %{_prefix}/lib - -Name: %{pkgname} -Summary: Open Virtual Network support -Group: System Environment/Daemons -URL: http://www.ovn.org/ -Version: 22.09.0 -Release: 31%{?commit0:.%{date}git%{shortcommit0}}%{?dist} -Provides: openvswitch%{pkgver}-ovn-common = %{?epoch:%{epoch}:}%{version}-%{release} -Obsoletes: openvswitch%{pkgver}-ovn-common < 2.11.0-1 - -# Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the -# lib/sflow*.[ch] files are SISSL -License: ASL 2.0 and LGPLv2+ and SISSL - -# Always pull an upstream release, since this is what we rebase to. -Source: https://github.com/ovn-org/ovn/archive/v%{version}.tar.gz#/ovn-%{version}.tar.gz - -%define ovscommit 2410b95597fcec5f733caf77febdb46f4ffacd27 -%define ovsshortcommit 2410b95 - -Source10: https://github.com/openvswitch/ovs/archive/%{ovscommit}.tar.gz#/openvswitch-%{ovsshortcommit}.tar.gz -%define ovsdir ovs-%{ovscommit} - -%define docutilsver 0.12 -%define pygmentsver 1.4 -%define sphinxver 1.1.3 -Source100: https://pypi.io/packages/source/d/docutils/docutils-%{docutilsver}.tar.gz -Source101: https://pypi.io/packages/source/P/Pygments/Pygments-%{pygmentsver}.tar.gz -Source102: https://pypi.io/packages/source/S/Sphinx/Sphinx-%{sphinxver}.tar.gz - -Source500: configlib.sh -Source501: gen_config_group.sh -Source502: set_config.sh - -# Important: source503 is used as the actual copy file -# @TODO: this causes a warning - fix it? 
-Source504: arm64-armv8a-linuxapp-gcc-config -Source505: ppc_64-power8-linuxapp-gcc-config -Source506: x86_64-native-linuxapp-gcc-config - -Patch: %{pkgname}.patch - -# FIXME Sphinx is used to generate some manpages, unfortunately, on RHEL, it's -# in the -optional repository and so we can't require it directly since RHV -# doesn't have the -optional repository enabled and so TPS fails -%if %{external_sphinx} -BuildRequires: python3-sphinx -%else -# Sphinx dependencies -BuildRequires: python-devel -BuildRequires: python-setuptools -#BuildRequires: python2-docutils -BuildRequires: python-jinja2 -BuildRequires: python-nose -#BuildRequires: python2-pygments -# docutils dependencies -BuildRequires: python-imaging -# pygments dependencies -BuildRequires: python-nose -%endif - -BuildRequires: gcc gcc-c++ make -BuildRequires: autoconf automake libtool -BuildRequires: systemd-units openssl openssl-devel -BuildRequires: python3-devel python3-setuptools -BuildRequires: desktop-file-utils -BuildRequires: groff-base graphviz -BuildRequires: unbound-devel - -# make check dependencies -BuildRequires: procps-ng -%if 0%{?rhel} == 8 || 0%{?fedora} -BuildRequires: python3-pyOpenSSL -%endif -BuildRequires: tcpdump - -%if %{with libcapng} -BuildRequires: libcap-ng libcap-ng-devel -%endif - -Requires: hostname openssl iproute module-init-tools - -Requires(post): systemd-units -Requires(preun): systemd-units -Requires(postun): systemd-units - -# to skip running checks, pass --without check -%bcond_without check - -%description -OVN, the Open Virtual Network, is a system to support virtual network -abstraction. OVN complements the existing capabilities of OVS to add -native support for virtual network abstractions, such as virtual L2 and L3 -overlays and security groups. - -%package central -Summary: Open Virtual Network support -License: ASL 2.0 -Requires: %{pkgname} -Requires: firewalld-filesystem -Provides: openvswitch%{pkgver}-ovn-central = %{?epoch:%{epoch}:}%{version}-%{release} -Obsoletes: openvswitch%{pkgver}-ovn-central < 2.11.0-1 - -%description central -OVN DB servers and ovn-northd running on a central node. - -%package host -Summary: Open Virtual Network support -License: ASL 2.0 -Requires: %{pkgname} -Requires: firewalld-filesystem -Provides: openvswitch%{pkgver}-ovn-host = %{?epoch:%{epoch}:}%{version}-%{release} -Obsoletes: openvswitch%{pkgver}-ovn-host < 2.11.0-1 - -%description host -OVN controller running on each host. - -%package vtep -Summary: Open Virtual Network support -License: ASL 2.0 -Requires: %{pkgname} -Provides: openvswitch%{pkgver}-ovn-vtep = %{?epoch:%{epoch}:}%{version}-%{release} -Obsoletes: openvswitch%{pkgver}-ovn-vtep < 2.11.0-1 - -%description vtep -OVN vtep controller - -%prep -%autosetup -n ovn-%{version} -a 10 -p 1 - -%build -%if 0%{?commit0:1} -# fix the snapshot unreleased version to be the released one. -sed -i.old -e "s/^AC_INIT(openvswitch,.*,/AC_INIT(openvswitch, %{version},/" configure.ac -%endif -./boot.sh - -# OVN source code is now separate. -# Build openvswitch first. -# XXX Current openvswitch2.13 doesn't -# use "2.13.0" for version. It's a commit hash -pushd %{ovsdir} -./boot.sh -%configure \ -%if %{with libcapng} - --enable-libcapng \ -%else - --disable-libcapng \ -%endif - --enable-ssl \ - --with-pkidir=%{_sharedstatedir}/openvswitch/pki - -make %{?_smp_mflags} -popd - -# Build OVN. -# XXX OVS version needs to be updated when ovs2.13 is updated. 
-%configure \ - --with-ovs-source=$PWD/%{ovsdir} \ -%if %{with libcapng} - --enable-libcapng \ -%else - --disable-libcapng \ -%endif - --enable-ssl \ - --with-pkidir=%{_sharedstatedir}/openvswitch/pki - -make %{?_smp_mflags} - -%install -%make_install -install -p -D -m 0644 \ - rhel/usr_share_ovn_scripts_systemd_sysconfig.template \ - $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/ovn - -for service in ovn-controller ovn-controller-vtep ovn-northd; do - install -p -D -m 0644 \ - rhel/usr_lib_systemd_system_${service}.service \ - $RPM_BUILD_ROOT%{_unitdir}/${service}.service -done - -install -d -m 0755 $RPM_BUILD_ROOT/%{_sharedstatedir}/ovn - -install -d $RPM_BUILD_ROOT%{ovnlibdir}/firewalld/services/ -install -p -m 0644 rhel/usr_lib_firewalld_services_ovn-central-firewall-service.xml \ - $RPM_BUILD_ROOT%{ovnlibdir}/firewalld/services/ovn-central-firewall-service.xml -install -p -m 0644 rhel/usr_lib_firewalld_services_ovn-host-firewall-service.xml \ - $RPM_BUILD_ROOT%{ovnlibdir}/firewalld/services/ovn-host-firewall-service.xml - -install -d -m 0755 $RPM_BUILD_ROOT%{ovnlibdir}/ocf/resource.d/ovn -ln -s %{_datadir}/ovn/scripts/ovndb-servers.ocf \ - $RPM_BUILD_ROOT%{ovnlibdir}/ocf/resource.d/ovn/ovndb-servers - -install -p -D -m 0644 rhel/etc_logrotate.d_ovn \ - $RPM_BUILD_ROOT/%{_sysconfdir}/logrotate.d/ovn - -# remove unneeded files. -rm -f $RPM_BUILD_ROOT%{_bindir}/ovs* -rm -f $RPM_BUILD_ROOT%{_bindir}/vtep-ctl -rm -f $RPM_BUILD_ROOT%{_sbindir}/ovs* -rm -f $RPM_BUILD_ROOT%{_mandir}/man1/ovs* -rm -f $RPM_BUILD_ROOT%{_mandir}/man5/ovs* -rm -f $RPM_BUILD_ROOT%{_mandir}/man5/vtep* -rm -f $RPM_BUILD_ROOT%{_mandir}/man7/ovs* -rm -f $RPM_BUILD_ROOT%{_mandir}/man8/ovs* -rm -f $RPM_BUILD_ROOT%{_mandir}/man8/vtep* -rm -rf $RPM_BUILD_ROOT%{_datadir}/ovn/python -rm -f $RPM_BUILD_ROOT%{_datadir}/ovn/scripts/ovs* -rm -rf $RPM_BUILD_ROOT%{_datadir}/ovn/bugtool-plugins -rm -f $RPM_BUILD_ROOT%{_libdir}/*.a -rm -f $RPM_BUILD_ROOT%{_libdir}/*.la -rm -f $RPM_BUILD_ROOT%{_libdir}/pkgconfig/*.pc -rm -f $RPM_BUILD_ROOT%{_includedir}/ovn/* -rm -f $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d/ovs-appctl-bashcomp.bash -rm -f $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d/ovs-vsctl-bashcomp.bash -rm -rf $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/openvswitch -rm -f $RPM_BUILD_ROOT%{_datadir}/ovn/scripts/ovn-bugtool* -rm -f $RPM_BUILD_ROOT/%{_bindir}/ovn-docker-overlay-driver \ - $RPM_BUILD_ROOT/%{_bindir}/ovn-docker-underlay-driver - -%check -%if %{with check} - touch resolv.conf - export OVS_RESOLV_CONF=$(pwd)/resolv.conf - if ! make check TESTSUITEFLAGS='%{_smp_mflags}'; then - cat tests/testsuite.log - if ! make check TESTSUITEFLAGS='--recheck'; then - cat tests/testsuite.log - # Presently a test case - "2796: ovn -- ovn-controller incremental processing" - # is failing on aarch64 arch. Let's not exit for this arch - # until we figure out why it is failing. - # Test case 93: ovn.at:12105 ovn -- ACLs on Port Groups is failing - # repeatedly on s390x. This needs to be investigated. - %ifnarch aarch64 - %ifnarch ppc64le - %ifnarch s390x - exit 1 - %endif - %endif - %endif - fi - fi -%endif - -%clean -rm -rf $RPM_BUILD_ROOT - -%pre central -if [ $1 -eq 1 ] ; then - # Package install. - /bin/systemctl status ovn-northd.service >/dev/null - ovn_status=$? - rpm -ql openvswitch-ovn-central > /dev/null - if [[ "$?" = "0" && "$ovn_status" = "0" ]]; then - # ovn-northd service is running which means old openvswitch-ovn-central - # is already installed and it will be cleaned up. 
So start ovn-northd - # service when posttrans central is called. - touch %{_localstatedir}/lib/rpm-state/ovn-northd - fi -fi - -%pre host -if [ $1 -eq 1 ] ; then - # Package install. - /bin/systemctl status ovn-controller.service >/dev/null - ovn_status=$? - rpm -ql openvswitch-ovn-host > /dev/null - if [[ "$?" = "0" && "$ovn_status" = "0" ]]; then - # ovn-controller service is running which means old - # openvswitch-ovn-host is installed and it will be cleaned up. So - # start ovn-controller service when posttrans host is called. - touch %{_localstatedir}/lib/rpm-state/ovn-controller - fi -fi - -%pre vtep -if [ $1 -eq 1 ] ; then - # Package install. - /bin/systemctl status ovn-controller-vtep.service >/dev/null - ovn_status=$? - rpm -ql openvswitch-ovn-vtep > /dev/null - if [[ "$?" = "0" && "$ovn_status" = "0" ]]; then - # ovn-controller-vtep service is running which means old - # openvswitch-ovn-vtep is installed and it will be cleaned up. So - # start ovn-controller-vtep service when posttrans host is called. - touch %{_localstatedir}/lib/rpm-state/ovn-controller-vtep - fi -fi - -%preun central -%if 0%{?systemd_preun:1} - %systemd_preun ovn-northd.service -%else - if [ $1 -eq 0 ] ; then - # Package removal, not upgrade - /bin/systemctl --no-reload disable ovn-northd.service >/dev/null 2>&1 || : - /bin/systemctl stop ovn-northd.service >/dev/null 2>&1 || : - fi -%endif - -%preun host -%if 0%{?systemd_preun:1} - %systemd_preun ovn-controller.service -%else - if [ $1 -eq 0 ] ; then - # Package removal, not upgrade - /bin/systemctl --no-reload disable ovn-controller.service >/dev/null 2>&1 || : - /bin/systemctl stop ovn-controller.service >/dev/null 2>&1 || : - fi -%endif - -%preun vtep -%if 0%{?systemd_preun:1} - %systemd_preun ovn-controller-vtep.service -%else - if [ $1 -eq 0 ] ; then - # Package removal, not upgrade - /bin/systemctl --no-reload disable ovn-controller-vtep.service >/dev/null 2>&1 || : - /bin/systemctl stop ovn-controller-vtep.service >/dev/null 2>&1 || : - fi -%endif - -%post -%if %{with libcapng} -if [ $1 -eq 1 ]; then - sed -i 's:^#OVN_USER_ID=:OVN_USER_ID=:' %{_sysconfdir}/sysconfig/ovn - sed -i 's:\(.*su\).*:\1 openvswitch openvswitch:' %{_sysconfdir}/logrotate.d/ovn -fi -%endif - -%post central -%if 0%{?systemd_post:1} - %systemd_post ovn-northd.service -%else - # Package install, not upgrade - if [ $1 -eq 1 ]; then - /bin/systemctl daemon-reload >dev/null || : - fi -%endif - -%post host -%if 0%{?systemd_post:1} - %systemd_post ovn-controller.service -%else - # Package install, not upgrade - if [ $1 -eq 1 ]; then - /bin/systemctl daemon-reload >dev/null || : - fi -%endif - -%post vtep -%if 0%{?systemd_post:1} - %systemd_post ovn-controller-vtep.service -%else - # Package install, not upgrade - if [ $1 -eq 1 ]; then - /bin/systemctl daemon-reload >dev/null || : - fi -%endif - -%postun - -%postun central -%if 0%{?systemd_postun_with_restart:1} - %systemd_postun_with_restart ovn-northd.service -%else - /bin/systemctl daemon-reload >/dev/null 2>&1 || : - if [ "$1" -ge "1" ] ; then - # Package upgrade, not uninstall - /bin/systemctl try-restart ovn-northd.service >/dev/null 2>&1 || : - fi -%endif - -%postun host -%if 0%{?systemd_postun_with_restart:1} - %systemd_postun_with_restart ovn-controller.service -%else - /bin/systemctl daemon-reload >/dev/null 2>&1 || : - if [ "$1" -ge "1" ] ; then - # Package upgrade, not uninstall - /bin/systemctl try-restart ovn-controller.service >/dev/null 2>&1 || : - fi -%endif - -%postun vtep -%if 0%{?systemd_postun_with_restart:1} - 
%systemd_postun_with_restart ovn-controller-vtep.service -%else - /bin/systemctl daemon-reload >/dev/null 2>&1 || : - if [ "$1" -ge "1" ] ; then - # Package upgrade, not uninstall - /bin/systemctl try-restart ovn-controller-vtep.service >/dev/null 2>&1 || : - fi -%endif - -%posttrans central -if [ $1 -eq 1 ]; then - # Package install, not upgrade - if [ -e %{_localstatedir}/lib/rpm-state/ovn-northd ]; then - rm %{_localstatedir}/lib/rpm-state/ovn-northd - /bin/systemctl start ovn-northd.service >/dev/null 2>&1 || : - fi -fi - - -%posttrans host -if [ $1 -eq 1 ]; then - # Package install, not upgrade - if [ -e %{_localstatedir}/lib/rpm-state/ovn-controller ]; then - rm %{_localstatedir}/lib/rpm-state/ovn-controller - /bin/systemctl start ovn-controller.service >/dev/null 2>&1 || : - fi -fi - -%posttrans vtep -if [ $1 -eq 1 ]; then - # Package install, not upgrade - if [ -e %{_localstatedir}/lib/rpm-state/ovn-controller-vtep ]; then - rm %{_localstatedir}/lib/rpm-state/ovn-controller-vtep - /bin/systemctl start ovn-controller-vtep.service >/dev/null 2>&1 || : - fi -fi - -%files -%{_bindir}/ovn-nbctl -%{_bindir}/ovn-sbctl -%{_bindir}/ovn-trace -%{_bindir}/ovn-detrace -%{_bindir}/ovn_detrace.py -%{_bindir}/ovn-appctl -%{_bindir}/ovn-ic-nbctl -%{_bindir}/ovn-ic-sbctl -%dir %{_datadir}/ovn/ -%dir %{_datadir}/ovn/scripts/ -%{_datadir}/ovn/scripts/ovn-ctl -%{_datadir}/ovn/scripts/ovn-lib -%{_datadir}/ovn/scripts/ovndb-servers.ocf -%{_mandir}/man8/ovn-ctl.8* -%{_mandir}/man8/ovn-appctl.8* -%{_mandir}/man8/ovn-nbctl.8* -%{_mandir}/man8/ovn-ic-nbctl.8* -%{_mandir}/man8/ovn-trace.8* -%{_mandir}/man1/ovn-detrace.1* -%{_mandir}/man7/ovn-architecture.7* -%{_mandir}/man8/ovn-sbctl.8* -%{_mandir}/man8/ovn-ic-sbctl.8* -%{_mandir}/man5/ovn-nb.5* -%{_mandir}/man5/ovn-ic-nb.5* -%{_mandir}/man5/ovn-sb.5* -%{_mandir}/man5/ovn-ic-sb.5* -%dir %{ovnlibdir}/ocf/resource.d/ovn/ -%{ovnlibdir}/ocf/resource.d/ovn/ovndb-servers -%config(noreplace) %verify(not md5 size mtime) %{_sysconfdir}/logrotate.d/ovn -%config(noreplace) %verify(not md5 size mtime) %{_sysconfdir}/sysconfig/ovn - -%files central -%{_bindir}/ovn-northd -%{_bindir}/ovn-ic -%{_mandir}/man8/ovn-northd.8* -%{_mandir}/man8/ovn-ic.8* -%{_datadir}/ovn/ovn-nb.ovsschema -%{_datadir}/ovn/ovn-ic-nb.ovsschema -%{_datadir}/ovn/ovn-sb.ovsschema -%{_datadir}/ovn/ovn-ic-sb.ovsschema -%{_unitdir}/ovn-northd.service -%{ovnlibdir}/firewalld/services/ovn-central-firewall-service.xml - -%files host -%{_bindir}/ovn-controller -%{_mandir}/man8/ovn-controller.8* -%{_unitdir}/ovn-controller.service -%{ovnlibdir}/firewalld/services/ovn-host-firewall-service.xml - -%files vtep -%{_bindir}/ovn-controller-vtep -%{_mandir}/man8/ovn-controller-vtep.8* -%{_unitdir}/ovn-controller-vtep.service - -%changelog -* Fri Jan 13 2023 Mark Michelson - 22.09.0-31 -- Merging from branch-22.09 to ovn22.09-lb-affinity -[Upstream: 75e5bb9272fb7e3a867b51fff2f524ca50f53b03] - -* Thu Jan 12 2023 Mark Michelson - 22.09.0-30 -- Merging from branch-22.09 to ovn22.09-lb-affinity -[Upstream: f73dd8e3018cfb7d6ee5cb29a5f2a05927541421] - -* Wed Dec 21 2022 Mark Michelson - 22.09.0-29 -- Merging from branch-22.09 to ovn22.09-lb-affinity -[Upstream: 548638a08d6c5927eb3aad93870af36f58da34a7] - -* Mon Dec 12 2022 Mark Michelson - 22.09.0-28 -- Merging from branch-22.09 to ovn22.09-lb-affinity -[Upstream: d6510560a43a7323d33a1d44f4386b7df846f978] - -* Fri Dec 09 2022 Mark Michelson - 22.09.0-27 -- Merging from branch-22.09 to ovn22.09-lb-affinity -[Upstream: 2a7c712e3bb14cdfd89fc5d21c11b567b0855f2a] - -* Thu Dec 
08 2022 Mark Michelson - 22.09.0-26 -- Merging from branch-22.09 to ovn22.09-lb-affinity -[Upstream: c70ad426f41a0ad2799dac0117c4b70ec3ebcd4a] - -* Tue Dec 06 2022 Mark Michelson - 22.09.0-25 -- ovn-northd.at: Fix failing lb-affinity test. -[Upstream: 60b856cfd55b8d636c5f8c1011781f587efe7cf5] - -* Tue Dec 06 2022 Dumitru Ceara - 22.09.0-24 -- northd: Include VIP port in LB affinity learn flow matches. (#2150533) -[Upstream: cc037c7538d635e7d014e98935a83bc15140674f] - -* Tue Dec 06 2022 Ales Musil - 22.09.0-23 -- northd: Improve the LB affinity code -[Upstream: 11da2339668a024a05512ba2178046135b784825] - -* Tue Nov 29 2022 Lorenzo Bianconi - 22.09.0-22 -- northd: rely on new actions for lb affinity -[Upstream: 5b6223dcb6060205c6e9d4e8c092e96134bb032a] - -* Tue Nov 29 2022 Lorenzo Bianconi - 22.09.0-21 -- actions: introduce chk_lb_aff action -[Upstream: f74c418e3cd2079e7cae4d3ec293ffc387f5a660] - -* Tue Nov 29 2022 Lorenzo Bianconi - 22.09.0-20 -- actions: introduce commit_lb_aff action -[Upstream: 2d190e5c69c9440c720ab8412cc04e4096d4114a] - -* Fri Nov 25 2022 Xavier Simonart - 22.09.0-19 -- controller: Fixed ovs/ovn(features) connection lost when running more than 120 seconds (#2144084) -[Upstream: db61b2e4f166092e5bc93f4cba7696a72037a069] - -* Thu Nov 24 2022 Dumitru Ceara - 22.09.0-18 -- ovs: Bump submodule to include latest fixes. -[Upstream: d62dde642879ffb7ff1eb8f4077b6224f977c6d7] - -* Tue Nov 22 2022 Xavier Simonart - 22.09.0-17 -- ovn-controller: Fixed missing flows after interface deletion (#2129866) -[Upstream: 90c165fa5a6ecdd9bac606cf259ae88228b96208] - -* Tue Nov 22 2022 Xavier Simonart - 22.09.0-16 -- ovn-controller: Fix releasing wrong vif -[Upstream: 4da7a269c9eb055b2cfa27d67593a77167b8c9a6] - -* Tue Nov 22 2022 Xavier Simonart - 22.09.0-15 -- tests: Fix flaky test "multi-vtep SB Chassis encap updates" -[Upstream: ef15d5c22fa2255db69be2d6da822cefb099327c] - -* Fri Nov 18 2022 Ilya Maximets - 22.09.0-14 -- controller: Fix QoS for ports with undetected or too low link speed. (#2136716) -[Upstream: ae96d5d753ccbee9b239178f56460e05169ac9f7] - -* Tue Nov 15 2022 Mark Michelson - 22.09.0-13 -- ovn-controller: Fix some issues with CT zone assignment. (#2126406) -[Upstream: 0fc041667031da20cd03c0b76de8de3dbe502d50] - -* Thu Nov 03 2022 Ales Musil - 22.09.0-12 -- ci: Update jobs to use numbers instead of test flags -[Upstream: bc609bf148be3a38a0b8f38f049f30eb7e9b55f8] - -* Thu Oct 20 2022 Xavier Simonart - 22.09.0-11 -- ovs: Bump submodule to tip of branch-3.0 and add related test (#2126450) -[Upstream: c18415d5ae7273c633190df4ac9e872a0a0f9709] - -* Wed Oct 05 2022 Lorenzo Bianconi - 22.09.0-10 -- controller: fix ipv6 prefix delegation in gw router mode (#2129244 2129247) -[Upstream: f2042a2e6aeb1a7fe266316337545331f5186dd0] - -* Wed Oct 05 2022 Vladislav Odintsov - 22.09.0-9 -- spec: require python3-openvswitch for ovn-detrace -[Upstream: 29e4d43966fbf34d9707e31880c455f22a643bb3] - -* Mon Oct 03 2022 Mark Michelson - 22.09.0-8 -- northd: Use separate SNAT for already-DNATted traffic. 
-[Upstream: 51044dbfdba234a3f50d8c9c952335e41b72a39b] - -* Fri Sep 30 2022 Ales Musil - 22.09.0-7 -- controller: Restore MAC and vlan for DVR scenario (#2123837) -[Upstream: 86e99bf95a2191ebdcd5d03335ff8add2a636f55] - -* Fri Sep 30 2022 Xavier Simonart - 22.09.0-6 -- northd: Fix multicast table full (#2094710) -[Upstream: 40dd85eb8d2d2d88f9000b6be6fb263b4bd1a27f] - -* Tue Sep 27 2022 Xavier Simonart - 22.09.0-5 -- controller: Fix first ping from lsp to external through snat failing (#2130045) -[Upstream: 76a01e53a9fcc3184211cca10787d462cb86a352] - -* Fri Sep 16 2022 Mark Michelson - 22.09.0-4 -- Prepare for 22.09.1. -[Upstream: 854c2b1a4ba9ef35e03348d1bd4fc8265f3f74a3] - diff --git a/SPECS/ovn22.12.spec b/SPECS/ovn22.12.spec new file mode 100644 index 0000000..3536329 --- /dev/null +++ b/SPECS/ovn22.12.spec @@ -0,0 +1,650 @@ +# Copyright (C) 2009, 2010, 2013, 2014 Nicira Networks, Inc. +# +# Copying and distribution of this file, with or without modification, +# are permitted in any medium without royalty provided the copyright +# notice and this notice are preserved. This file is offered as-is, +# without warranty of any kind. +# +# If tests have to be skipped while building, specify the '--without check' +# option. For example: +# rpmbuild -bb --without check rhel/openvswitch-fedora.spec + +# This defines the base package name's version. + +%define pkgver 2.13 +%define pkgname ovn22.12 + +# If libcap-ng isn't available and there is no need for running OVS +# as regular user, specify the '--without libcapng' +%bcond_without libcapng + +# Enable PIE, bz#955181 +%global _hardened_build 1 + +# RHEL-7 doesn't define _rundir macro yet +# Fedora 15 onwards uses /run as _rundir +%if 0%{!?_rundir:1} +%define _rundir /run +%endif + +# Build python2 (that provides python) and python3 subpackages on Fedora +# Build only python3 (that provides python) subpackage on RHEL8 +# Build only python subpackage on RHEL7 +%if 0%{?rhel} > 7 || 0%{?fedora} +# On RHEL8 Sphinx is included in buildroot +%global external_sphinx 1 +%else +# Don't use external sphinx (RHV doesn't have optional repositories enabled) +%global external_sphinx 0 +%endif + +# We would see rpmlinit error - E: hardcoded-library-path in '% {_prefix}/lib'. +# But there is no solution to fix this. Using {_lib} macro will solve the +# rpmlink error, but will install the files in /usr/lib64/. +# OVN pacemaker ocf script file is copied in /usr/lib/ocf/resource.d/ovn/ +# and we are not sure if pacemaker looks into this path to find the +# OVN resource agent script. +%global ovnlibdir %{_prefix}/lib + +Name: %{pkgname} +Summary: Open Virtual Network support +Group: System Environment/Daemons +URL: http://www.ovn.org/ +Version: 22.12.0 +Release: 34%{?commit0:.%{date}git%{shortcommit0}}%{?dist} +Provides: openvswitch%{pkgver}-ovn-common = %{?epoch:%{epoch}:}%{version}-%{release} +Obsoletes: openvswitch%{pkgver}-ovn-common < 2.11.0-1 + +# Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the +# lib/sflow*.[ch] files are SISSL +License: ASL 2.0 and LGPLv2+ and SISSL + +# Always pull an upstream release, since this is what we rebase to. 
+Source: https://github.com/ovn-org/ovn/archive/v%{version}.tar.gz#/ovn-%{version}.tar.gz + +%define ovscommit a787fbbf9dd6a108a53053afb45fb59a0b58b514 +%define ovsshortcommit a787fbb + +Source10: https://github.com/openvswitch/ovs/archive/%{ovscommit}.tar.gz#/openvswitch-%{ovsshortcommit}.tar.gz +%define ovsdir ovs-%{ovscommit} + +%define docutilsver 0.12 +%define pygmentsver 1.4 +%define sphinxver 1.1.3 +Source100: https://pypi.io/packages/source/d/docutils/docutils-%{docutilsver}.tar.gz +Source101: https://pypi.io/packages/source/P/Pygments/Pygments-%{pygmentsver}.tar.gz +Source102: https://pypi.io/packages/source/S/Sphinx/Sphinx-%{sphinxver}.tar.gz + +Source500: configlib.sh +Source501: gen_config_group.sh +Source502: set_config.sh + +# Important: source503 is used as the actual copy file +# @TODO: this causes a warning - fix it? +Source504: arm64-armv8a-linuxapp-gcc-config +Source505: ppc_64-power8-linuxapp-gcc-config +Source506: x86_64-native-linuxapp-gcc-config + +Patch: %{pkgname}.patch + +# FIXME Sphinx is used to generate some manpages, unfortunately, on RHEL, it's +# in the -optional repository and so we can't require it directly since RHV +# doesn't have the -optional repository enabled and so TPS fails +%if %{external_sphinx} +BuildRequires: python3-sphinx +%else +# Sphinx dependencies +BuildRequires: python-devel +BuildRequires: python-setuptools +#BuildRequires: python2-docutils +BuildRequires: python-jinja2 +BuildRequires: python-nose +#BuildRequires: python2-pygments +# docutils dependencies +BuildRequires: python-imaging +# pygments dependencies +BuildRequires: python-nose +%endif + +BuildRequires: gcc gcc-c++ make +BuildRequires: autoconf automake libtool +BuildRequires: systemd-units openssl openssl-devel +BuildRequires: python3-devel python3-setuptools +BuildRequires: desktop-file-utils +BuildRequires: groff-base graphviz +BuildRequires: unbound-devel + +# make check dependencies +BuildRequires: procps-ng +%if 0%{?rhel} == 8 || 0%{?fedora} +BuildRequires: python3-pyOpenSSL +%endif +BuildRequires: tcpdump + +%if %{with libcapng} +BuildRequires: libcap-ng libcap-ng-devel +%endif + +Requires: hostname openssl iproute module-init-tools + +Requires(post): systemd-units +Requires(preun): systemd-units +Requires(postun): systemd-units + +# to skip running checks, pass --without check +%bcond_without check + +%description +OVN, the Open Virtual Network, is a system to support virtual network +abstraction. OVN complements the existing capabilities of OVS to add +native support for virtual network abstractions, such as virtual L2 and L3 +overlays and security groups. + +%package central +Summary: Open Virtual Network support +License: ASL 2.0 +Requires: %{pkgname} +Requires: firewalld-filesystem +Provides: openvswitch%{pkgver}-ovn-central = %{?epoch:%{epoch}:}%{version}-%{release} +Obsoletes: openvswitch%{pkgver}-ovn-central < 2.11.0-1 + +%description central +OVN DB servers and ovn-northd running on a central node. + +%package host +Summary: Open Virtual Network support +License: ASL 2.0 +Requires: %{pkgname} +Requires: firewalld-filesystem +Provides: openvswitch%{pkgver}-ovn-host = %{?epoch:%{epoch}:}%{version}-%{release} +Obsoletes: openvswitch%{pkgver}-ovn-host < 2.11.0-1 + +%description host +OVN controller running on each host. 
+ +%package vtep +Summary: Open Virtual Network support +License: ASL 2.0 +Requires: %{pkgname} +Provides: openvswitch%{pkgver}-ovn-vtep = %{?epoch:%{epoch}:}%{version}-%{release} +Obsoletes: openvswitch%{pkgver}-ovn-vtep < 2.11.0-1 + +%description vtep +OVN vtep controller + +%prep +%autosetup -n ovn-%{version} -a 10 -p 1 + +%build +%if 0%{?commit0:1} +# fix the snapshot unreleased version to be the released one. +sed -i.old -e "s/^AC_INIT(openvswitch,.*,/AC_INIT(openvswitch, %{version},/" configure.ac +%endif +./boot.sh + +# OVN source code is now separate. +# Build openvswitch first. +# XXX Current openvswitch2.13 doesn't +# use "2.13.0" for version. It's a commit hash +pushd %{ovsdir} +./boot.sh +%configure \ +%if %{with libcapng} + --enable-libcapng \ +%else + --disable-libcapng \ +%endif + --enable-ssl \ + --with-pkidir=%{_sharedstatedir}/openvswitch/pki + +make %{?_smp_mflags} +popd + +# Build OVN. +# XXX OVS version needs to be updated when ovs2.13 is updated. +%configure \ + --with-ovs-source=$PWD/%{ovsdir} \ +%if %{with libcapng} + --enable-libcapng \ +%else + --disable-libcapng \ +%endif + --enable-ssl \ + --with-pkidir=%{_sharedstatedir}/openvswitch/pki + +make %{?_smp_mflags} + +%install +%make_install +install -p -D -m 0644 \ + rhel/usr_share_ovn_scripts_systemd_sysconfig.template \ + $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/ovn + +for service in ovn-controller ovn-controller-vtep ovn-northd; do + install -p -D -m 0644 \ + rhel/usr_lib_systemd_system_${service}.service \ + $RPM_BUILD_ROOT%{_unitdir}/${service}.service +done + +install -d -m 0755 $RPM_BUILD_ROOT/%{_sharedstatedir}/ovn + +install -d $RPM_BUILD_ROOT%{ovnlibdir}/firewalld/services/ +install -p -m 0644 rhel/usr_lib_firewalld_services_ovn-central-firewall-service.xml \ + $RPM_BUILD_ROOT%{ovnlibdir}/firewalld/services/ovn-central-firewall-service.xml +install -p -m 0644 rhel/usr_lib_firewalld_services_ovn-host-firewall-service.xml \ + $RPM_BUILD_ROOT%{ovnlibdir}/firewalld/services/ovn-host-firewall-service.xml + +install -d -m 0755 $RPM_BUILD_ROOT%{ovnlibdir}/ocf/resource.d/ovn +ln -s %{_datadir}/ovn/scripts/ovndb-servers.ocf \ + $RPM_BUILD_ROOT%{ovnlibdir}/ocf/resource.d/ovn/ovndb-servers + +install -p -D -m 0644 rhel/etc_logrotate.d_ovn \ + $RPM_BUILD_ROOT/%{_sysconfdir}/logrotate.d/ovn + +# remove unneeded files. +rm -f $RPM_BUILD_ROOT%{_bindir}/ovs* +rm -f $RPM_BUILD_ROOT%{_bindir}/vtep-ctl +rm -f $RPM_BUILD_ROOT%{_sbindir}/ovs* +rm -f $RPM_BUILD_ROOT%{_mandir}/man1/ovs* +rm -f $RPM_BUILD_ROOT%{_mandir}/man5/ovs* +rm -f $RPM_BUILD_ROOT%{_mandir}/man5/vtep* +rm -f $RPM_BUILD_ROOT%{_mandir}/man7/ovs* +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/ovs* +rm -f $RPM_BUILD_ROOT%{_mandir}/man8/vtep* +rm -rf $RPM_BUILD_ROOT%{_datadir}/ovn/python +rm -f $RPM_BUILD_ROOT%{_datadir}/ovn/scripts/ovs* +rm -rf $RPM_BUILD_ROOT%{_datadir}/ovn/bugtool-plugins +rm -f $RPM_BUILD_ROOT%{_libdir}/*.a +rm -f $RPM_BUILD_ROOT%{_libdir}/*.la +rm -f $RPM_BUILD_ROOT%{_libdir}/pkgconfig/*.pc +rm -f $RPM_BUILD_ROOT%{_includedir}/ovn/* +rm -f $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d/ovs-appctl-bashcomp.bash +rm -f $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d/ovs-vsctl-bashcomp.bash +rm -rf $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/openvswitch +rm -f $RPM_BUILD_ROOT%{_datadir}/ovn/scripts/ovn-bugtool* +rm -f $RPM_BUILD_ROOT/%{_bindir}/ovn-docker-overlay-driver \ + $RPM_BUILD_ROOT/%{_bindir}/ovn-docker-underlay-driver + +%check +%if %{with check} + touch resolv.conf + export OVS_RESOLV_CONF=$(pwd)/resolv.conf + if ! 
make check TESTSUITEFLAGS='%{_smp_mflags}'; then + cat tests/testsuite.log + if ! make check TESTSUITEFLAGS='--recheck'; then + cat tests/testsuite.log + # Presently a test case - "2796: ovn -- ovn-controller incremental processing" + # is failing on aarch64 arch. Let's not exit for this arch + # until we figure out why it is failing. + # Test case 93: ovn.at:12105 ovn -- ACLs on Port Groups is failing + # repeatedly on s390x. This needs to be investigated. + %ifnarch aarch64 + %ifnarch ppc64le + %ifnarch s390x + exit 1 + %endif + %endif + %endif + fi + fi +%endif + +%clean +rm -rf $RPM_BUILD_ROOT + +%pre central +if [ $1 -eq 1 ] ; then + # Package install. + /bin/systemctl status ovn-northd.service >/dev/null + ovn_status=$? + rpm -ql openvswitch-ovn-central > /dev/null + if [[ "$?" = "0" && "$ovn_status" = "0" ]]; then + # ovn-northd service is running which means old openvswitch-ovn-central + # is already installed and it will be cleaned up. So start ovn-northd + # service when posttrans central is called. + touch %{_localstatedir}/lib/rpm-state/ovn-northd + fi +fi + +%pre host +if [ $1 -eq 1 ] ; then + # Package install. + /bin/systemctl status ovn-controller.service >/dev/null + ovn_status=$? + rpm -ql openvswitch-ovn-host > /dev/null + if [[ "$?" = "0" && "$ovn_status" = "0" ]]; then + # ovn-controller service is running which means old + # openvswitch-ovn-host is installed and it will be cleaned up. So + # start ovn-controller service when posttrans host is called. + touch %{_localstatedir}/lib/rpm-state/ovn-controller + fi +fi + +%pre vtep +if [ $1 -eq 1 ] ; then + # Package install. + /bin/systemctl status ovn-controller-vtep.service >/dev/null + ovn_status=$? + rpm -ql openvswitch-ovn-vtep > /dev/null + if [[ "$?" = "0" && "$ovn_status" = "0" ]]; then + # ovn-controller-vtep service is running which means old + # openvswitch-ovn-vtep is installed and it will be cleaned up. So + # start ovn-controller-vtep service when posttrans host is called. 
+ touch %{_localstatedir}/lib/rpm-state/ovn-controller-vtep + fi +fi + +%preun central +%if 0%{?systemd_preun:1} + %systemd_preun ovn-northd.service +%else + if [ $1 -eq 0 ] ; then + # Package removal, not upgrade + /bin/systemctl --no-reload disable ovn-northd.service >/dev/null 2>&1 || : + /bin/systemctl stop ovn-northd.service >/dev/null 2>&1 || : + fi +%endif + +%preun host +%if 0%{?systemd_preun:1} + %systemd_preun ovn-controller.service +%else + if [ $1 -eq 0 ] ; then + # Package removal, not upgrade + /bin/systemctl --no-reload disable ovn-controller.service >/dev/null 2>&1 || : + /bin/systemctl stop ovn-controller.service >/dev/null 2>&1 || : + fi +%endif + +%preun vtep +%if 0%{?systemd_preun:1} + %systemd_preun ovn-controller-vtep.service +%else + if [ $1 -eq 0 ] ; then + # Package removal, not upgrade + /bin/systemctl --no-reload disable ovn-controller-vtep.service >/dev/null 2>&1 || : + /bin/systemctl stop ovn-controller-vtep.service >/dev/null 2>&1 || : + fi +%endif + +%post +%if %{with libcapng} +if [ $1 -eq 1 ]; then + sed -i 's:^#OVN_USER_ID=:OVN_USER_ID=:' %{_sysconfdir}/sysconfig/ovn + sed -i 's:\(.*su\).*:\1 openvswitch openvswitch:' %{_sysconfdir}/logrotate.d/ovn +fi +%endif + +%post central +%if 0%{?systemd_post:1} + %systemd_post ovn-northd.service +%else + # Package install, not upgrade + if [ $1 -eq 1 ]; then + /bin/systemctl daemon-reload >dev/null || : + fi +%endif + +%post host +%if 0%{?systemd_post:1} + %systemd_post ovn-controller.service +%else + # Package install, not upgrade + if [ $1 -eq 1 ]; then + /bin/systemctl daemon-reload >dev/null || : + fi +%endif + +%post vtep +%if 0%{?systemd_post:1} + %systemd_post ovn-controller-vtep.service +%else + # Package install, not upgrade + if [ $1 -eq 1 ]; then + /bin/systemctl daemon-reload >dev/null || : + fi +%endif + +%postun + +%postun central +%if 0%{?systemd_postun_with_restart:1} + %systemd_postun_with_restart ovn-northd.service +%else + /bin/systemctl daemon-reload >/dev/null 2>&1 || : + if [ "$1" -ge "1" ] ; then + # Package upgrade, not uninstall + /bin/systemctl try-restart ovn-northd.service >/dev/null 2>&1 || : + fi +%endif + +%postun host +%if 0%{?systemd_postun_with_restart:1} + %systemd_postun_with_restart ovn-controller.service +%else + /bin/systemctl daemon-reload >/dev/null 2>&1 || : + if [ "$1" -ge "1" ] ; then + # Package upgrade, not uninstall + /bin/systemctl try-restart ovn-controller.service >/dev/null 2>&1 || : + fi +%endif + +%postun vtep +%if 0%{?systemd_postun_with_restart:1} + %systemd_postun_with_restart ovn-controller-vtep.service +%else + /bin/systemctl daemon-reload >/dev/null 2>&1 || : + if [ "$1" -ge "1" ] ; then + # Package upgrade, not uninstall + /bin/systemctl try-restart ovn-controller-vtep.service >/dev/null 2>&1 || : + fi +%endif + +%posttrans central +if [ $1 -eq 1 ]; then + # Package install, not upgrade + if [ -e %{_localstatedir}/lib/rpm-state/ovn-northd ]; then + rm %{_localstatedir}/lib/rpm-state/ovn-northd + /bin/systemctl start ovn-northd.service >/dev/null 2>&1 || : + fi +fi + + +%posttrans host +if [ $1 -eq 1 ]; then + # Package install, not upgrade + if [ -e %{_localstatedir}/lib/rpm-state/ovn-controller ]; then + rm %{_localstatedir}/lib/rpm-state/ovn-controller + /bin/systemctl start ovn-controller.service >/dev/null 2>&1 || : + fi +fi + +%posttrans vtep +if [ $1 -eq 1 ]; then + # Package install, not upgrade + if [ -e %{_localstatedir}/lib/rpm-state/ovn-controller-vtep ]; then + rm %{_localstatedir}/lib/rpm-state/ovn-controller-vtep + /bin/systemctl start 
ovn-controller-vtep.service >/dev/null 2>&1 || : + fi +fi + +%files +%{_bindir}/ovn-nbctl +%{_bindir}/ovn-sbctl +%{_bindir}/ovn-trace +%{_bindir}/ovn-detrace +%{_bindir}/ovn_detrace.py +%{_bindir}/ovn-appctl +%{_bindir}/ovn-ic-nbctl +%{_bindir}/ovn-ic-sbctl +%dir %{_datadir}/ovn/ +%dir %{_datadir}/ovn/scripts/ +%{_datadir}/ovn/scripts/ovn-ctl +%{_datadir}/ovn/scripts/ovn-lib +%{_datadir}/ovn/scripts/ovndb-servers.ocf +%{_mandir}/man8/ovn-ctl.8* +%{_mandir}/man8/ovn-appctl.8* +%{_mandir}/man8/ovn-nbctl.8* +%{_mandir}/man8/ovn-ic-nbctl.8* +%{_mandir}/man8/ovn-trace.8* +%{_mandir}/man1/ovn-detrace.1* +%{_mandir}/man7/ovn-architecture.7* +%{_mandir}/man8/ovn-sbctl.8* +%{_mandir}/man8/ovn-ic-sbctl.8* +%{_mandir}/man5/ovn-nb.5* +%{_mandir}/man5/ovn-ic-nb.5* +%{_mandir}/man5/ovn-sb.5* +%{_mandir}/man5/ovn-ic-sb.5* +%dir %{ovnlibdir}/ocf/resource.d/ovn/ +%{ovnlibdir}/ocf/resource.d/ovn/ovndb-servers +%config(noreplace) %verify(not md5 size mtime) %{_sysconfdir}/logrotate.d/ovn +%config(noreplace) %verify(not md5 size mtime) %{_sysconfdir}/sysconfig/ovn + +%files central +%{_bindir}/ovn-northd +%{_bindir}/ovn-ic +%{_mandir}/man8/ovn-northd.8* +%{_mandir}/man8/ovn-ic.8* +%{_datadir}/ovn/ovn-nb.ovsschema +%{_datadir}/ovn/ovn-ic-nb.ovsschema +%{_datadir}/ovn/ovn-sb.ovsschema +%{_datadir}/ovn/ovn-ic-sb.ovsschema +%{_unitdir}/ovn-northd.service +%{ovnlibdir}/firewalld/services/ovn-central-firewall-service.xml + +%files host +%{_bindir}/ovn-controller +%{_mandir}/man8/ovn-controller.8* +%{_unitdir}/ovn-controller.service +%{ovnlibdir}/firewalld/services/ovn-host-firewall-service.xml + +%files vtep +%{_bindir}/ovn-controller-vtep +%{_mandir}/man8/ovn-controller-vtep.8* +%{_unitdir}/ovn-controller-vtep.service + +%changelog +* Thu Mar 16 2023 Dumitru Ceara - 22.12.0-34 +- northd: Ignore remote chassis when computing the supported feature set. +[Upstream: 31ffefe9f6cc65192a8158adc41ad7adb02f634b] + +* Thu Mar 09 2023 Ales Musil - 22.12.0-33 +- northd: Fix missig "); " from LB flows +[Upstream: cf205ca0e52c425a14f49145fa74b3fe293b547e] + +* Thu Mar 02 2023 Ilya Maximets - 22.12.0-32 +- ovn-util: Remove unused ovn_parse_internal_version_minor. +[Upstream: 5b9e7a70386ee9863469fbf2ce4b9a4d922716d8] + +* Thu Mar 02 2023 Felix Hüttner - 22.12.0-31 +- northd: fix comments on functions +[Upstream: 79dca2c4d0f3550757b6e3bb0813703348dab541] + +* Thu Mar 02 2023 Ales Musil - 22.12.0-30 +- system-tests: Reduce flakiness of ACL reject tests +[Upstream: b3196d1b140c98b409086ca72880a76da96c6c62] + +* Thu Mar 02 2023 Ales Musil - 22.12.0-29 +- dbctl: Fix a couple of memory leaks +[Upstream: 2733558a2e76ad6db3fc639f1bd8235d6382248b] + +* Thu Mar 02 2023 Dumitru Ceara - 22.12.0-28 +- ci: ovn-kubernetes: Bump libovsdb to a6a173993830. +[Upstream: 81ae7831f8d64a114a1be265782b6aa9ad0c52db] + +* Wed Mar 01 2023 Xavier Simonart - 22.12.0-27 +- tests: Fixed some tests failing on (very) slow systems +[Upstream: 2bd8697b25ac4342af33985899248248557c5481] + +* Mon Feb 27 2023 Ales Musil - 22.12.0-26 +- tests: Decrease the number of zones and switches for interconnection +[Upstream: 2a24ebc3064959c57ded1060a6a31be5397382b3] + +* Thu Feb 16 2023 Dumitru Ceara - 22.12.0-25 +- lb: northd: Properly format IPv6 SB load balancer VIPs. 
+[Upstream: 7053ae61267ebcb282d5ef18b5bd8f2f6c6c37e0] + +* Wed Feb 15 2023 Ales Musil - 22.12.0-24 +- system-test: Use OVS_WAIT_UNTIL for tcpdump start instead fo sleep +[Upstream: d5273f929513458a569cdfb297bffd9922d44c01] + +* Wed Feb 15 2023 Ihar Hrachyshka - 22.12.0-23 +- docs: fix the max number of ports per network for vxlan +[Upstream: 4dfa4ba431ab634b6068f27e886a4d403d589c87] + +* Wed Feb 15 2023 Ales Musil - 22.12.0-22 +- ovn-nbctl: Fix documentation typo (#2168009) +[Upstream: 0c44d7dbf4a013f08c79d5818e89a8f55ecd09e0] + +* Wed Feb 15 2023 Lorenzo Bianconi - 22.12.0-21 +- northd: do not create flows for reserved multicast IPv6 groups (#2154930) +[Upstream: 61e030ed59c2d2a1029866dce6769428e0abbc0c] + +* Thu Feb 02 2023 Han Zhou - 22.12.0-20 +- northd.c: Validate port type to avoid unexpected behavior. +[Upstream: b67009fdb6312e95367183c65b439fd3b7a288bf] + +* Tue Jan 31 2023 Surya Seetharaman - 22.12.0-19 +- Add the metalLB install flag for CI actions +[Upstream: 65990b8398e8e7ff29c6d7e9903fd0cf7ef64965] + +* Mon Jan 23 2023 Ales Musil - 22.12.0-18 +- ovn-trace: Use the original ovnact for execute_load +[Upstream: 4c78bef966927f4083b601a6a4f5fc76a839fd1a] + +* Mon Jan 23 2023 Dumitru Ceara - 22.12.0-17 +- northd: Add logical flows to allow rpl/rel traffic in acl_after_lb stage. (#1947807) +[Upstream: d6914efd53ac28a6e3da6e65f9e026674f05dc4c] + +* Fri Jan 20 2023 Mark Michelson - 22.12.0-16 +- ovn-controller: Fix initial requested SNAT zone assignment. (#2160403) +[Upstream: 17f1e9e0148e298b6ec525d5d6b149082a864dca] + +* Thu Jan 19 2023 Han Zhou - 22.12.0-15 +- northd: Drop packets destined to router owned NAT IP for DGP. +[Upstream: 481f25b784896eec07fedc77631992a009bcdada] + +* Thu Jan 19 2023 Ales Musil - 22.12.0-14 +- northd: Add flag for CT related (#2126083) +[Upstream: 2619f6a27aca2a5925e25297f75e6a925cf1eb6a] + +* Wed Jan 18 2023 Xavier Simonart - 22.12.0-13 +- tests: Fixed load balancing system-tests +[Upstream: 1791a107debbaa474669a794b4d2a6dff4cb1dcb] + +* Wed Jan 18 2023 Xavier Simonart - 22.12.0-12 +- tests: Fixed flaky ACL fair Meters +[Upstream: f9fb0bb4de4e7cb0a02fcb0794e226e6af8e8f5c] + +* Wed Jan 18 2023 Lorenzo Bianconi - 22.12.0-11 +- northd: move hairpin stages before acl_after_lb (#2103086) +[Upstream: 3723a6d6e39dcffc502e094ccc10a8d638fa5efa] + +* Tue Jan 17 2023 Xavier Simonart - 22.12.0-10 +- controller: Fix missing first ping from pod to external (#2129283) +[Upstream: 7109f02b78f5087b5bae2885f153378e627d90f7] + +* Mon Jan 16 2023 Lorenzo Bianconi - 22.12.0-9 +- controller: use packet proto for hairpin traffic learned action if not specified (#2157846) +[Upstream: 588291528fc0568e7da402c05b596c6c855d2c5f] + +* Fri Jan 13 2023 Dumitru Ceara - 22.12.0-8 +- .ci: ovn-kubernetes: Add a "prepare" stage to allow for custom actions. +[Upstream: 29fb21e6ec0a1203e3f5b2bfff4c3ccea8df4d37] + +* Wed Jan 11 2023 Han Zhou - 22.12.0-7 +- build-aux/sodepends.py: Fix flake8 error. +[Upstream: 1fd28ef34bef9b19ca350f15bd03e10265a911dc] + +* Wed Jan 11 2023 Han Zhou - 22.12.0-6 +- build-aux/sodepends.py: Fix broken build when manpage changes. +[Upstream: 79edad8a1e547f4120ea3d20f08aafe1e40a6f65] + +* Tue Jan 10 2023 Dumitru Ceara - 22.12.0-5 +- ovn-ic: Only monitor useful tables and columns. +[Upstream: fdad33f2348f34b5fb886a5a3143d91f44021811] + +* Fri Dec 16 2022 Mark Michelson - 22.12.0-4 +- Prepare for 22.12.1. +[Upstream: 78af8b76ab30ad3e704211256c313dec67f63cb8] +