diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh
index 2b0782aea..dc1ca5240 100755
--- a/.ci/linux-build.sh
+++ b/.ci/linux-build.sh
@@ -47,15 +47,10 @@ else
fi
if [ "$TESTSUITE" ]; then
- TESTSUITEFLAGS=""
- if [[ ! -z $TESTSUITE_KW ]]; then
- TESTSUITEFLAGS="-k $TESTSUITE_KW"
- fi
-
if [ "$TESTSUITE" = "system-test" ]; then
configure_ovn $OPTS
make -j4 || { cat config.log; exit 1; }
- if ! sudo make -j4 check-kernel TESTSUITEFLAGS="$TESTSUITEFLAGS" RECHECK=yes; then
+ if ! sudo make -j4 check-kernel TESTSUITEFLAGS="$TEST_RANGE" RECHECK=yes; then
# system-kmod-testsuite.log is necessary for debugging.
cat tests/system-kmod-testsuite.log
exit 1
@@ -67,7 +62,7 @@ if [ "$TESTSUITE" ]; then
export DISTCHECK_CONFIGURE_FLAGS="$OPTS"
if ! make distcheck CFLAGS="${COMMON_CFLAGS} ${OVN_CFLAGS}" -j4 \
- TESTSUITEFLAGS="$TESTSUITEFLAGS -j4" RECHECK=yes
+ TESTSUITEFLAGS="-j4 $TEST_RANGE" RECHECK=yes
then
# testsuite.log is necessary for debugging.
cat */_build/sub/tests/testsuite.log
diff --git a/.ci/ovn-kubernetes/Dockerfile b/.ci/ovn-kubernetes/Dockerfile
index e74b620be..7edf86a13 100644
--- a/.ci/ovn-kubernetes/Dockerfile
+++ b/.ci/ovn-kubernetes/Dockerfile
@@ -47,9 +47,17 @@ RUN GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@${LIBOVSD
# Clone OVN Kubernetes and build the binary based on the commit passed as argument
WORKDIR /root
RUN git clone https://github.com/ovn-org/ovn-kubernetes.git
-WORKDIR /root/ovn-kubernetes/go-controller
+WORKDIR /root/ovn-kubernetes
RUN git checkout ${OVNKUBE_COMMIT} && git log -n 1
+# Copy the ovn-kubernetes scripts from the OVN sources and apply any
+# custom changes if needed.
+RUN mkdir -p /tmp/ovn/.ci/ovn-kubernetes
+COPY .ci/ovn-kubernetes /tmp/ovn/.ci/ovn-kubernetes
+WORKDIR /tmp/ovn
+RUN .ci/ovn-kubernetes/prepare.sh /root/ovn-kubernetes
+
+WORKDIR /root/ovn-kubernetes/go-controller
# Make sure we use the OVN NB/SB schema from the local code.
COPY --from=ovnbuilder /tmp/ovn/ovn-nb.ovsschema pkg/nbdb/ovn-nb.ovsschema
COPY --from=ovnbuilder /tmp/ovn/ovn-sb.ovsschema pkg/sbdb/ovn-sb.ovsschema
diff --git a/.ci/ovn-kubernetes/custom.patch b/.ci/ovn-kubernetes/custom.patch
new file mode 100644
index 000000000..ea5dd7540
--- /dev/null
+++ b/.ci/ovn-kubernetes/custom.patch
@@ -0,0 +1,31 @@
+From 903eef2dd6f9fec818a580760f4757d8137b9974 Mon Sep 17 00:00:00 2001
+From: Dumitru Ceara <dceara@redhat.com>
+Date: Mon, 19 Dec 2022 12:18:55 +0100
+Subject: [PATCH] DOWNSTREAM: Disable session affinity tests.
+
+Commit https://github.com/ovn-org/ovn-kubernetes/commit/898d2f8f10c4
+enabled affinity timeout tests but the underlying OVN feature is
+not supported in this branch. Disable affinity tests.
+
+Signed-off-by: Dumitru Ceara <dceara@redhat.com>
+---
+ test/scripts/e2e-kind.sh | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh
+index 69959fa1b..c3b2a5c3e 100755
+--- a/test/scripts/e2e-kind.sh
++++ b/test/scripts/e2e-kind.sh
+@@ -26,6 +26,9 @@ kube-proxy
+ should set TCP CLOSE_WAIT timeout
+ \[Feature:ProxyTerminatingEndpoints\]
+
++# Disable session affinity tests completely.
++session affinity
++
+ # NOT IMPLEMENTED; SEE DISCUSSION IN https://github.com/ovn-org/ovn-kubernetes/pull/1225
+ named port.+\[Feature:NetworkPolicy\]
+
+--
+2.31.1
+
diff --git a/.ci/ovn-kubernetes/prepare.sh b/.ci/ovn-kubernetes/prepare.sh
new file mode 100755
index 000000000..8fc9652af
--- /dev/null
+++ b/.ci/ovn-kubernetes/prepare.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -ev
+
+ovnk8s_path=$1
+topdir=$PWD
+
+pushd ${ovnk8s_path}
+
+# Add here any custom operations that need to be performed on the
+# ovn-kubernetes cloned repo, e.g., custom patches.
+
+# "git apply --allow-empty" is too new: not all git versions shipped by
+# major distros support it. Instead, only apply the custom patch file
+# when it is non-empty.
+[ -s ${topdir}/.ci/ovn-kubernetes/custom.patch ] && \
+ git apply -v ${topdir}/.ci/ovn-kubernetes/custom.patch
+
+popd # ${ovnk8s_path}
+exit 0
diff --git a/.github/workflows/ovn-kubernetes.yml b/.github/workflows/ovn-kubernetes.yml
index ba6b291ff..34ff2cdda 100644
--- a/.github/workflows/ovn-kubernetes.yml
+++ b/.github/workflows/ovn-kubernetes.yml
@@ -91,12 +91,19 @@ jobs:
go-version: ${{ env.GO_VERSION }}
id: go
+ - name: Check out ovn
+ uses: actions/checkout@v3
+
- name: Check out ovn-kubernetes
uses: actions/checkout@v2
with:
path: src/github.com/ovn-org/ovn-kubernetes
repository: ovn-org/ovn-kubernetes
+ - name: Prepare
+ run: |
+ .ci/ovn-kubernetes/prepare.sh src/github.com/ovn-org/ovn-kubernetes
+
- name: Set up environment
run: |
export GOPATH=$(go env GOPATH)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 7a59cd478..88c48dd2c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -24,7 +24,7 @@ jobs:
M32: ${{ matrix.cfg.m32 }}
OPTS: ${{ matrix.cfg.opts }}
TESTSUITE: ${{ matrix.cfg.testsuite }}
- TESTSUITE_KW: ${{ matrix.cfg.testsuite_kw }}
+ TEST_RANGE: ${{ matrix.cfg.test_range }}
SANITIZERS: ${{ matrix.cfg.sanitizers }}
name: linux ${{ join(matrix.cfg.*, ' ') }}
@@ -36,31 +36,23 @@ jobs:
cfg:
- { compiler: gcc, opts: --disable-ssl }
- { compiler: clang, opts: --disable-ssl }
- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
- - { compiler: gcc, testsuite: test, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
- - { compiler: gcc, testsuite: test, testsuite_kw: "!ovn-northd" }
- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
- - { compiler: clang, testsuite: test, sanitizers: sanitizers, testsuite_kw: "!ovn-northd" }
- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
- - { compiler: gcc, testsuite: test, libs: -ljemalloc, testsuite_kw: "!ovn-northd" }
- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
- - { compiler: clang, testsuite: test, libs: -ljemalloc, testsuite_kw: "!ovn-northd" }
- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=yes,ovn_monitor_all=yes" }
- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=yes,ovn_monitor_all=no" }
- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=no,ovn_monitor_all=yes" }
- - { compiler: gcc, testsuite: system-test, testsuite_kw: "parallelization=no,ovn_monitor_all=no" }
- - { compiler: gcc, testsuite: system-test, testsuite_kw: "!ovn-northd" }
+ - { compiler: gcc, testsuite: test, test_range: "-500" }
+ - { compiler: gcc, testsuite: test, test_range: "501-1000" }
+ - { compiler: gcc, testsuite: test, test_range: "1001-" }
+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "-300" }
+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "301-600" }
+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "601-900" }
+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "901-1200" }
+ - { compiler: clang, testsuite: test, sanitizers: sanitizers, test_range: "1201-" }
+ - { compiler: gcc, testsuite: test, libs: -ljemalloc, test_range: "-500" }
+ - { compiler: gcc, testsuite: test, libs: -ljemalloc, test_range: "501-1000" }
+ - { compiler: gcc, testsuite: test, libs: -ljemalloc, test_range: "1001-" }
+ - { compiler: clang, testsuite: test, libs: -ljemalloc, test_range: "-500" }
+ - { compiler: clang, testsuite: test, libs: -ljemalloc, test_range: "501-1000" }
+ - { compiler: clang, testsuite: test, libs: -ljemalloc, test_range: "1001-" }
+ - { compiler: gcc, testsuite: system-test, test_range: "-100" }
+ - { compiler: gcc, testsuite: system-test, test_range: "101-200" }
+ - { compiler: gcc, testsuite: system-test, test_range: "201-" }
- { compiler: gcc, m32: m32, opts: --disable-ssl}
steps:
diff --git a/Makefile.am b/Makefile.am
index 3b0df8393..f7758d114 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -85,12 +85,13 @@ EXTRA_DIST = \
MAINTAINERS.rst \
README.rst \
NOTICE \
- .cirrus.yml \
.ci/linux-build.sh \
.ci/linux-prepare.sh \
.ci/osx-build.sh \
.ci/osx-prepare.sh \
.ci/ovn-kubernetes/Dockerfile \
+ .ci/ovn-kubernetes/prepare.sh \
+ .ci/ovn-kubernetes/custom.patch \
.github/workflows/test.yml \
.github/workflows/ovn-kubernetes.yml \
boot.sh \
diff --git a/NEWS b/NEWS
index ef6a99fed..1a7a7855d 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,10 @@
+OVN v22.09.2 - xx xxx xxxx
+--------------------------
+
+OVN v22.09.1 - 20 Dec 2022
+--------------------------
+ - Bug fixes
+
OVN v22.09.0 - 16 Sep 2022
--------------------------
- ovn-controller: Add configuration knob, through OVS external-id
diff --git a/build-aux/sodepends.py b/build-aux/sodepends.py
index 343fda1af..7b1f9c840 100755
--- a/build-aux/sodepends.py
+++ b/build-aux/sodepends.py
@@ -63,7 +63,8 @@ def sodepends(include_info, filenames, dst):
continue
# Open file.
- include_dirs = [info[0] for info in include_info]
+ include_dirs = [info[1] if len(info) == 2 else info[0]
+ for info in include_info]
fn = soutil.find_file(include_dirs, toplevel)
if not fn:
ok = False
diff --git a/configure.ac b/configure.ac
index 765aacb17..408184649 100644
--- a/configure.ac
+++ b/configure.ac
@@ -13,7 +13,7 @@
# limitations under the License.
AC_PREREQ(2.63)
-AC_INIT(ovn, 22.09.0, bugs@openvswitch.org)
+AC_INIT(ovn, 22.09.2, bugs@openvswitch.org)
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_HEADERS([config.h])
diff --git a/controller/binding.c b/controller/binding.c
index 8f6b4b19d..5df62baef 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -220,7 +220,14 @@ set_noop_qos(struct ovsdb_idl_txn *ovs_idl_txn,
static void
set_qos_type(struct netdev *netdev, const char *type)
{
- int error = netdev_set_qos(netdev, type, NULL);
+    /* 34359738360 == (2^32 - 1) * 8. netdev_set_qos() doesn't support
+     * 64-bit rate netlink attributes, so the maximum rate is 2^32 - 1
+     * bytes per second. The 'max-rate' config option is in bits, so we
+     * multiply by 8. Without setting max-rate, the reported link speed
+     * would be used; that can be unavailable for certain NICs or
+     * reported too low for virtual interfaces. */
+ const struct smap conf = SMAP_CONST1(&conf, "max-rate", "34359738360");
+ int error = netdev_set_qos(netdev, type, &conf);
if (error) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "%s: could not set qdisc type \"%s\" (%s)",
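For context, the hard-coded constant can be reproduced with a few lines of
standalone C (a sketch of the arithmetic only; nothing here is OVN API):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Kernel rate attributes are 32-bit byte counters, so the ceiling
         * is (2^32 - 1) bytes/s; the OVS 'max-rate' option is in bits/s. */
        uint64_t max_rate_bytes = UINT32_MAX;        /* 4294967295 */
        uint64_t max_rate_bits = max_rate_bytes * 8; /* 34359738360 */
        printf("%" PRIu64 "\n", max_rate_bits);
        return 0;
    }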
@@ -1866,6 +1873,7 @@ build_local_bindings(struct binding_ctx_in *b_ctx_in,
lbinding = local_binding_create(iface_id, iface_rec);
local_binding_add(local_bindings, lbinding);
} else {
+ lbinding->multiple_bindings = true;
static struct vlog_rate_limit rl =
VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(
@@ -2156,6 +2164,10 @@ consider_iface_claim(const struct ovsrec_interface *iface_rec,
lbinding = local_binding_create(iface_id, iface_rec);
local_binding_add(local_bindings, lbinding);
} else {
+ if (lbinding->iface && lbinding->iface != iface_rec) {
+ lbinding->multiple_bindings = true;
+ b_ctx_out->local_lports_changed = true;
+ }
lbinding->iface = iface_rec;
}
@@ -2174,6 +2186,13 @@ consider_iface_claim(const struct ovsrec_interface *iface_rec,
return true;
}
+    /* If there are multiple bindings to the same port, remove the "old"
+     * binding. This ensures that change tracking stays correct.
+     */
+ if (lbinding->multiple_bindings) {
+ remove_related_lport(pb, b_ctx_out);
+ }
+
enum en_lport_type lport_type = get_lport_type(pb);
if (lport_type == LP_LOCALPORT) {
return consider_localport(pb, b_ctx_in, b_ctx_out);
@@ -2226,6 +2245,29 @@ consider_iface_release(const struct ovsrec_interface *iface_rec,
struct shash *binding_lports = &b_ctx_out->lbinding_data->lports;
lbinding = local_binding_find(local_bindings, iface_id);
+
+ if (lbinding) {
+ if (lbinding->multiple_bindings) {
+ VLOG_INFO("Multiple bindings for %s: force recompute to clean up",
+ iface_id);
+ return false;
+ } else {
+ int64_t ofport = iface_rec->n_ofport ? *iface_rec->ofport : 0;
+ if (lbinding->iface != iface_rec && !ofport) {
+ /* If external_ids:iface-id is set within the same transaction
+ * as adding an interface to a bridge, ovn-controller is
+ * usually initially notified of ovs interface changes with
+ * ofport == 0. If the lport was bound to a different interface
+ * we do not want to release it.
+ */
+                VLOG_DBG("Not releasing lport %s as %s was claimed "
+                         "and %s was never bound", iface_id, lbinding->iface ?
+                         lbinding->iface->name : "", iface_rec->name);
+ return true;
+ }
+ }
+ }
+
struct binding_lport *b_lport =
local_binding_get_primary_or_localport_lport(lbinding);
if (is_binding_lport_this_chassis(b_lport, b_ctx_in->chassis_rec)) {
@@ -2666,7 +2708,7 @@ consider_patch_port_for_local_datapaths(const struct sbrec_port_binding *pb,
get_local_datapath(b_ctx_out->local_datapaths,
peer->datapath->tunnel_key);
}
- if (peer_ld && need_add_patch_peer_to_local(
+ if (peer_ld && need_add_peer_to_local(
b_ctx_in->sbrec_port_binding_by_name, peer,
b_ctx_in->chassis_rec)) {
add_local_datapath(
@@ -2681,7 +2723,7 @@ consider_patch_port_for_local_datapaths(const struct sbrec_port_binding *pb,
/* Add the peer datapath to the local datapaths if it's
* not present yet.
*/
- if (need_add_patch_peer_to_local(
+ if (need_add_peer_to_local(
b_ctx_in->sbrec_port_binding_by_name, pb,
b_ctx_in->chassis_rec)) {
add_local_datapath_peer_port(
@@ -3034,6 +3076,7 @@ local_binding_create(const char *name, const struct ovsrec_interface *iface)
struct local_binding *lbinding = xzalloc(sizeof *lbinding);
lbinding->name = xstrdup(name);
lbinding->iface = iface;
+ lbinding->multiple_bindings = false;
ovs_list_init(&lbinding->binding_lports);
return lbinding;
diff --git a/controller/binding.h b/controller/binding.h
index ad959a9e6..6c3a98b02 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -135,6 +135,7 @@ struct local_binding {
char *name;
const struct ovsrec_interface *iface;
struct ovs_list binding_lports;
+ bool multiple_bindings;
};
struct local_binding_data {
diff --git a/controller/lflow.h b/controller/lflow.h
index 543d3cd96..e57b061c3 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -81,6 +81,7 @@ struct uuid;
#define OFTABLE_CHK_OUT_PORT_SEC 75
#define OFTABLE_ECMP_NH_MAC 76
#define OFTABLE_ECMP_NH 77
+#define OFTABLE_CHK_LB_AFFINITY 78
enum ref_type {
REF_TYPE_ADDRSET,
diff --git a/controller/local_data.c b/controller/local_data.c
index 9eee568d1..035f10fff 100644
--- a/controller/local_data.c
+++ b/controller/local_data.c
@@ -115,14 +115,19 @@ local_datapath_destroy(struct local_datapath *ld)
free(ld);
}
-/* Checks if pb is a patch port and the peer datapath should be added to local
- * datapaths. */
+/* Checks if pb is bound to a local gateway router, or is a patch port
+ * whose peer datapath should be added to the local datapaths. */
bool
-need_add_patch_peer_to_local(
+need_add_peer_to_local(
struct ovsdb_idl_index *sbrec_port_binding_by_name,
const struct sbrec_port_binding *pb,
const struct sbrec_chassis *chassis)
{
+    /* This port belongs to a gateway router hosted on this chassis. */
+ if (!strcmp(pb->type, "l3gateway") && pb->chassis == chassis) {
+ return true;
+ }
+
/* If it is not a patch port, no peer to add. */
if (strcmp(pb->type, "patch")) {
return false;
@@ -571,7 +576,7 @@ add_local_datapath__(struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
peer_name);
if (peer && peer->datapath) {
- if (need_add_patch_peer_to_local(
+ if (need_add_peer_to_local(
sbrec_port_binding_by_name, pb, chassis)) {
struct local_datapath *peer_ld =
add_local_datapath__(sbrec_datapath_binding_by_key,
diff --git a/controller/local_data.h b/controller/local_data.h
index d898c8aa5..b5429eb58 100644
--- a/controller/local_data.h
+++ b/controller/local_data.h
@@ -66,7 +66,7 @@ struct local_datapath *local_datapath_alloc(
struct local_datapath *get_local_datapath(const struct hmap *,
uint32_t tunnel_key);
bool
-need_add_patch_peer_to_local(
+need_add_peer_to_local(
struct ovsdb_idl_index *sbrec_port_binding_by_name,
const struct sbrec_port_binding *,
const struct sbrec_chassis *);
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 43fbf2ba3..a92fc895c 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -151,6 +151,14 @@ struct pending_pkt {
/* Registered ofctrl seqno type for nb_cfg propagation. */
static size_t ofctrl_seq_type_nb_cfg;
+/* Only set monitor conditions on tables that are available in the
+ * server schema.
+ */
+#define sb_table_set_monitor_condition(idl, table, cond) \
+ (sbrec_server_has_##table##_table(idl) \
+ ? sbrec_##table##_set_condition(idl, cond) \
+ : 0)
+
static unsigned int
update_sb_monitors(struct ovsdb_idl *ovnsb_idl,
const struct sbrec_chassis *chassis,
@@ -279,16 +287,16 @@ update_sb_monitors(struct ovsdb_idl *ovnsb_idl,
out:;
unsigned int cond_seqnos[] = {
- sbrec_port_binding_set_condition(ovnsb_idl, &pb),
- sbrec_logical_flow_set_condition(ovnsb_idl, &lf),
- sbrec_logical_dp_group_set_condition(ovnsb_idl, &ldpg),
- sbrec_mac_binding_set_condition(ovnsb_idl, &mb),
- sbrec_multicast_group_set_condition(ovnsb_idl, &mg),
- sbrec_dns_set_condition(ovnsb_idl, &dns),
- sbrec_controller_event_set_condition(ovnsb_idl, &ce),
- sbrec_ip_multicast_set_condition(ovnsb_idl, &ip_mcast),
- sbrec_igmp_group_set_condition(ovnsb_idl, &igmp),
- sbrec_chassis_private_set_condition(ovnsb_idl, &chprv),
+ sb_table_set_monitor_condition(ovnsb_idl, port_binding, &pb),
+ sb_table_set_monitor_condition(ovnsb_idl, logical_flow, &lf),
+ sb_table_set_monitor_condition(ovnsb_idl, logical_dp_group, &ldpg),
+ sb_table_set_monitor_condition(ovnsb_idl, mac_binding, &mb),
+ sb_table_set_monitor_condition(ovnsb_idl, multicast_group, &mg),
+ sb_table_set_monitor_condition(ovnsb_idl, dns, &dns),
+ sb_table_set_monitor_condition(ovnsb_idl, controller_event, &ce),
+ sb_table_set_monitor_condition(ovnsb_idl, ip_multicast, &ip_mcast),
+ sb_table_set_monitor_condition(ovnsb_idl, igmp_group, &igmp),
+ sb_table_set_monitor_condition(ovnsb_idl, chassis_private, &chprv),
};
unsigned int expected_cond_seqno = 0;
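For reference, hand-expanding the first entry above shows what the macro
buys (a sketch; the sbrec_*() helpers are generated from the SB schema by
the OVSDB IDL):

    unsigned int seqno =
        (sbrec_server_has_port_binding_table(ovnsb_idl)
         ? sbrec_port_binding_set_condition(ovnsb_idl, &pb)
         : 0);

If the connected server's schema lacks the table, the expression evaluates
to 0 and no condition change is requested.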
@@ -658,7 +666,8 @@ update_ct_zones(const struct shash *binding_lports,
const char *user;
struct sset all_users = SSET_INITIALIZER(&all_users);
struct simap req_snat_zones = SIMAP_INITIALIZER(&req_snat_zones);
- unsigned long unreq_snat_zones[BITMAP_N_LONGS(MAX_CT_ZONES)];
+ unsigned long unreq_snat_zones_map[BITMAP_N_LONGS(MAX_CT_ZONES)];
+ struct simap unreq_snat_zones = SIMAP_INITIALIZER(&unreq_snat_zones);
struct shash_node *shash_node;
SHASH_FOR_EACH (shash_node, binding_lports) {
@@ -698,49 +707,46 @@ update_ct_zones(const struct shash *binding_lports,
bitmap_set0(ct_zone_bitmap, ct_zone->data);
simap_delete(ct_zones, ct_zone);
} else if (!simap_find(&req_snat_zones, ct_zone->name)) {
- bitmap_set1(unreq_snat_zones, ct_zone->data);
+ bitmap_set1(unreq_snat_zones_map, ct_zone->data);
+ simap_put(&unreq_snat_zones, ct_zone->name, ct_zone->data);
}
}
/* Prioritize requested CT zones */
struct simap_node *snat_req_node;
SIMAP_FOR_EACH (snat_req_node, &req_snat_zones) {
- struct simap_node *node = simap_find(ct_zones, snat_req_node->name);
- if (node) {
- if (node->data == snat_req_node->data) {
- /* No change to this request, so no action needed */
- continue;
- } else {
- /* Zone request has changed for this node. delete old entry */
- bitmap_set0(ct_zone_bitmap, node->data);
- simap_delete(ct_zones, node);
- }
- }
-
/* Determine if someone already had this zone auto-assigned.
* If so, then they need to give up their assignment since
* that zone is being explicitly requested now.
*/
- if (bitmap_is_set(unreq_snat_zones, snat_req_node->data)) {
- struct simap_node *dup;
- SIMAP_FOR_EACH_SAFE (dup, ct_zones) {
- if (dup != snat_req_node && dup->data == snat_req_node->data) {
- simap_delete(ct_zones, dup);
- break;
+ if (bitmap_is_set(unreq_snat_zones_map, snat_req_node->data)) {
+ struct simap_node *unreq_node;
+ SIMAP_FOR_EACH_SAFE (unreq_node, &unreq_snat_zones) {
+ if (unreq_node->data == snat_req_node->data) {
+ simap_find_and_delete(ct_zones, unreq_node->name);
+ simap_delete(&unreq_snat_zones, unreq_node);
}
}
+
/* Set this bit to 0 so that if multiple datapaths have requested
* this zone, we don't needlessly double-detect this condition.
*/
- bitmap_set0(unreq_snat_zones, snat_req_node->data);
+ bitmap_set0(unreq_snat_zones_map, snat_req_node->data);
}
- add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_OF_QUEUED,
- snat_req_node->data, true,
- snat_req_node->name);
-
- bitmap_set1(ct_zone_bitmap, snat_req_node->data);
- simap_put(ct_zones, snat_req_node->name, snat_req_node->data);
+ struct simap_node *node = simap_find(ct_zones, snat_req_node->name);
+ if (node) {
+ if (node->data != snat_req_node->data) {
+            /* Zone request has changed for this node. Delete the old entry
+             * and create a new one. */
+ add_pending_ct_zone_entry(pending_ct_zones, CT_ZONE_OF_QUEUED,
+ snat_req_node->data, true,
+ snat_req_node->name);
+ bitmap_set0(ct_zone_bitmap, node->data);
+ }
+ bitmap_set1(ct_zone_bitmap, snat_req_node->data);
+ node->data = snat_req_node->data;
+ }
}
/* xxx This is wasteful to assign a zone to each port--even if no
@@ -758,6 +764,7 @@ update_ct_zones(const struct shash *binding_lports,
}
simap_destroy(&req_snat_zones);
+ simap_destroy(&unreq_snat_zones);
sset_destroy(&all_users);
shash_destroy(&all_lds);
}
@@ -799,11 +806,36 @@ commit_ct_zones(const struct ovsrec_bridge *br_int,
}
}
+/* Connection tracking zones. */
+struct ed_type_ct_zones {
+ unsigned long bitmap[BITMAP_N_LONGS(MAX_CT_ZONES)];
+ struct shash pending;
+ struct simap current;
+
+ /* Tracked data. */
+ bool recomputed;
+};
+
static void
restore_ct_zones(const struct ovsrec_bridge_table *bridge_table,
const struct ovsrec_open_vswitch_table *ovs_table,
- struct simap *ct_zones, unsigned long *ct_zone_bitmap)
+ struct ed_type_ct_zones *ct_zones_data)
{
+ memset(ct_zones_data->bitmap, 0, sizeof ct_zones_data->bitmap);
+ bitmap_set1(ct_zones_data->bitmap, 0); /* Zone 0 is reserved. */
+
+ struct shash_node *pending_node;
+ SHASH_FOR_EACH (pending_node, &ct_zones_data->pending) {
+ struct ct_zone_pending_entry *ctpe = pending_node->data;
+
+ if (ctpe->add) {
+ VLOG_DBG("restoring ct zone %"PRId32" for '%s'", ctpe->zone,
+ pending_node->name);
+ bitmap_set1(ct_zones_data->bitmap, ctpe->zone);
+ simap_put(&ct_zones_data->current, pending_node->name, ctpe->zone);
+ }
+ }
+
const struct ovsrec_open_vswitch *cfg;
cfg = ovsrec_open_vswitch_table_first(ovs_table);
if (!cfg) {
@@ -829,14 +861,18 @@ restore_ct_zones(const struct ovsrec_bridge_table *bridge_table,
continue;
}
+ if (shash_find(&ct_zones_data->pending, user)) {
+ continue;
+ }
+
unsigned int zone;
if (!str_to_uint(node->value, 10, &zone)) {
continue;
}
VLOG_DBG("restoring ct zone %"PRId32" for '%s'", zone, user);
- bitmap_set1(ct_zone_bitmap, zone);
- simap_put(ct_zones, user, zone);
+ bitmap_set1(ct_zones_data->bitmap, zone);
+ simap_put(&ct_zones_data->current, user, zone);
}
}
@@ -2058,16 +2094,6 @@ out:
return true;
}
-/* Connection tracking zones. */
-struct ed_type_ct_zones {
- unsigned long bitmap[BITMAP_N_LONGS(MAX_CT_ZONES)];
- struct shash pending;
- struct simap current;
-
- /* Tracked data. */
- bool recomputed;
-};
-
static void *
en_ct_zones_init(struct engine_node *node, struct engine_arg *arg OVS_UNUSED)
{
@@ -2082,9 +2108,7 @@ en_ct_zones_init(struct engine_node *node, struct engine_arg *arg OVS_UNUSED)
shash_init(&data->pending);
simap_init(&data->current);
- memset(data->bitmap, 0, sizeof data->bitmap);
- bitmap_set1(data->bitmap, 0); /* Zone 0 is reserved. */
- restore_ct_zones(bridge_table, ovs_table, &data->current, data->bitmap);
+ restore_ct_zones(bridge_table, ovs_table, data);
return data;
}
@@ -2111,6 +2135,12 @@ en_ct_zones_run(struct engine_node *node, void *data)
struct ed_type_runtime_data *rt_data =
engine_get_input_data("runtime_data", node);
+ const struct ovsrec_open_vswitch_table *ovs_table =
+ EN_OVSDB_GET(engine_get_input("OVS_open_vswitch", node));
+ const struct ovsrec_bridge_table *bridge_table =
+ EN_OVSDB_GET(engine_get_input("OVS_bridge", node));
+
+ restore_ct_zones(bridge_table, ovs_table, ct_zones_data);
update_ct_zones(&rt_data->lbinding_data.lports, &rt_data->local_datapaths,
&ct_zones_data->current, ct_zones_data->bitmap,
&ct_zones_data->pending);
@@ -2188,7 +2218,7 @@ ct_zones_runtime_data_handler(struct engine_node *node, void *data)
struct hmap *tracked_dp_bindings = &rt_data->tracked_dp_bindings;
struct tracked_datapath *tdp;
- int scan_start = 0;
+ int scan_start = 1;
bool updated = false;
@@ -4197,6 +4227,7 @@ main(int argc, char *argv[])
}
stopwatch_start(PINCTRL_RUN_STOPWATCH_NAME,
time_msec());
+ pinctrl_update(ovnsb_idl_loop.idl, br_int->name);
pinctrl_run(ovnsb_idl_txn,
sbrec_datapath_binding_by_key,
sbrec_port_binding_by_datapath,
diff --git a/controller/physical.c b/controller/physical.c
index f3c8bddce..705146316 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -803,6 +803,14 @@ put_replace_router_port_mac_flows(struct ovsdb_idl_index
ofpact_put_OUTPUT(ofpacts_p)->port = ofport;
+    /* Restore the original MAC and strip the VLAN tag. For L2 flooded
+     * traffic (ARP/ND) we need to restore the previous state so other
+     * ports do not receive the traffic tagged and with the wrong MAC. */
+ ofpact_put_SET_ETH_SRC(ofpacts_p)->mac = router_port_mac;
+ if (tag) {
+ ofpact_put_STRIP_VLAN(ofpacts_p);
+ }
+
ofctrl_add_flow(flow_table, OFTABLE_LOG_TO_PHY, 150,
localnet_port->header_.uuid.parts[0],
&match, ofpacts_p, &localnet_port->header_.uuid);
diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index 3f5d0af79..bcbb04eed 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -173,6 +173,7 @@ struct pinctrl {
pthread_t pinctrl_thread;
/* Latch to destroy the 'pinctrl_thread' */
struct latch pinctrl_thread_exit;
+ bool mac_binding_can_timestamp;
};
static struct pinctrl pinctrl;
@@ -544,6 +545,7 @@ pinctrl_init(void)
bfd_monitor_init();
init_fdb_entries();
pinctrl.br_int_name = NULL;
+ pinctrl.mac_binding_can_timestamp = false;
pinctrl_handler_seq = seq_create();
pinctrl_main_seq = seq_create();
@@ -3519,7 +3521,7 @@ pinctrl_handler(void *arg_)
}
static void
-pinctrl_set_br_int_name_(char *br_int_name)
+pinctrl_set_br_int_name_(const char *br_int_name)
OVS_REQUIRES(pinctrl_mutex)
{
if (br_int_name && (!pinctrl.br_int_name || strcmp(pinctrl.br_int_name,
@@ -3533,13 +3535,31 @@ pinctrl_set_br_int_name_(char *br_int_name)
}
void
-pinctrl_set_br_int_name(char *br_int_name)
+pinctrl_set_br_int_name(const char *br_int_name)
{
ovs_mutex_lock(&pinctrl_mutex);
pinctrl_set_br_int_name_(br_int_name);
ovs_mutex_unlock(&pinctrl_mutex);
}
+void
+pinctrl_update(const struct ovsdb_idl *idl, const char *br_int_name)
+{
+ ovs_mutex_lock(&pinctrl_mutex);
+ pinctrl_set_br_int_name_(br_int_name);
+
+ bool can_timestamp = sbrec_server_has_mac_binding_table_col_timestamp(idl);
+ if (can_timestamp != pinctrl.mac_binding_can_timestamp) {
+ pinctrl.mac_binding_can_timestamp = can_timestamp;
+
+ /* Notify pinctrl_handler that mac binding timestamp column
+ * availability has changed. */
+ notify_pinctrl_handler();
+ }
+
+ ovs_mutex_unlock(&pinctrl_mutex);
+}
+
/* Called by ovn-controller. */
void
pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
@@ -3563,7 +3583,6 @@ pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
const struct shash *local_active_ports_ras)
{
ovs_mutex_lock(&pinctrl_mutex);
- pinctrl_set_br_int_name_(br_int->name);
run_put_mac_bindings(ovnsb_idl_txn, sbrec_datapath_binding_by_key,
sbrec_port_binding_by_key,
sbrec_mac_binding_by_lport_ip);
@@ -4245,12 +4264,17 @@ mac_binding_add_to_sb(struct ovsdb_idl_txn *ovnsb_idl_txn,
b = sbrec_mac_binding_insert(ovnsb_idl_txn);
sbrec_mac_binding_set_logical_port(b, logical_port);
sbrec_mac_binding_set_ip(b, ip);
- sbrec_mac_binding_set_mac(b, mac_string);
sbrec_mac_binding_set_datapath(b, dp);
- sbrec_mac_binding_set_timestamp(b, time_wall_msec());
- } else if (strcmp(b->mac, mac_string)) {
+ }
+
+ if (strcmp(b->mac, mac_string)) {
sbrec_mac_binding_set_mac(b, mac_string);
- sbrec_mac_binding_set_timestamp(b, time_wall_msec());
+
+        /* For backward compatibility, check whether the timestamp column
+         * is available in the SB DB. */
+ if (pinctrl.mac_binding_can_timestamp) {
+ sbrec_mac_binding_set_timestamp(b, time_wall_msec());
+ }
}
}
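The cached pinctrl.mac_binding_can_timestamp flag is refreshed in
pinctrl_update() above, presumably so this code can consult it under
pinctrl_mutex without re-probing the IDL each time. A minimal sketch of the
equivalent direct check, assuming an IDL handle were in scope here:

    if (sbrec_server_has_mac_binding_table_col_timestamp(idl)) {
        sbrec_mac_binding_set_timestamp(b, time_wall_msec());
    }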
@@ -4378,7 +4402,7 @@ run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
const struct sbrec_port_binding *pb;
SBREC_PORT_BINDING_FOR_EACH_EQUAL (pb, target,
sbrec_port_binding_by_datapath) {
- if (strcmp(pb->type, "patch")) {
+ if (strcmp(pb->type, "patch") && strcmp(pb->type, "l3gateway")) {
continue;
}
struct buffered_packets *cur_qp;
diff --git a/controller/pinctrl.h b/controller/pinctrl.h
index d4f52e94d..cfece04da 100644
--- a/controller/pinctrl.h
+++ b/controller/pinctrl.h
@@ -26,6 +26,7 @@
struct hmap;
struct shash;
struct lport_index;
+struct ovsdb_idl;
struct ovsdb_idl_index;
struct ovsdb_idl_txn;
struct ovsrec_bridge;
@@ -57,7 +58,8 @@ void pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
const struct shash *local_active_ports_ras);
void pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn);
void pinctrl_destroy(void);
-void pinctrl_set_br_int_name(char *br_int_name);
+void pinctrl_set_br_int_name(const char *br_int_name);
+void pinctrl_update(const struct ovsdb_idl *idl, const char *br_int_name);
struct activated_port {
uint32_t dp_key;
diff --git a/debian/changelog b/debian/changelog
index 267e12baa..08cc66fc0 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,15 @@
+ovn (22.09.2-1) unstable; urgency=low
+ [ OVN team ]
+ * New upstream version
+
+ -- OVN team <dev@openvswitch.org> Tue, 20 Dec 2022 13:53:56 -0500
+
+ovn (22.09.1-1) unstable; urgency=low
+ [ OVN team ]
+ * New upstream version
+
+ -- OVN team <dev@openvswitch.org> Tue, 20 Dec 2022 13:53:56 -0500
+
ovn (22.09.0-1) unstable; urgency=low
* New upstream version
diff --git a/ic/ovn-ic.c b/ic/ovn-ic.c
index e5c193d9d..9a80a7f68 100644
--- a/ic/ovn-ic.c
+++ b/ic/ovn-ic.c
@@ -71,6 +71,7 @@ struct ic_context {
struct ovsdb_idl_index *icsbrec_port_binding_by_az;
struct ovsdb_idl_index *icsbrec_port_binding_by_ts;
struct ovsdb_idl_index *icsbrec_port_binding_by_ts_az;
+ struct ovsdb_idl_index *icsbrec_route_by_az;
struct ovsdb_idl_index *icsbrec_route_by_ts;
struct ovsdb_idl_index *icsbrec_route_by_ts_az;
};
@@ -756,6 +757,7 @@ port_binding_run(struct ic_context *ctx,
}
icsbrec_port_binding_index_destroy_row(isb_pb_key);
+ const struct sbrec_port_binding *sb_pb;
const struct icnbrec_transit_switch *ts;
ICNBREC_TRANSIT_SWITCH_FOR_EACH (ts, ctx->ovninb_idl) {
const struct nbrec_logical_switch *ls = find_ts_in_nb(ctx, ts->name);
@@ -787,9 +789,9 @@ port_binding_run(struct ic_context *ctx,
for (int i = 0; i < ls->n_ports; i++) {
lsp = ls->ports[i];
- const struct sbrec_port_binding *sb_pb = find_lsp_in_sb(ctx, lsp);
if (!strcmp(lsp->type, "router")) {
/* The port is local. */
+ sb_pb = find_lsp_in_sb(ctx, lsp);
if (!sb_pb) {
continue;
}
@@ -806,6 +808,7 @@ port_binding_run(struct ic_context *ctx,
if (!isb_pb) {
nbrec_logical_switch_update_ports_delvalue(ls, lsp);
} else {
+ sb_pb = find_lsp_in_sb(ctx, lsp);
if (!sb_pb) {
continue;
}
@@ -881,17 +884,18 @@ ic_route_hash(const struct in6_addr *prefix, unsigned int plen,
static struct ic_route_info *
ic_route_find(struct hmap *routes, const struct in6_addr *prefix,
unsigned int plen, const struct in6_addr *nexthop,
- const char *origin, char *route_table)
+ const char *origin, const char *route_table, uint32_t hash)
{
struct ic_route_info *r;
- uint32_t hash = ic_route_hash(prefix, plen, nexthop, origin, route_table);
+ if (!hash) {
+ hash = ic_route_hash(prefix, plen, nexthop, origin, route_table);
+ }
HMAP_FOR_EACH_WITH_HASH (r, node, hash, routes) {
if (ipv6_addr_equals(&r->prefix, prefix) &&
r->plen == plen &&
ipv6_addr_equals(&r->nexthop, nexthop) &&
!strcmp(r->origin, origin) &&
- !strcmp(r->route_table ? r->route_table : "", route_table) &&
- ipv6_addr_equals(&r->nexthop, nexthop)) {
+ !strcmp(r->route_table ? r->route_table : "", route_table)) {
return r;
}
}
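A sketch of the two calling conventions introduced here: passing 0 makes
ic_route_find() hash internally, while callers that go on to insert (see
add_to_routes_ad() below) precompute the hash so it is calculated only once:

    uint32_t hash = ic_route_hash(&prefix, plen, &nexthop, origin,
                                  route_table);
    if (!ic_route_find(routes, &prefix, plen, &nexthop, origin, route_table,
                       hash)) {
        struct ic_route_info *r = xzalloc(sizeof *r);
        /* ... fill in prefix, plen, nexthop, origin, route_table ... */
        hmap_insert(routes, &r->node, hash);
    }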
@@ -942,8 +946,8 @@ add_to_routes_learned(struct hmap *routes_learned,
}
const char *origin = smap_get_def(&nb_route->options, "origin", "");
if (ic_route_find(routes_learned, &prefix, plen, &nexthop, origin,
- nb_route->route_table)) {
- /* Route is already added to learned in previous iteration. */
+ nb_route->route_table, 0)) {
+        /* Route was already added to the learned routes on a previous
+         * iteration. */
return true;
}
@@ -1090,20 +1094,44 @@ route_need_advertise(const char *policy,
}
static void
-add_to_routes_ad(struct hmap *routes_ad,
- const struct nbrec_logical_router_static_route *nb_route,
- const struct lport_addresses *nexthop_addresses,
- const struct smap *nb_options, const char *route_table)
+add_to_routes_ad(struct hmap *routes_ad, const struct in6_addr prefix,
+ unsigned int plen, const struct in6_addr nexthop,
+ const char *origin, const char *route_table,
+ const struct nbrec_logical_router_port *nb_lrp,
+ const struct nbrec_logical_router_static_route *nb_route)
{
- if (strcmp(route_table, nb_route->route_table)) {
- if (VLOG_IS_DBG_ENABLED()) {
- VLOG_DBG("Skip advertising route %s -> %s as its route table %s !="
- " %s of TS port", nb_route->ip_prefix, nb_route->nexthop,
- nb_route->route_table, route_table);
- }
- return;
+ if (route_table == NULL) {
+ route_table = "";
+ }
+
+    uint32_t hash = ic_route_hash(&prefix, plen, &nexthop, origin,
+                                  route_table);
+
+ if (!ic_route_find(routes_ad, &prefix, plen, &nexthop, origin, route_table,
+ hash)) {
+ struct ic_route_info *ic_route = xzalloc(sizeof *ic_route);
+ ic_route->prefix = prefix;
+ ic_route->plen = plen;
+ ic_route->nexthop = nexthop;
+ ic_route->nb_route = nb_route;
+ ic_route->origin = origin;
+ ic_route->route_table = route_table;
+ ic_route->nb_lrp = nb_lrp;
+ hmap_insert(routes_ad, &ic_route->node, hash);
+ } else {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_WARN_RL(&rl, "Duplicate route advertisement was suppressed! NB "
+ "route uuid: "UUID_FMT,
+ UUID_ARGS(&nb_route->header_.uuid));
}
+}
+
+static void
+add_static_to_routes_ad(
+ struct hmap *routes_ad,
+ const struct nbrec_logical_router_static_route *nb_route,
+ const struct lport_addresses *nexthop_addresses,
+ const struct smap *nb_options)
+{
struct in6_addr prefix, nexthop;
unsigned int plen;
if (!parse_route(nb_route->ip_prefix, nb_route->nexthop,
@@ -1142,16 +1170,8 @@ add_to_routes_ad(struct hmap *routes_ad,
ds_destroy(&msg);
}
- struct ic_route_info *ic_route = xzalloc(sizeof *ic_route);
- ic_route->prefix = prefix;
- ic_route->plen = plen;
- ic_route->nexthop = nexthop;
- ic_route->nb_route = nb_route;
- ic_route->origin = ROUTE_ORIGIN_STATIC;
- ic_route->route_table = nb_route->route_table;
- hmap_insert(routes_ad, &ic_route->node,
- ic_route_hash(&prefix, plen, &nexthop, ROUTE_ORIGIN_STATIC,
- nb_route->route_table));
+ add_to_routes_ad(routes_ad, prefix, plen, nexthop, ROUTE_ORIGIN_STATIC,
+ nb_route->route_table, NULL, nb_route);
}
static void
@@ -1195,18 +1215,9 @@ add_network_to_routes_ad(struct hmap *routes_ad, const char *network,
ds_destroy(&msg);
}
- struct ic_route_info *ic_route = xzalloc(sizeof *ic_route);
- ic_route->prefix = prefix;
- ic_route->plen = plen;
- ic_route->nexthop = nexthop;
- ic_route->nb_lrp = nb_lrp;
- ic_route->origin = ROUTE_ORIGIN_CONNECTED;
-
/* directly-connected routes go to <main> route table */
- ic_route->route_table = NULL;
- hmap_insert(routes_ad, &ic_route->node,
- ic_route_hash(&prefix, plen, &nexthop,
- ROUTE_ORIGIN_CONNECTED, ""));
+ add_to_routes_ad(routes_ad, prefix, plen, nexthop, ROUTE_ORIGIN_CONNECTED,
+ NULL, nb_lrp, NULL);
}
static bool
@@ -1366,7 +1377,7 @@ sync_learned_routes(struct ic_context *ctx,
struct ic_route_info *route_learned
= ic_route_find(&ic_lr->routes_learned, &prefix, plen,
&nexthop, isb_route->origin,
- isb_route->route_table);
+ isb_route->route_table, 0);
if (route_learned) {
/* Sync external-ids */
struct uuid ext_id;
@@ -1465,7 +1476,7 @@ advertise_routes(struct ic_context *ctx,
}
struct ic_route_info *route_adv =
ic_route_find(routes_ad, &prefix, plen, &nexthop,
- isb_route->origin, isb_route->route_table);
+ isb_route->origin, isb_route->route_table, 0);
if (!route_adv) {
/* Delete the extra route from IC-SB. */
VLOG_DBG("Delete route %s -> %s from IC-SB, which is not found"
@@ -1545,10 +1556,10 @@ build_ts_routes_to_adv(struct ic_context *ctx,
nbrec_logical_router_update_static_routes_delvalue(lr,
nb_route);
}
- } else {
+ } else if (!strcmp(ts_route_table, nb_route->route_table)) {
/* It may be a route to be advertised */
- add_to_routes_ad(routes_ad, nb_route, ts_port_addrs,
- &nb_global->options, ts_route_table);
+ add_static_to_routes_ad(routes_ad, nb_route, ts_port_addrs,
+ &nb_global->options);
}
}
@@ -1581,7 +1592,6 @@ advertise_lr_routes(struct ic_context *ctx,
const struct icsbrec_port_binding *isb_pb;
const char *lrp_name, *ts_name, *route_table;
struct lport_addresses ts_port_addrs;
- const struct nbrec_logical_router *lr = ic_lr->lr;
const struct icnbrec_transit_switch *key;
struct hmap routes_ad = HMAP_INITIALIZER(&routes_ad);
@@ -1599,7 +1609,7 @@ advertise_lr_routes(struct ic_context *ctx,
VLOG_INFO_RL(&rl, "Route sync ignores port %s on ts %s for router"
" %s because the addresses are invalid.",
isb_pb->logical_port, isb_pb->transit_switch,
- lr->name);
+ ic_lr->lr->name);
continue;
}
lrp_name = get_lrp_name_by_ts_port_name(ctx, isb_pb->logical_port);
@@ -1612,6 +1622,39 @@ advertise_lr_routes(struct ic_context *ctx,
hmap_destroy(&routes_ad);
}
+static void
+delete_orphan_ic_routes(struct ic_context *ctx,
+ const struct icsbrec_availability_zone *az)
+{
+ const struct icsbrec_route *isb_route, *isb_route_key =
+ icsbrec_route_index_init_row(ctx->icsbrec_route_by_az);
+ icsbrec_route_index_set_availability_zone(isb_route_key, az);
+
+ const struct icnbrec_transit_switch *t_sw, *t_sw_key;
+
+ ICSBREC_ROUTE_FOR_EACH_EQUAL (isb_route, isb_route_key,
+ ctx->icsbrec_route_by_az)
+ {
+ t_sw_key = icnbrec_transit_switch_index_init_row(
+ ctx->icnbrec_transit_switch_by_name);
+ icnbrec_transit_switch_index_set_name(t_sw_key,
+ isb_route->transit_switch);
+ t_sw = icnbrec_transit_switch_index_find(
+ ctx->icnbrec_transit_switch_by_name, t_sw_key);
+ icnbrec_transit_switch_index_destroy_row(t_sw_key);
+
+ if (!t_sw) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+ VLOG_INFO_RL(&rl, "Deleting orphan ICDB:Route: %s->%s (%s, rtb:%s,"
+ " transit switch: %s)", isb_route->ip_prefix,
+ isb_route->nexthop, isb_route->origin,
+ isb_route->route_table, isb_route->transit_switch);
+ icsbrec_route_delete(isb_route);
+ }
+ }
+ icsbrec_route_index_destroy_row(isb_route_key);
+}
+
static void
route_run(struct ic_context *ctx,
const struct icsbrec_availability_zone *az)
@@ -1620,6 +1663,8 @@ route_run(struct ic_context *ctx,
return;
}
+ delete_orphan_ic_routes(ctx, az);
+
struct hmap ic_lrs = HMAP_INITIALIZER(&ic_lrs);
const struct icsbrec_port_binding *isb_pb;
const struct icsbrec_port_binding *isb_pb_key =
@@ -1866,13 +1911,112 @@ main(int argc, char *argv[])
struct ovsdb_idl_loop ovnisb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
ovsdb_idl_create(ovn_ic_sb_db, &icsbrec_idl_class, true, true));
- /* ovn-nb db. XXX: add only needed tables and columns */
+ /* ovn-nb db. */
struct ovsdb_idl_loop ovnnb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
- ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, true, true));
-
- /* ovn-sb db. XXX: add only needed tables and columns */
+ ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, false, true));
+
+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_nb_global);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, &nbrec_nb_global_col_name);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl, &nbrec_nb_global_col_options);
+
+ ovsdb_idl_add_table(ovnnb_idl_loop.idl,
+ &nbrec_table_logical_router_static_route);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_static_route_col_route_table);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_static_route_col_ip_prefix);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_static_route_col_nexthop);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_static_route_col_external_ids);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_static_route_col_options);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_static_route_col_policy);
+
+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_router);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_col_name);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_col_static_routes);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_col_ports);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_col_options);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_col_external_ids);
+
+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_router_port);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_port_col_name);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_port_col_networks);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_port_col_external_ids);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_router_port_col_options);
+
+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_switch);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_col_name);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_col_ports);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_col_other_config);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_col_external_ids);
+
+ ovsdb_idl_add_table(ovnnb_idl_loop.idl, &nbrec_table_logical_switch_port);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_name);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_addresses);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_options);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_type);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_up);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_enabled);
+ ovsdb_idl_add_column(ovnnb_idl_loop.idl,
+ &nbrec_logical_switch_port_col_external_ids);
+
+ /* ovn-sb db. */
struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
- ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, true, true));
+ ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true));
+
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_chassis);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_encaps);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_name);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_hostname);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_chassis_col_other_config);
+
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_encap);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_chassis_name);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_type);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_ip);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_encap_col_options);
+
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_datapath_binding_col_external_ids);
+
+ ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_port_binding);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_datapath);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_mac);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_options);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_logical_port);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_external_ids);
+ ovsdb_idl_add_column(ovnsb_idl_loop.idl,
+ &sbrec_port_binding_col_chassis);
/* Create IDL indexes */
struct ovsdb_idl_index *nbrec_ls_by_name
@@ -1908,6 +2052,10 @@ main(int argc, char *argv[])
&icsbrec_port_binding_col_transit_switch,
&icsbrec_port_binding_col_availability_zone);
+ struct ovsdb_idl_index *icsbrec_route_by_az
+ = ovsdb_idl_index_create1(ovnisb_idl_loop.idl,
+ &icsbrec_route_col_availability_zone);
+
struct ovsdb_idl_index *icsbrec_route_by_ts
= ovsdb_idl_index_create1(ovnisb_idl_loop.idl,
&icsbrec_route_col_transit_switch);
@@ -1962,6 +2110,7 @@ main(int argc, char *argv[])
.icsbrec_port_binding_by_az = icsbrec_port_binding_by_az,
.icsbrec_port_binding_by_ts = icsbrec_port_binding_by_ts,
.icsbrec_port_binding_by_ts_az = icsbrec_port_binding_by_ts_az,
+ .icsbrec_route_by_az = icsbrec_route_by_az,
.icsbrec_route_by_ts = icsbrec_route_by_ts,
.icsbrec_route_by_ts_az = icsbrec_route_by_ts_az,
};
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index d7ee84dac..fdb6ab08b 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -121,6 +121,8 @@ struct ovn_extend_table;
OVNACT(COMMIT_ECMP_NH, ovnact_commit_ecmp_nh) \
OVNACT(CHK_ECMP_NH_MAC, ovnact_result) \
OVNACT(CHK_ECMP_NH, ovnact_result) \
+ OVNACT(COMMIT_LB_AFF, ovnact_commit_lb_aff) \
+ OVNACT(CHK_LB_AFF, ovnact_result) \
/* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
enum OVS_PACKED_ENUM ovnact_type {
@@ -463,6 +465,20 @@ struct ovnact_commit_ecmp_nh {
uint8_t proto;
};
+/* OVNACT_COMMIT_LB_AFF. */
+struct ovnact_commit_lb_aff {
+ struct ovnact ovnact;
+
+ struct in6_addr vip;
+ uint16_t vip_port;
+ uint8_t proto;
+
+ struct in6_addr backend;
+ uint16_t backend_port;
+
+ uint16_t timeout;
+};
+
/* Internal use by the helpers below. */
void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
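For orientation, the logical action backed by this struct is parsed and
printed by the new lib/actions.c code below; with hypothetical addresses and
values it renders as:

    commit_lb_aff(vip = "172.16.0.10:80", backend = "10.0.0.2:8080",
                  proto = tcp, timeout = 60);

proto is only emitted when a VIP port is present, matching the parser's
requirement below.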
diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h
index 3db7265e4..8060488f9 100644
--- a/include/ovn/logical-fields.h
+++ b/include/ovn/logical-fields.h
@@ -53,6 +53,11 @@ enum ovn_controller_event {
#define MFF_N_LOG_REGS 10
+#define MFF_LOG_LB_AFF_MATCH_IP4_ADDR MFF_REG4
+#define MFF_LOG_LB_AFF_MATCH_LS_IP6_ADDR MFF_XXREG0
+#define MFF_LOG_LB_AFF_MATCH_LR_IP6_ADDR MFF_XXREG1
+#define MFF_LOG_LB_AFF_MATCH_PORT MFF_REG8
+
void ovn_init_symtab(struct shash *symtab);
/* MFF_LOG_FLAGS_REG bit assignments */
@@ -71,6 +76,7 @@ enum mff_log_flags_bits {
MLF_USE_SNAT_ZONE = 11,
MLF_CHECK_PORT_SEC_BIT = 12,
MLF_LOOKUP_COMMIT_ECMP_NH_BIT = 13,
+ MLF_USE_LB_AFF_SESSION_BIT = 14,
};
/* MFF_LOG_FLAGS_REG flag assignments */
@@ -116,6 +122,8 @@ enum mff_log_flags {
MLF_LOCALPORT = (1 << MLF_LOCALPORT_BIT),
MLF_LOOKUP_COMMIT_ECMP_NH = (1 << MLF_LOOKUP_COMMIT_ECMP_NH_BIT),
+
+ MLF_USE_LB_AFF_SESSION = (1 << MLF_USE_LB_AFF_SESSION_BIT),
};
/* OVN logical fields
diff --git a/lib/actions.c b/lib/actions.c
index adbb42db4..5d88fccb7 100644
--- a/lib/actions.c
+++ b/lib/actions.c
@@ -4600,6 +4600,429 @@ encode_CHK_ECMP_NH(const struct ovnact_result *res,
MLF_LOOKUP_COMMIT_ECMP_NH_BIT, ofpacts);
}
+static void
+parse_commit_lb_aff(struct action_context *ctx,
+ struct ovnact_commit_lb_aff *lb_aff)
+{
+ int vip_family, backend_family;
+ uint16_t timeout, port = 0;
+ char *ip_str;
+
+ lexer_force_match(ctx->lexer, LEX_T_LPAREN); /* Skip '('. */
+ if (!lexer_match_id(ctx->lexer, "vip")) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (ctx->lexer->token.type != LEX_T_STRING) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (!ip_address_and_port_from_lb_key(ctx->lexer->token.s, &ip_str,
+ &port, &vip_family)) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (vip_family == AF_INET) {
+ ovs_be32 vip4;
+ ip_parse(ip_str, &vip4);
+ in6_addr_set_mapped_ipv4(&lb_aff->vip, vip4);
+ } else {
+ ipv6_parse(ip_str, &lb_aff->vip);
+ }
+
+ lb_aff->vip_port = port;
+ free(ip_str);
+
+ lexer_get(ctx->lexer);
+ lexer_force_match(ctx->lexer, LEX_T_COMMA);
+
+ if (!lexer_match_id(ctx->lexer, "backend")) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (ctx->lexer->token.type != LEX_T_STRING) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (!ip_address_and_port_from_lb_key(ctx->lexer->token.s, &ip_str,
+ &port, &backend_family)) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (backend_family == AF_INET) {
+ ovs_be32 backend4;
+ ip_parse(ip_str, &backend4);
+ in6_addr_set_mapped_ipv4(&lb_aff->backend, backend4);
+ } else {
+ ipv6_parse(ip_str, &lb_aff->backend);
+ }
+
+ free(ip_str);
+
+ if (backend_family != vip_family) {
+ lexer_syntax_error(ctx->lexer, "invalid protocol family");
+ return;
+ }
+
+ lb_aff->backend_port = port;
+
+ lexer_get(ctx->lexer);
+ lexer_force_match(ctx->lexer, LEX_T_COMMA);
+
+ if (lb_aff->vip_port) {
+ if (!lexer_match_id(ctx->lexer, "proto")) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+
+ if (lexer_match_id(ctx->lexer, "tcp")) {
+ lb_aff->proto = IPPROTO_TCP;
+ } else if (lexer_match_id(ctx->lexer, "udp")) {
+ lb_aff->proto = IPPROTO_UDP;
+ } else if (lexer_match_id(ctx->lexer, "sctp")) {
+ lb_aff->proto = IPPROTO_SCTP;
+ } else {
+ lexer_syntax_error(ctx->lexer, "invalid protocol");
+ return;
+ }
+ lexer_force_match(ctx->lexer, LEX_T_COMMA);
+ }
+
+ if (!lexer_match_id(ctx->lexer, "timeout")) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+ if (!lexer_force_match(ctx->lexer, LEX_T_EQUALS)) {
+ lexer_syntax_error(ctx->lexer, "invalid parameter");
+ return;
+ }
+ if (!action_parse_uint16(ctx, &timeout, "affinity timeout")) {
+ return;
+ }
+ lb_aff->timeout = timeout;
+
+ lexer_force_match(ctx->lexer, LEX_T_RPAREN); /* Skip ')'. */
+}
+
+static void
+format_COMMIT_LB_AFF(const struct ovnact_commit_lb_aff *lb_aff, struct ds *s)
+{
+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_aff->vip);
+
+ if (ipv6) {
+ char ip_str[INET6_ADDRSTRLEN] = {};
+ inet_ntop(AF_INET6, &lb_aff->vip, ip_str, INET6_ADDRSTRLEN);
+ ds_put_format(s, "commit_lb_aff(vip = \"[%s]", ip_str);
+ } else {
+ ovs_be32 ip = in6_addr_get_mapped_ipv4(&lb_aff->vip);
+ char *ip_str = xasprintf(IP_FMT, IP_ARGS(ip));
+ ds_put_format(s, "commit_lb_aff(vip = \"%s", ip_str);
+ free(ip_str);
+ }
+ if (lb_aff->vip_port) {
+ ds_put_format(s, ":%d", lb_aff->vip_port);
+ }
+ ds_put_cstr(s, "\"");
+
+ if (ipv6) {
+ char ip_str[INET6_ADDRSTRLEN] = {};
+ inet_ntop(AF_INET6, &lb_aff->backend, ip_str, INET6_ADDRSTRLEN);
+ ds_put_format(s, ", backend = \"[%s]", ip_str);
+ } else {
+ ovs_be32 ip = in6_addr_get_mapped_ipv4(&lb_aff->backend);
+ char *ip_str = xasprintf(IP_FMT, IP_ARGS(ip));
+ ds_put_format(s, ", backend = \"%s", ip_str);
+ free(ip_str);
+ }
+ if (lb_aff->backend_port) {
+ ds_put_format(s, ":%d", lb_aff->backend_port);
+ }
+ ds_put_cstr(s, "\"");
+
+ if (lb_aff->proto) {
+ const char *proto;
+ switch (lb_aff->proto) {
+ case IPPROTO_UDP:
+ proto = "udp";
+ break;
+ case IPPROTO_SCTP:
+ proto = "sctp";
+ break;
+ case IPPROTO_TCP:
+ default:
+ proto = "tcp";
+ break;
+ }
+ ds_put_format(s, ", proto = %s", proto);
+ }
+ ds_put_format(s, ", timeout = %d);", lb_aff->timeout);
+}
+
+static void
+encode_COMMIT_LB_AFF(const struct ovnact_commit_lb_aff *lb_aff,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_aff->vip);
+ size_t ol_offset = ofpacts->size;
+ struct ofpact_learn *ol = ofpact_put_LEARN(ofpacts);
+ struct match match = MATCH_CATCHALL_INITIALIZER;
+ struct ofpact_learn_spec *ol_spec;
+ unsigned int imm_bytes;
+ uint8_t *src_imm;
+
+ ol->flags = NX_LEARN_F_DELETE_LEARNED;
+ ol->idle_timeout = lb_aff->timeout; /* seconds. */
+ ol->hard_timeout = OFP_FLOW_PERMANENT;
+ ol->priority = OFP_DEFAULT_PRIORITY;
+ ol->table_id = OFTABLE_CHK_LB_AFFINITY;
+
+ /* Match on metadata of the packet that created the new table. */
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ ol_spec->dst.field = mf_from_id(MFF_METADATA);
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_MATCH;
+ ol_spec->src_type = NX_LEARN_SRC_FIELD;
+ ol_spec->src.field = mf_from_id(MFF_METADATA);
+
+ /* Match on the same ETH type as the packet that created the new table. */
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ ol_spec->dst.field = mf_from_id(MFF_ETH_TYPE);
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_MATCH;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ union mf_value imm_eth_type = {
+ .be16 = ipv6 ? htons(ETH_TYPE_IPV6) : htons(ETH_TYPE_IP)
+ };
+ mf_write_subfield_value(&ol_spec->dst, &imm_eth_type, &match);
+ /* Push value last, as this may reallocate 'ol_spec'. */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_eth_type, imm_bytes);
+
+ /* IP src. */
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ ol_spec->dst.field =
+ ipv6 ? mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC);
+ ol_spec->src.field =
+ ipv6 ? mf_from_id(MFF_IPV6_SRC) : mf_from_id(MFF_IPV4_SRC);
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_MATCH;
+ ol_spec->src_type = NX_LEARN_SRC_FIELD;
+
+ /* IP dst. */
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ ol_spec->dst.field =
+ ipv6 ? mf_from_id(MFF_IPV6_DST) : mf_from_id(MFF_IPV4_DST);
+ union mf_value imm_ip;
+ if (ipv6) {
+ imm_ip = (union mf_value) {
+ .ipv6 = lb_aff->vip,
+ };
+ } else {
+ ovs_be32 ip4 = in6_addr_get_mapped_ipv4(&lb_aff->vip);
+ imm_ip = (union mf_value) {
+ .be32 = ip4,
+ };
+ }
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_MATCH;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ mf_write_subfield_value(&ol_spec->dst, &imm_ip, &match);
+
+ /* Push value last, as this may reallocate 'ol_spec' */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_ip, imm_bytes);
+
+ if (lb_aff->proto) {
+ /* IP proto. */
+ union mf_value imm_proto = {
+ .u8 = lb_aff->proto,
+ };
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ ol_spec->dst.field = mf_from_id(MFF_IP_PROTO);
+ ol_spec->src.field = mf_from_id(MFF_IP_PROTO);
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_MATCH;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ mf_write_subfield_value(&ol_spec->dst, &imm_proto, &match);
+ /* Push value last, as this may reallocate 'ol_spec' */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_proto, imm_bytes);
+
+ /* Destination port. */
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ switch (lb_aff->proto) {
+ case IPPROTO_TCP:
+ ol_spec->dst.field = mf_from_id(MFF_TCP_DST);
+ ol_spec->src.field = mf_from_id(MFF_TCP_DST);
+ break;
+ case IPPROTO_UDP:
+ ol_spec->dst.field = mf_from_id(MFF_UDP_DST);
+ ol_spec->src.field = mf_from_id(MFF_UDP_DST);
+ break;
+ case IPPROTO_SCTP:
+ ol_spec->dst.field = mf_from_id(MFF_SCTP_DST);
+ ol_spec->src.field = mf_from_id(MFF_SCTP_DST);
+ break;
+ default:
+ OVS_NOT_REACHED();
+ break;
+ }
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_MATCH;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ /* Match on vip port. */
+ union mf_value imm_vip_port = (union mf_value) {
+ .be16 = htons(lb_aff->vip_port),
+ };
+
+ mf_write_subfield_value(&ol_spec->dst, &imm_vip_port, &match);
+ /* Push value last, as this may reallocate 'ol_spec'. */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_vip_port, imm_bytes);
+ }
+
+ /* Set MLF_USE_LB_AFF_SESSION_BIT for packets matching the learned flow. */
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ ol_spec->dst.field = mf_from_id(MFF_LOG_FLAGS);
+ ol_spec->dst.ofs = MLF_USE_LB_AFF_SESSION_BIT;
+ ol_spec->dst.n_bits = 1;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ ol_spec->dst_type = NX_LEARN_DST_LOAD;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ union mf_value imm_reg_value = {
+ .u8 = 1
+ };
+ mf_write_subfield_value(&ol_spec->dst, &imm_reg_value, &match);
+
+ /* Push value last, as this may reallocate 'ol_spec'. */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_reg_value, imm_bytes);
+
+ /* Load backend IP in REG4/XXREG1. */
+ union mf_value imm_backend_ip;
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+
+ if (ipv6) {
+ imm_backend_ip = (union mf_value) {
+ .ipv6 = lb_aff->backend,
+ };
+ if (ep->is_switch) {
+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_LS_IP6_ADDR);
+ } else {
+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_LR_IP6_ADDR);
+ }
+ } else {
+ ovs_be32 ip4 = in6_addr_get_mapped_ipv4(&lb_aff->backend);
+ imm_backend_ip = (union mf_value) {
+ .be32 = ip4,
+ };
+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_IP4_ADDR);
+ }
+
+ ol_spec->dst_type = NX_LEARN_DST_LOAD;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = ol_spec->dst.field->n_bits;
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ mf_write_subfield_value(&ol_spec->dst, &imm_backend_ip, &match);
+ /* Push value last, as this may reallocate 'ol_spec'. */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_backend_ip, imm_bytes);
+
+ if (lb_aff->backend_port) {
+ /* Load backend port in REG8. */
+ union mf_value imm_backend_port;
+ ol_spec = ofpbuf_put_zeros(ofpacts, sizeof *ol_spec);
+ imm_backend_port = (union mf_value) {
+ .be16 = htons(lb_aff->backend_port),
+ };
+
+ ol_spec->dst.field = mf_from_id(MFF_LOG_LB_AFF_MATCH_PORT);
+ ol_spec->dst_type = NX_LEARN_DST_LOAD;
+ ol_spec->src_type = NX_LEARN_SRC_IMMEDIATE;
+ ol_spec->dst.ofs = 0;
+ ol_spec->dst.n_bits = 8 * sizeof(lb_aff->backend_port);
+ ol_spec->n_bits = ol_spec->dst.n_bits;
+ mf_write_subfield_value(&ol_spec->dst, &imm_backend_port, &match);
+ /* Push value last, as this may reallocate 'ol_spec'. */
+ imm_bytes = DIV_ROUND_UP(ol_spec->dst.n_bits, 8);
+ src_imm = ofpbuf_put_zeros(ofpacts, OFPACT_ALIGN(imm_bytes));
+ memcpy(src_imm, &imm_backend_port, imm_bytes);
+ }
+
+ ol = ofpbuf_at_assert(ofpacts, ol_offset, sizeof *ol);
+ ofpact_finish_LEARN(ofpacts, &ol);
+}
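+
+/* For illustration only: with a hypothetical IPv4/TCP VIP 10.0.0.10:80,
+ * backend 10.0.0.2:8080 and a 60 second affinity timeout, the learn
+ * action built above corresponds, roughly, to an OpenFlow action of the
+ * form:
+ *
+ * learn(table=OFTABLE_CHK_LB_AFFINITY, idle_timeout=60, delete_learned,
+ * match on metadata, eth_type=0x0800, ip src (copied from the packet),
+ * nw_dst=10.0.0.10, nw_proto=6, tp_dst=80,
+ * load 1 into MLF_USE_LB_AFF_SESSION_BIT of MFF_LOG_FLAGS,
+ * load 10.0.0.2 into MFF_LOG_LB_AFF_MATCH_IP4_ADDR,
+ * load 8080 into MFF_LOG_LB_AFF_MATCH_PORT)
+ */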
+
+static void
+ovnact_commit_lb_aff_free(struct ovnact_commit_lb_aff *lb_aff OVS_UNUSED)
+{
+}
+
+static void
+parse_chk_lb_aff(struct action_context *ctx, const struct expr_field *dst,
+ struct ovnact_result *res)
+{
+ parse_ovnact_result(ctx, "chk_lb_aff", NULL, dst, res);
+}
+
+static void
+format_CHK_LB_AFF(const struct ovnact_result *res, struct ds *s)
+{
+ expr_field_format(&res->dst, s);
+ ds_put_cstr(s, " = chk_lb_aff();");
+}
+
+static void
+encode_CHK_LB_AFF(const struct ovnact_result *res,
+ const struct ovnact_encode_params *ep OVS_UNUSED,
+ struct ofpbuf *ofpacts)
+{
+ encode_result_action__(res, OFTABLE_CHK_LB_AFFINITY,
+ MLF_USE_LB_AFF_SESSION_BIT, ofpacts);
+}
+
/* Parses an assignment or exchange or put_dhcp_opts action. */
static void
parse_set_action(struct action_context *ctx)
@@ -4684,6 +5107,10 @@ parse_set_action(struct action_context *ctx)
&& lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
parse_chk_ecmp_nh(ctx, &lhs,
ovnact_put_CHK_ECMP_NH(ctx->ovnacts));
+ } else if (!strcmp(ctx->lexer->token.s, "chk_lb_aff") &&
+ lexer_lookahead(ctx->lexer) == LEX_T_LPAREN) {
+ parse_chk_lb_aff(ctx, &lhs,
+ ovnact_put_CHK_LB_AFF(ctx->ovnacts));
} else {
parse_assignment_action(ctx, false, &lhs);
}
@@ -4790,6 +5217,8 @@ parse_action(struct action_context *ctx)
parse_put_fdb(ctx, ovnact_put_PUT_FDB(ctx->ovnacts));
} else if (lexer_match_id(ctx->lexer, "commit_ecmp_nh")) {
parse_commit_ecmp_nh(ctx, ovnact_put_COMMIT_ECMP_NH(ctx->ovnacts));
+ } else if (lexer_match_id(ctx->lexer, "commit_lb_aff")) {
+ parse_commit_lb_aff(ctx, ovnact_put_COMMIT_LB_AFF(ctx->ovnacts));
} else {
lexer_syntax_error(ctx->lexer, "expecting action");
}
diff --git a/lib/features.c b/lib/features.c
index f15ec42bb..462b99818 100644
--- a/lib/features.c
+++ b/lib/features.c
@@ -26,10 +26,13 @@
#include "openvswitch/rconn.h"
#include "openvswitch/ofp-msgs.h"
#include "openvswitch/ofp-meter.h"
+#include "openvswitch/ofp-util.h"
#include "ovn/features.h"
VLOG_DEFINE_THIS_MODULE(features);
+#define FEATURES_DEFAULT_PROBE_INTERVAL_SEC 5
+
struct ovs_feature {
enum ovs_feature_value value;
const char *name;
@@ -74,7 +77,8 @@ static void
ovs_feature_rconn_setup(const char *br_name)
{
if (!swconn) {
- swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP15_VERSION);
+ swconn = rconn_create(FEATURES_DEFAULT_PROBE_INTERVAL_SEC, 0,
+ DSCP_DEFAULT, 1 << OFP15_VERSION);
}
if (!rconn_is_connected(swconn)) {
@@ -85,11 +89,14 @@ ovs_feature_rconn_setup(const char *br_name)
}
free(target);
}
+ rconn_set_probe_interval(swconn, FEATURES_DEFAULT_PROBE_INTERVAL_SEC);
}
static bool
ovs_feature_get_openflow_cap(const char *br_name)
{
+ struct ofpbuf *msg;
+
if (!br_name) {
return false;
}
@@ -102,15 +109,14 @@ ovs_feature_get_openflow_cap(const char *br_name)
}
/* send new requests just after reconnect. */
- if (conn_seq_no == rconn_get_connection_seqno(swconn)) {
- return false;
+ if (conn_seq_no != rconn_get_connection_seqno(swconn)) {
+ /* dump datapath meter capabilities. */
+ msg = ofpraw_alloc(OFPRAW_OFPST13_METER_FEATURES_REQUEST,
+ rconn_get_version(swconn), 0);
+ rconn_send(swconn, msg, NULL);
}
bool ret = false;
- /* dump datapath meter capabilities. */
- struct ofpbuf *msg = ofpraw_alloc(OFPRAW_OFPST13_METER_FEATURES_REQUEST,
- rconn_get_version(swconn), 0);
- rconn_send(swconn, msg, NULL);
for (int i = 0; i < 50; i++) {
msg = rconn_recv(swconn);
if (!msg) {
@@ -137,6 +143,8 @@ ovs_feature_get_openflow_cap(const char *br_name)
}
}
conn_seq_no = rconn_get_connection_seqno(swconn);
+ } else if (type == OFPTYPE_ECHO_REQUEST) {
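+ /* Echo requests are the switch's keepalive probes; answer them
+ * so the connection is not considered dead. */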
+ rconn_send(swconn, ofputil_encode_echo_reply(oh), NULL);
}
ofpbuf_delete(msg);
}
diff --git a/lib/lb.c b/lib/lb.c
index 477cf8f5e..bb5ae2196 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -225,6 +225,16 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
smap_get_def(&nbrec_lb->options, "neighbor_responder", "reachable");
lb->neigh_mode = strcmp(mode, "all") ? LB_NEIGH_RESPOND_REACHABLE
: LB_NEIGH_RESPOND_ALL;
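+ /* 'affinity_timeout' is stored in a uint16_t field (see struct
+ * ovn_northd_lb), so clamp the configured value to UINT16_MAX. */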
+ uint32_t affinity_timeout =
+ smap_get_uint(&nbrec_lb->options, "affinity_timeout", 0);
+ if (affinity_timeout > UINT16_MAX) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_WARN_RL(&rl, "max affinity_timeout value is %u",
+ UINT16_MAX);
+ affinity_timeout = UINT16_MAX;
+ }
+ lb->affinity_timeout = affinity_timeout;
+
sset_init(&lb->ips_v4);
sset_init(&lb->ips_v6);
struct smap_node *node;
diff --git a/lib/lb.h b/lib/lb.h
index 9b902f005..241872681 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -67,6 +67,7 @@ struct ovn_northd_lb {
bool controller_event;
bool routable;
bool skip_snat;
+ uint16_t affinity_timeout;
struct sset ips_v4;
struct sset ips_v6;
diff --git a/northd/northd.c b/northd/northd.c
index 84440a47f..404c40b8c 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -121,20 +121,22 @@ enum ovn_stage {
PIPELINE_STAGE(SWITCH, IN, ACL, 8, "ls_in_acl") \
PIPELINE_STAGE(SWITCH, IN, QOS_MARK, 9, "ls_in_qos_mark") \
PIPELINE_STAGE(SWITCH, IN, QOS_METER, 10, "ls_in_qos_meter") \
- PIPELINE_STAGE(SWITCH, IN, LB, 11, "ls_in_lb") \
- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB, 12, "ls_in_acl_after_lb") \
- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 13, "ls_in_stateful") \
- PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 14, "ls_in_pre_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 15, "ls_in_nat_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 16, "ls_in_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 17, "ls_in_arp_rsp") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 18, "ls_in_dhcp_options") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 19, "ls_in_dhcp_response") \
- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 20, "ls_in_dns_lookup") \
- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 21, "ls_in_dns_response") \
- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 22, "ls_in_external_port") \
- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 23, "ls_in_l2_lkup") \
- PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 24, "ls_in_l2_unknown") \
+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 11, "ls_in_lb_aff_check") \
+ PIPELINE_STAGE(SWITCH, IN, LB, 12, "ls_in_lb") \
+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 13, "ls_in_lb_aff_learn") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB, 14, "ls_in_acl_after_lb") \
+ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 15, "ls_in_stateful") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 16, "ls_in_pre_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 17, "ls_in_nat_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 18, "ls_in_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 19, "ls_in_arp_rsp") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 20, "ls_in_dhcp_options") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 21, "ls_in_dhcp_response") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 22, "ls_in_dns_lookup") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 23, "ls_in_dns_response") \
+ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 24, "ls_in_external_port") \
+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 25, "ls_in_l2_lkup") \
+ PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 26, "ls_in_l2_unknown") \
\
/* Logical switch egress stages. */ \
PIPELINE_STAGE(SWITCH, OUT, PRE_LB, 0, "ls_out_pre_lb") \
@@ -155,20 +157,22 @@ enum ovn_stage {
PIPELINE_STAGE(ROUTER, IN, IP_INPUT, 3, "lr_in_ip_input") \
PIPELINE_STAGE(ROUTER, IN, UNSNAT, 4, "lr_in_unsnat") \
PIPELINE_STAGE(ROUTER, IN, DEFRAG, 5, "lr_in_defrag") \
- PIPELINE_STAGE(ROUTER, IN, DNAT, 6, "lr_in_dnat") \
- PIPELINE_STAGE(ROUTER, IN, ECMP_STATEFUL, 7, "lr_in_ecmp_stateful") \
- PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 8, "lr_in_nd_ra_options") \
- PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 9, "lr_in_nd_ra_response") \
- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_PRE, 10, "lr_in_ip_routing_pre") \
- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 11, "lr_in_ip_routing") \
- PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_ECMP, 12, "lr_in_ip_routing_ecmp") \
- PIPELINE_STAGE(ROUTER, IN, POLICY, 13, "lr_in_policy") \
- PIPELINE_STAGE(ROUTER, IN, POLICY_ECMP, 14, "lr_in_policy_ecmp") \
- PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 15, "lr_in_arp_resolve") \
- PIPELINE_STAGE(ROUTER, IN, CHK_PKT_LEN, 16, "lr_in_chk_pkt_len") \
- PIPELINE_STAGE(ROUTER, IN, LARGER_PKTS, 17, "lr_in_larger_pkts") \
- PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 18, "lr_in_gw_redirect") \
- PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 19, "lr_in_arp_request") \
+ PIPELINE_STAGE(ROUTER, IN, LB_AFF_CHECK, 6, "lr_in_lb_aff_check") \
+ PIPELINE_STAGE(ROUTER, IN, DNAT, 7, "lr_in_dnat") \
+ PIPELINE_STAGE(ROUTER, IN, LB_AFF_LEARN, 8, "lr_in_lb_aff_learn") \
+ PIPELINE_STAGE(ROUTER, IN, ECMP_STATEFUL, 9, "lr_in_ecmp_stateful") \
+ PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 10, "lr_in_nd_ra_options") \
+ PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 11, "lr_in_nd_ra_response") \
+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_PRE, 12, "lr_in_ip_routing_pre") \
+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 13, "lr_in_ip_routing") \
+ PIPELINE_STAGE(ROUTER, IN, IP_ROUTING_ECMP, 14, "lr_in_ip_routing_ecmp") \
+ PIPELINE_STAGE(ROUTER, IN, POLICY, 15, "lr_in_policy") \
+ PIPELINE_STAGE(ROUTER, IN, POLICY_ECMP, 16, "lr_in_policy_ecmp") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 17, "lr_in_arp_resolve") \
+ PIPELINE_STAGE(ROUTER, IN, CHK_PKT_LEN, 18, "lr_in_chk_pkt_len") \
+ PIPELINE_STAGE(ROUTER, IN, LARGER_PKTS, 19, "lr_in_larger_pkts") \
+ PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 20, "lr_in_gw_redirect") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 21, "lr_in_arp_request") \
\
/* Logical router egress stages. */ \
PIPELINE_STAGE(ROUTER, OUT, CHECK_DNAT_LOCAL, 0, \
@@ -215,8 +219,17 @@ enum ovn_stage {
#define REG_ORIG_DIP_IPV6 "xxreg1"
#define REG_ORIG_TP_DPORT "reg2[0..15]"
+/* Register used to store the backend IPv6 address
+ * for load balancer affinity. */
+#define REG_LB_L2_AFF_BACKEND_IP6 "xxreg0"
+
/* Register definitions for switches and routers. */
+/* Register used to store the backend IPv4 address
+ * for load balancer affinity. */
+#define REG_LB_AFF_BACKEND_IP4 "reg4"
+#define REG_LB_AFF_MATCH_PORT "reg8[0..15]"
+
/* Indicate that this packet has been recirculated using egress
* loopback. This allows certain checks to be bypassed, such as a
* logical router dropping packets with source IP address equals
@@ -228,6 +241,7 @@ enum ovn_stage {
#define REGBIT_LOOKUP_NEIGHBOR_IP_RESULT "reg9[3]"
#define REGBIT_DST_NAT_IP_LOCAL "reg9[4]"
#define REGBIT_KNOWN_ECMP_NH "reg9[5]"
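+/* Set to 1 by chk_lb_aff() if the packet matches a learned affinity flow. */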
+#define REGBIT_KNOWN_LB_SESSION "reg9[6]"
/* Register to store the eth address associated to a router port for packets
* received in S_ROUTER_IN_ADMISSION.
@@ -245,6 +259,10 @@ enum ovn_stage {
#define REG_SRC_IPV6 "xxreg1"
#define REG_ROUTE_TABLE_ID "reg7"
+/* Register used to store the backend IPv6 address
+ * for load balancer affinity. */
+#define REG_LB_L3_AFF_BACKEND_IP6 "xxreg1"
+
#define REG_ORIG_TP_DPORT_ROUTER "reg9[16..31]"
/* Register used for setting a label for ACLs in a Logical Switch. */
@@ -267,73 +285,75 @@ enum ovn_stage {
* OVS register usage:
*
* Logical Switch pipeline:
- * +----+----------------------------------------------+---+------------------+
- * | R0 | REGBIT_{CONNTRACK/DHCP/DNS} | | |
- * | | REGBIT_{HAIRPIN/HAIRPIN_REPLY} | | |
- * | | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} | | |
- * | | REGBIT_ACL_LABEL | X | |
- * +----+----------------------------------------------+ X | |
- * | R1 | ORIG_DIP_IPV4 (>= IN_PRE_STATEFUL) | R | |
- * +----+----------------------------------------------+ E | |
- * | R2 | ORIG_TP_DPORT (>= IN_PRE_STATEFUL) | G | |
- * +----+----------------------------------------------+ 0 | |
- * | R3 | ACL LABEL | | |
- * +----+----------------------------------------------+---+------------------+
- * | R4 | UNUSED | | |
- * +----+----------------------------------------------+ X | ORIG_DIP_IPV6(>= |
- * | R5 | UNUSED | X | IN_PRE_STATEFUL) |
- * +----+----------------------------------------------+ R | |
- * | R6 | UNUSED | E | |
- * +----+----------------------------------------------+ G | |
- * | R7 | UNUSED | 1 | |
- * +----+----------------------------------------------+---+------------------+
- * | R8 | UNUSED |
+ * +----+----------------------------------------------+---+-----------------------------------+
+ * | R0 | REGBIT_{CONNTRACK/DHCP/DNS} | | |
+ * | | REGBIT_{HAIRPIN/HAIRPIN_REPLY} | | |
+ * | | REGBIT_ACL_HINT_{ALLOW_NEW/ALLOW/DROP/BLOCK} | | |
+ * | | REGBIT_ACL_LABEL | X | |
+ * +----+----------------------------------------------+ X | |
+ * | R1 | ORIG_DIP_IPV4 (>= IN_PRE_STATEFUL) | R | LB_L2_AFF_BACKEND_IP6 |
+ * +----+----------------------------------------------+ E | |
+ * | R2 | ORIG_TP_DPORT (>= IN_PRE_STATEFUL) | G | |
+ * +----+----------------------------------------------+ 0 | |
+ * | R3 | ACL LABEL | | |
+ * +----+----------------------------------------------+---+-----------------------------------+
+ * | R4 | REG_LB_AFF_BACKEND_IP4 | | |
+ * +----+----------------------------------------------+ X | |
+ * | R5 | UNUSED | X | ORIG_DIP_IPV6(>= IN_PRE_STATEFUL) |
+ * +----+----------------------------------------------+ R | |
+ * | R6 | UNUSED | E | |
+ * +----+----------------------------------------------+ G | |
+ * | R7 | UNUSED | 1 | |
+ * +----+----------------------------------------------+---+-----------------------------------+
+ * | R8 | LB_AFF_MATCH_PORT |
* +----+----------------------------------------------+
* | R9 | UNUSED |
* +----+----------------------------------------------+
*
* Logical Router pipeline:
- * +-----+--------------------------+---+-----------------+---+---------------+
- * | R0 | REGBIT_ND_RA_OPTS_RESULT | | | | |
- * | | (= IN_ND_RA_OPTIONS) | X | | | |
- * | | NEXT_HOP_IPV4 | R | | | |
- * | | (>= IP_INPUT) | E | INPORT_ETH_ADDR | X | |
- * +-----+--------------------------+ G | (< IP_INPUT) | X | |
- * | R1 | SRC_IPV4 for ARP-REQ | 0 | | R | |
- * | | (>= IP_INPUT) | | | E | NEXT_HOP_IPV6 |
- * +-----+--------------------------+---+-----------------+ G | ( >= DEFRAG ) |
- * | R2 | UNUSED | X | | 0 | |
- * | | | R | | | |
- * +-----+--------------------------+ E | UNUSED | | |
- * | R3 | UNUSED | G | | | |
- * | | | 1 | | | |
- * +-----+--------------------------+---+-----------------+---+---------------+
- * | R4 | UNUSED | X | | | |
- * | | | R | | | |
- * +-----+--------------------------+ E | UNUSED | X | |
- * | R5 | UNUSED | G | | X | |
- * | | | 2 | | R |SRC_IPV6 for NS|
- * +-----+--------------------------+---+-----------------+ E | ( >= |
- * | R6 | UNUSED | X | | G | IN_IP_ROUTING)|
- * | | | R | | 1 | |
- * +-----+--------------------------+ E | UNUSED | | |
- * | R7 | ROUTE_TABLE_ID | G | | | |
- * | | (>= IN_IP_ROUTING_PRE && | 3 | | | |
- * | | <= IN_IP_ROUTING) | | | | |
- * +-----+--------------------------+---+-----------------+---+---------------+
- * | R8 | ECMP_GROUP_ID | | |
- * | | ECMP_MEMBER_ID | X | |
- * +-----+--------------------------+ R | |
- * | | REGBIT_{ | E | |
- * | | EGRESS_LOOPBACK/ | G | UNUSED |
- * | R9 | PKT_LARGER/ | 4 | |
- * | | LOOKUP_NEIGHBOR_RESULT/| | |
- * | | SKIP_LOOKUP_NEIGHBOR/ | | |
- * | | KNOWN_ECMP_NH} | | |
- * | | | | |
- * | | REG_ORIG_TP_DPORT_ROUTER | | |
- * | | | | |
- * +-----+--------------------------+---+-----------------+
+ * +-----+---------------------------+---+-----------------+---+------------------------------------+
+ * | R0 | REGBIT_ND_RA_OPTS_RESULT | | | | |
+ * | | (= IN_ND_RA_OPTIONS) | X | | | |
+ * | | NEXT_HOP_IPV4 | R | | | |
+ * | | (>= IP_INPUT) | E | INPORT_ETH_ADDR | X | |
+ * +-----+---------------------------+ G | (< IP_INPUT) | X | |
+ * | R1 | SRC_IPV4 for ARP-REQ | 0 | | R | |
+ * | | (>= IP_INPUT) | | | E | NEXT_HOP_IPV6 (>= DEFRAG ) |
+ * +-----+---------------------------+---+-----------------+ G | |
+ * | R2 | UNUSED | X | | 0 | |
+ * | | | R | | | |
+ * +-----+---------------------------+ E | UNUSED | | |
+ * | R3 | UNUSED | G | | | |
+ * | | | 1 | | | |
+ * +-----+---------------------------+---+-----------------+---+------------------------------------+
+ * | R4 | REG_LB_AFF_BACKEND_IP4 | X | | | |
+ * | | | R | | | |
+ * +-----+---------------------------+ E | UNUSED | X | |
+ * | R5 | UNUSED | G | | X | |
+ * | | | 2 | | R | LB_L3_AFF_BACKEND_IP6 |
+ * +-----+---------------------------+---+-----------------+ E | (<= IN_DNAT) |
+ * | R6 | UNUSED | X | | G | |
+ * | | | R | | 1 | |
+ * +-----+---------------------------+ E | UNUSED | | |
+ * | R7 | ROUTE_TABLE_ID | G | | | |
+ * | | (>= IN_IP_ROUTING_PRE && | 3 | | | |
+ * | | <= IN_IP_ROUTING) | | | | |
+ * +-----+---------------------------+---+-----------------+---+------------------------------------+
+ * | R8 | ECMP_GROUP_ID | | |
+ * | | ECMP_MEMBER_ID | | |
+ * | | LB_AFF_MATCH_PORT | X | |
+ * +-----+---------------------------+ R | |
+ * | | REGBIT_{ | E | |
+ * | | EGRESS_LOOPBACK/ | G | UNUSED |
+ * | R9 | PKT_LARGER/ | 4 | |
+ * | | LOOKUP_NEIGHBOR_RESULT/ | | |
+ * | | SKIP_LOOKUP_NEIGHBOR/ | | |
+ * | | KNOWN_ECMP_NH} | | |
+ * | | | | |
+ * | | REG_ORIG_TP_DPORT_ROUTER | | |
+ * | | | | |
+ * +-----+---------------------------+---+-----------------+
*
*/
@@ -1040,7 +1060,16 @@ init_mcast_info_for_switch_datapath(struct ovn_datapath *od)
mcast_sw_info->query_max_response =
smap_get_ullong(&od->nbs->other_config, "mcast_query_max_response",
OVN_MCAST_DEFAULT_QUERY_MAX_RESPONSE_S);
+}
+
+static void
+init_mcast_flow_count(struct ovn_datapath *od)
+{
+ if (od->nbr) {
+ return;
+ }
+ struct mcast_switch_info *mcast_sw_info = &od->mcast_info.sw;
mcast_sw_info->active_v4_flows = ATOMIC_VAR_INIT(0);
mcast_sw_info->active_v6_flows = ATOMIC_VAR_INIT(0);
}
@@ -6936,6 +6965,426 @@ build_lb_rules_pre_stateful(struct hmap *lflows, struct ovn_northd_lb *lb,
}
}
+/* Builds the logical router flows related to load balancer affinity.
+ * For an LB configured with 'vip=V:VP', backends 'B1:BP1,B2:BP2' and
+ * affinity timeout set to T, it generates the following logical flows:
+ * - load balancing affinity check:
+ * table=lr_in_lb_aff_check, priority=100
+ * match=(new_lb_match)
+ * action=(REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;)
+ *
+ * - load balancing:
+ * table=lr_in_dnat, priority=150
+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4
+ * && REG_LB_AFF_BACKEND_IP4 == B1 && REG_LB_AFF_MATCH_PORT == BP1)
+ * action=(REG_NEXT_HOP_IPV4 = V; lb_action;
+ * ct_lb_mark(backends=B1:BP1);)
+ * table=lr_in_dnat, priority=150
+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4
+ * && REG_LB_AFF_BACKEND_IP4 == B2 && REG_LB_AFF_MATCH_PORT == BP2)
+ * action=(REG_NEXT_HOP_IPV4 = V; lb_action;
+ * ct_lb_mark(backends=B2:BP2);)
+ *
+ * - load balancing affinity learn:
+ * table=lr_in_lb_aff_learn, priority=100
+ * match=(REGBIT_KNOWN_LB_SESSION == 0
+ * && ct.new && ip4
+ * && REG_NEXT_HOP_IPV4 == V && REG_ORIG_TP_DPORT_ROUTER == VP
+ * && ip4.dst == B1 && tcp.dst == BP1)
+ * action=(commit_lb_aff(vip = "V:VP", backend = "B1:BP1",
+ * proto = tcp, timeout = T));
+ * table=lr_in_lb_aff_learn, priority=100
+ * match=(REGBIT_KNOWN_LB_SESSION == 0
+ * && ct.new && ip4
+ * && REG_NEXT_HOP_IPV4 == V && REG_ORIG_TP_DPORT_ROUTER == VP
+ * && ip4.dst == B2 && tcp.dst == BP2)
+ * action=(commit_lb_aff(vip = "V:VP", backend = "B2:BP2",
+ * proto = tcp, timeout = T));
+ *
+ */
+static void
+build_lb_affinity_lr_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
+ struct ovn_lb_vip *lb_vip, char *new_lb_match,
+ char *lb_action, struct ovn_datapath **dplist,
+ int n_dplist)
+{
+ if (!lb->affinity_timeout) {
+ return;
+ }
+
+ static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
+ struct ovn_lflow *lflow_ref_aff_check = NULL;
+ /* Check if we already have an established connection for this
+ * tuple and we are within the affinity timeslot. */
+ uint32_t hash_aff_check = ovn_logical_flow_hash(
+ ovn_stage_get_table(S_ROUTER_IN_LB_AFF_CHECK),
+ ovn_stage_get_pipeline(S_ROUTER_IN_LB_AFF_CHECK), 100,
+ new_lb_match, aff_check);
+
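+ /* Add the affinity check flow once and reference it from the other
+ * datapaths through a datapath group, instead of duplicating it. */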
+ for (size_t i = 0; i < n_dplist; i++) {
+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_check, dplist[i])) {
+ lflow_ref_aff_check = ovn_lflow_add_at_with_hash(
+ lflows, dplist[i], S_ROUTER_IN_LB_AFF_CHECK, 100,
+ new_lb_match, aff_check, NULL, NULL, &lb->nlb->header_,
+ OVS_SOURCE_LOCATOR, hash_aff_check);
+ }
+ }
+
+ struct ds aff_action = DS_EMPTY_INITIALIZER;
+ struct ds aff_action_learn = DS_EMPTY_INITIALIZER;
+ struct ds aff_match = DS_EMPTY_INITIALIZER;
+ struct ds aff_match_learn = DS_EMPTY_INITIALIZER;
+
+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_vip->vip);
+ const char *ip_match = ipv6 ? "ip6" : "ip4";
+
+ const char *reg_vip = ipv6 ? REG_NEXT_HOP_IPV6 : REG_NEXT_HOP_IPV4;
+ const char *reg_backend =
+ ipv6 ? REG_LB_L3_AFF_BACKEND_IP6 : REG_LB_AFF_BACKEND_IP4;
+
+ /* Prepare common part of affinity LB and affinity learn action. */
+ ds_put_format(&aff_action, "%s = %s; ", reg_vip, lb_vip->vip_str);
+ ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
+
+ if (lb_vip->vip_port) {
+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%"PRIu16 : "%s:%"PRIu16,
+ lb_vip->vip_str, lb_vip->vip_port);
+ } else {
+ ds_put_cstr(&aff_action_learn, lb_vip->vip_str);
+ }
+
+ if (lb_action) {
+ ds_put_cstr(&aff_action, lb_action);
+ }
+ ds_put_cstr(&aff_action, "ct_lb_mark(backends=");
+ ds_put_cstr(&aff_action_learn, "\", backend = \"");
+
+ /* Prepare common part of affinity learn match. */
+ if (lb_vip->vip_port) {
+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
+ "ct.new && %s && %s == %s && "
+ REG_ORIG_TP_DPORT_ROUTER" == %"PRIu16" && "
+ "%s.dst == ", ip_match, reg_vip, lb_vip->vip_str,
+ lb_vip->vip_port, ip_match);
+ } else {
+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
+ "ct.new && %s && %s == %s && %s.dst == ", ip_match,
+ reg_vip, lb_vip->vip_str, ip_match);
+ }
+
+ /* Prepare common part of affinity match. */
+ ds_put_format(&aff_match, REGBIT_KNOWN_LB_SESSION" == 1 && "
+ "ct.new && %s && %s == ", ip_match, reg_backend);
+
+ /* Store the common part length. */
+ size_t aff_action_len = aff_action.length;
+ size_t aff_action_learn_len = aff_action_learn.length;
+ size_t aff_match_len = aff_match.length;
+ size_t aff_match_learn_len = aff_match_learn.length;
+
+ for (size_t i = 0; i < lb_vip->n_backends; i++) {
+ struct ovn_lb_backend *backend = &lb_vip->backends[i];
+
+ ds_put_cstr(&aff_match_learn, backend->ip_str);
+ ds_put_cstr(&aff_match, backend->ip_str);
+
+ if (backend->port) {
+ ds_put_format(&aff_action, ipv6 ? "[%s]:%d" : "%s:%d",
+ backend->ip_str, backend->port);
+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%d" : "%s:%d",
+ backend->ip_str, backend->port);
+
+ ds_put_format(&aff_match_learn, " && %s.dst == %d",
+ lb->proto, backend->port);
+ ds_put_format(&aff_match, " && "REG_LB_AFF_MATCH_PORT" == %d",
+ backend->port);
+ } else {
+ ds_put_cstr(&aff_action, backend->ip_str);
+ ds_put_cstr(&aff_action_learn, backend->ip_str);
+ }
+
+ ds_put_cstr(&aff_action, ");");
+ ds_put_char(&aff_action_learn, '"');
+
+ if (lb_vip->vip_port) {
+ ds_put_format(&aff_action_learn, ", proto = %s", lb->proto);
+ }
+
+ ds_put_format(&aff_action_learn, ", timeout = %d); /* drop */",
+ lb->affinity_timeout);
+
+ struct ovn_lflow *lflow_ref_aff_learn = NULL;
+ uint32_t hash_aff_learn = ovn_logical_flow_hash(
+ ovn_stage_get_table(S_ROUTER_IN_LB_AFF_LEARN),
+ ovn_stage_get_pipeline(S_ROUTER_IN_LB_AFF_LEARN),
+ 100, ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn));
+
+ struct ovn_lflow *lflow_ref_aff_lb = NULL;
+ uint32_t hash_aff_lb = ovn_logical_flow_hash(
+ ovn_stage_get_table(S_ROUTER_IN_DNAT),
+ ovn_stage_get_pipeline(S_ROUTER_IN_DNAT),
+ 150, ds_cstr(&aff_match), ds_cstr(&aff_action));
+
+ for (size_t j = 0; j < n_dplist; j++) {
+ /* Learn the flow tuple into OFTABLE_CHK_LB_AFFINITY. */
+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_learn,
+ dplist[j])) {
+ lflow_ref_aff_learn = ovn_lflow_add_at_with_hash(
+ lflows, dplist[j], S_ROUTER_IN_LB_AFF_LEARN, 100,
+ ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn),
+ NULL, NULL, &lb->nlb->header_, OVS_SOURCE_LOCATOR,
+ hash_aff_learn);
+ }
+ /* Use the already selected backend within the affinity timeslot. */
+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_lb,
+ dplist[j])) {
+ lflow_ref_aff_lb = ovn_lflow_add_at_with_hash(
+ lflows, dplist[j], S_ROUTER_IN_DNAT, 150,
+ ds_cstr(&aff_match), ds_cstr(&aff_action), NULL, NULL,
+ &lb->nlb->header_, OVS_SOURCE_LOCATOR,
+ hash_aff_lb);
+ }
+ }
+
+ ds_truncate(&aff_action, aff_action_len);
+ ds_truncate(&aff_action_learn, aff_action_learn_len);
+ ds_truncate(&aff_match, aff_match_len);
+ ds_truncate(&aff_match_learn, aff_match_learn_len);
+ }
+
+ ds_destroy(&aff_action);
+ ds_destroy(&aff_action_learn);
+ ds_destroy(&aff_match);
+ ds_destroy(&aff_match_learn);
+}
+
+/* Builds the logical switch flows related to load balancer affinity.
+ * For an LB configured with 'vip=V:VP', backends 'B1:BP1,B2:BP2' and
+ * affinity timeout set to T, it generates the following logical flows:
+ * - load balancing affinity check:
+ * table=ls_in_lb_aff_check, priority=100
+ * match=(ct.new && ip4
+ * && REG_ORIG_DIP_IPV4 == V && REG_ORIG_TP_DPORT == VP)
+ * action=(REGBIT_KNOWN_LB_SESSION = chk_lb_aff(); next;)
+ *
+ * - load balancing:
+ * table=ls_in_lb, priority=150
+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4
+ * && REG_LB_AFF_BACKEND_IP4 == B1 && REG_LB_AFF_MATCH_PORT == BP1)
+ * action=(REGBIT_CONNTRACK_COMMIT = 0;
+ * REG_ORIG_DIP_IPV4 = V; REG_ORIG_TP_DPORT = VP;
+ * ct_lb_mark(backends=B1:BP1);)
+ * table=ls_in_lb, priority=150
+ * match=(REGBIT_KNOWN_LB_SESSION == 1 && ct.new && ip4
+ * && REG_LB_AFF_BACKEND_IP4 == B2 && REG_LB_AFF_MATCH_PORT == BP2)
+ * action=(REGBIT_CONNTRACK_COMMIT = 0;
+ * REG_ORIG_DIP_IPV4 = V;
+ * REG_ORIG_TP_DPORT = VP;
+ * ct_lb_mark(backends=B2:BP2);)
+ *
+ * - load balancing affinity learn:
+ * table=ls_in_lb_aff_learn, priority=100
+ * match=(REGBIT_KNOWN_LB_SESSION == 0
+ * && ct.new && ip4
+ * && REG_ORIG_DIP_IPV4 == V && REG_ORIG_TP_DPORT == VP
+ * && ip4.dst == B1 && tcp.dst == BP1)
+ * action=(commit_lb_aff(vip = "V:VP", backend = "B1:BP1",
+ * proto = tcp, timeout = T));
+ * table=ls_in_lb_aff_learn, priority=100
+ * match=(REGBIT_KNOWN_LB_SESSION == 0
+ * && ct.new && ip4
+ * && REG_ORIG_DIP_IPV4 == V && REG_ORIG_TP_DPORT == VP
+ * && ip4.dst == B2 && tcp.dst == BP2)
+ * action=(commit_lb_aff(vip = "V:VP", backend = "B2:BP2",
+ * proto = tcp, timeout = T));
+ *
+ */
+static void
+build_lb_affinity_ls_flows(struct hmap *lflows, struct ovn_northd_lb *lb,
+ struct ovn_lb_vip *lb_vip)
+{
+ if (!lb->affinity_timeout) {
+ return;
+ }
+
+ struct ds new_lb_match = DS_EMPTY_INITIALIZER;
+ if (IN6_IS_ADDR_V4MAPPED(&lb_vip->vip)) {
+ ds_put_format(&new_lb_match,
+ "ct.new && ip4 && "REG_ORIG_DIP_IPV4 " == %s",
+ lb_vip->vip_str);
+ } else {
+ ds_put_format(&new_lb_match,
+ "ct.new && ip6 && "REG_ORIG_DIP_IPV6 " == %s",
+ lb_vip->vip_str);
+ }
+
+ if (lb_vip->vip_port) {
+ ds_put_format(&new_lb_match, " && "REG_ORIG_TP_DPORT " == %"PRIu16,
+ lb_vip->vip_port);
+ }
+
+ static char *aff_check = REGBIT_KNOWN_LB_SESSION" = chk_lb_aff(); next;";
+ struct ovn_lflow *lflow_ref_aff_check = NULL;
+ /* Check if we already have an established connection for this
+ * tuple and we are within the affinity timeslot. */
+ uint32_t hash_aff_check = ovn_logical_flow_hash(
+ ovn_stage_get_table(S_SWITCH_IN_LB_AFF_CHECK),
+ ovn_stage_get_pipeline(S_SWITCH_IN_LB_AFF_CHECK), 100,
+ ds_cstr(&new_lb_match), aff_check);
+
+ for (size_t i = 0; i < lb->n_nb_ls; i++) {
+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_check,
+ lb->nb_ls[i])) {
+ lflow_ref_aff_check = ovn_lflow_add_at_with_hash(
+ lflows, lb->nb_ls[i], S_SWITCH_IN_LB_AFF_CHECK, 100,
+ ds_cstr(&new_lb_match), aff_check, NULL, NULL,
+ &lb->nlb->header_, OVS_SOURCE_LOCATOR, hash_aff_check);
+ }
+ }
+ ds_destroy(&new_lb_match);
+
+ struct ds aff_action = DS_EMPTY_INITIALIZER;
+ struct ds aff_action_learn = DS_EMPTY_INITIALIZER;
+ struct ds aff_match = DS_EMPTY_INITIALIZER;
+ struct ds aff_match_learn = DS_EMPTY_INITIALIZER;
+
+ bool ipv6 = !IN6_IS_ADDR_V4MAPPED(&lb_vip->vip);
+ const char *ip_match = ipv6 ? "ip6" : "ip4";
+
+ const char *reg_vip = ipv6 ? REG_ORIG_DIP_IPV6 : REG_ORIG_DIP_IPV4;
+ const char *reg_backend =
+ ipv6 ? REG_LB_L2_AFF_BACKEND_IP6 : REG_LB_AFF_BACKEND_IP4;
+
+ /* Prepare common part of affinity LB and affinity learn action. */
+ ds_put_format(&aff_action, REGBIT_CONNTRACK_COMMIT" = 0; %s = %s; ",
+ reg_vip, lb_vip->vip_str);
+ ds_put_cstr(&aff_action_learn, "commit_lb_aff(vip = \"");
+
+ if (lb_vip->vip_port) {
+ ds_put_format(&aff_action, REG_ORIG_TP_DPORT" = %"PRIu16"; ",
+ lb_vip->vip_port);
+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%"PRIu16 : "%s:%"PRIu16,
+ lb_vip->vip_str, lb_vip->vip_port);
+ } else {
+ ds_put_cstr(&aff_action_learn, lb_vip->vip_str);
+ }
+
+ ds_put_cstr(&aff_action, "ct_lb_mark(backends=");
+ ds_put_cstr(&aff_action_learn, "\", backend = \"");
+
+ /* Prepare common part of affinity learn match. */
+ if (lb_vip->vip_port) {
+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
+ "ct.new && %s && %s == %s && "
+ REG_ORIG_TP_DPORT" == %"PRIu16" && %s.dst == ",
+ ip_match, reg_vip, lb_vip->vip_str,
+ lb_vip->vip_port, ip_match);
+ } else {
+ ds_put_format(&aff_match_learn, REGBIT_KNOWN_LB_SESSION" == 0 && "
+ "ct.new && %s && %s == %s && %s.dst == ",
+ ip_match, reg_vip, lb_vip->vip_str, ip_match);
+ }
+
+ /* Prepare common part of affinity match. */
+ ds_put_format(&aff_match, REGBIT_KNOWN_LB_SESSION" == 1 && "
+ "ct.new && %s && %s == ", ip_match, reg_backend);
+
+ /* Store the common part length. */
+ size_t aff_action_len = aff_action.length;
+ size_t aff_action_learn_len = aff_action_learn.length;
+ size_t aff_match_len = aff_match.length;
+ size_t aff_match_learn_len = aff_match_learn.length;
+
+ for (size_t i = 0; i < lb_vip->n_backends; i++) {
+ struct ovn_lb_backend *backend = &lb_vip->backends[i];
+
+ ds_put_cstr(&aff_match_learn, backend->ip_str);
+ ds_put_cstr(&aff_match, backend->ip_str);
+
+ if (backend->port) {
+ ds_put_format(&aff_action, ipv6 ? "[%s]:%d" : "%s:%d",
+ backend->ip_str, backend->port);
+ ds_put_format(&aff_action_learn, ipv6 ? "[%s]:%d" : "%s:%d",
+ backend->ip_str, backend->port);
+
+ ds_put_format(&aff_match_learn, " && %s.dst == %d",
+ lb->proto, backend->port);
+ ds_put_format(&aff_match, " && "REG_LB_AFF_MATCH_PORT" == %d",
+ backend->port);
+ } else {
+ ds_put_cstr(&aff_action, backend->ip_str);
+ ds_put_cstr(&aff_action_learn, backend->ip_str);
+ }
+
+ ds_put_cstr(&aff_action, ");");
+ ds_put_char(&aff_action_learn, '"');
+
+ if (lb_vip->vip_port) {
+ ds_put_format(&aff_action_learn, ", proto = %s", lb->proto);
+ }
+
+ ds_put_format(&aff_action_learn, ", timeout = %d); /* drop */",
+ lb->affinity_timeout);
+
+ struct ovn_lflow *lflow_ref_aff_learn = NULL;
+ uint32_t hash_aff_learn = ovn_logical_flow_hash(
+ ovn_stage_get_table(S_SWITCH_IN_LB_AFF_LEARN),
+ ovn_stage_get_pipeline(S_SWITCH_IN_LB_AFF_LEARN),
+ 100, ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn));
+
+ struct ovn_lflow *lflow_ref_aff_lb = NULL;
+ uint32_t hash_aff_lb = ovn_logical_flow_hash(
+ ovn_stage_get_table(S_SWITCH_IN_LB),
+ ovn_stage_get_pipeline(S_SWITCH_IN_LB),
+ 150, ds_cstr(&aff_match), ds_cstr(&aff_action));
+
+ for (size_t j = 0; j < lb->n_nb_ls; j++) {
+ /* Learn the flow tuple into OFTABLE_CHK_LB_AFFINITY. */
+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_learn,
+ lb->nb_ls[j])) {
+ lflow_ref_aff_learn = ovn_lflow_add_at_with_hash(
+ lflows, lb->nb_ls[j], S_SWITCH_IN_LB_AFF_LEARN, 100,
+ ds_cstr(&aff_match_learn), ds_cstr(&aff_action_learn),
+ NULL, NULL, &lb->nlb->header_, OVS_SOURCE_LOCATOR,
+ hash_aff_learn);
+ }
+ /* Use the already selected backend within the affinity timeslot. */
+ if (!ovn_dp_group_add_with_reference(lflow_ref_aff_lb,
+ lb->nb_ls[j])) {
+ lflow_ref_aff_lb = ovn_lflow_add_at_with_hash(
+ lflows, lb->nb_ls[j], S_SWITCH_IN_LB, 150,
+ ds_cstr(&aff_match), ds_cstr(&aff_action), NULL, NULL,
+ &lb->nlb->header_, OVS_SOURCE_LOCATOR,
+ hash_aff_lb);
+ }
+ }
+
+ ds_truncate(&aff_action, aff_action_len);
+ ds_truncate(&aff_action_learn, aff_action_learn_len);
+ ds_truncate(&aff_match, aff_match_len);
+ ds_truncate(&aff_match_learn, aff_match_learn_len);
+ }
+
+ ds_destroy(&aff_action);
+ ds_destroy(&aff_action_learn);
+ ds_destroy(&aff_match);
+ ds_destroy(&aff_match_learn);
+}
+
+static void
+build_lb_affinity_default_flows(struct ovn_datapath *od, struct hmap *lflows)
+{
+ if (od->nbs) {
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_CHECK, 0, "1", "next;");
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_LB_AFF_LEARN, 0, "1", "next;");
+ }
+ if (od->nbr) {
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_CHECK, 0, "1", "next;");
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_LB_AFF_LEARN, 0, "1", "next;");
+ }
+}
+
static void
build_lb_rules(struct hmap *lflows, struct ovn_northd_lb *lb, bool ct_lb_mark,
struct ds *match, struct ds *action,
@@ -6985,6 +7434,8 @@ build_lb_rules(struct hmap *lflows, struct ovn_northd_lb *lb, bool ct_lb_mark,
priority = 120;
}
+ build_lb_affinity_ls_flows(lflows, lb, lb_vip);
+
struct ovn_lflow *lflow_ref = NULL;
uint32_t hash = ovn_logical_flow_hash(
ovn_stage_get_table(S_SWITCH_IN_LB),
@@ -8451,6 +8902,10 @@ build_lswitch_ip_mcast_igmp_mld(struct ovn_igmp_group *igmp_group,
if (atomic_compare_exchange_strong(
&mcast_sw_info->active_v4_flows, &table_size,
mcast_sw_info->table_size)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_INFO_RL(&rl, "Too many active mcast flows: %"PRIu64,
+ mcast_sw_info->active_v4_flows);
return;
}
atomic_add(&mcast_sw_info->active_v4_flows, 1, &dummy);
@@ -10063,6 +10518,14 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
xcalloc(lb->n_nb_lr, sizeof *distributed_router);
int n_distributed_router = 0;
+ struct ovn_datapath **lb_aff_force_snat_router =
+ xcalloc(lb->n_nb_lr, sizeof *lb_aff_force_snat_router);
+ int n_lb_aff_force_snat_router = 0;
+
+ struct ovn_datapath **lb_aff_router =
+ xcalloc(lb->n_nb_lr, sizeof *lb_aff_router);
+ int n_lb_aff_router = 0;
+
/* Group gw router since we do not have datapath dependency in
* lflow generation for them.
*/
@@ -10081,6 +10544,13 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
distributed_router[n_distributed_router++] = od;
}
+ if (!lport_addresses_is_empty(&od->lb_force_snat_addrs) ||
+ od->lb_force_snat_router_ip) {
+ lb_aff_force_snat_router[n_lb_aff_force_snat_router++] = od;
+ } else {
+ lb_aff_router[n_lb_aff_router++] = od;
+ }
+
if (sset_contains(&od->external_ips, lb_vip->vip_str)) {
/* The load balancer vip is also present in the NAT entries.
* So add a high priority lflow to advance the the packet
@@ -10113,10 +10583,26 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
"flags.force_snat_for_lb = 1; next;",
lflows, prio, meter_groups);
+ /* LB affinity flows for datapaths where the CMS has specified the
+ * force_snat_for_lb flag option.
+ */
+ build_lb_affinity_lr_flows(lflows, lb, lb_vip, new_match,
+ "flags.force_snat_for_lb = 1; ",
+ lb_aff_force_snat_router,
+ n_lb_aff_force_snat_router);
+
build_gw_lrouter_nat_flows_for_lb(lb, gw_router, n_gw_router,
reject, new_match, ds_cstr(action), est_match,
"next;", lflows, prio, meter_groups);
+ /* LB affinity flows for datapaths where the CMS has specified the
+ * skip_snat_for_lb flag option, and for regular datapaths.
+ */
+ char *lb_aff_action =
+ lb->skip_snat ? "flags.skip_snat_for_lb = 1; " : NULL;
+ build_lb_affinity_lr_flows(lflows, lb, lb_vip, new_match, lb_aff_action,
+ lb_aff_router, n_lb_aff_router);
+
/* Distributed router logic */
for (size_t i = 0; i < n_distributed_router; i++) {
struct ovn_datapath *od = distributed_router[i];
@@ -10210,6 +10696,8 @@ build_lrouter_nat_flows_for_lb(struct ovn_lb_vip *lb_vip,
free(gw_router_force_snat);
free(gw_router_skip_snat);
free(distributed_router);
+ free(lb_aff_force_snat_router);
+ free(lb_aff_router);
free(gw_router);
}
@@ -13633,7 +14121,8 @@ static void
build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
const struct hmap *ports, struct ds *match,
struct ds *actions,
- const struct shash *meter_groups)
+ const struct shash *meter_groups,
+ bool ct_lb_mark)
{
if (!od->nbr) {
return;
@@ -13827,6 +14316,26 @@ build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
}
}
+ if (od->nbr->n_nat) {
+ ds_clear(match);
+ const char *ct_natted = ct_lb_mark ?
+ "ct_mark.natted" :
+ "ct_label.natted";
+ ds_put_format(match, "ip && %s == 1", ct_natted);
+ /* This flow is unique since it is in the egress pipeline but checks
+ * the value of ct_label.natted, which would have been set in the
+ * ingress pipeline. If a change is ever introduced that clears or
+ * otherwise invalidates the ct_label between the ingress and egress
+ * pipelines, then an alternative will need to be devised.
+ */
+ ds_clear(actions);
+ ds_put_cstr(actions, REGBIT_DST_NAT_IP_LOCAL" = 1; next;");
+ ovn_lflow_add_with_hint(lflows, od, S_ROUTER_OUT_CHECK_DNAT_LOCAL,
+ 50, ds_cstr(match), ds_cstr(actions),
+ &od->nbr->header_);
+ }
+
/* Handle force SNAT options set in the gateway router. */
if (od->is_gw_router) {
if (dnat_force_snat_ip) {
@@ -13925,7 +14434,9 @@ build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
build_misc_local_traffic_drop_flows_for_lrouter(od, lsi->lflows);
build_lrouter_arp_nd_for_datapath(od, lsi->lflows, lsi->meter_groups);
build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->ports, &lsi->match,
- &lsi->actions, lsi->meter_groups);
+ &lsi->actions, lsi->meter_groups,
+ lsi->features->ct_no_masked_label);
+ build_lb_affinity_default_flows(od, lsi->lflows);
}
/* Helper function to combine all lflow generation which is iterated by port.
@@ -15148,6 +15659,11 @@ build_mcast_groups(struct lflow_input *input_data,
hmap_init(mcast_groups);
hmap_init(igmp_groups);
+ struct ovn_datapath *od;
+
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ init_mcast_flow_count(od);
+ }
HMAP_FOR_EACH (op, key_node, ports) {
if (op->nbrp && lrport_is_enabled(op->nbrp)) {
@@ -15205,8 +15721,7 @@ build_mcast_groups(struct lflow_input *input_data,
}
/* If the datapath value is stale, purge the group. */
- struct ovn_datapath *od =
- ovn_datapath_from_sbrec(datapaths, sb_igmp->datapath);
+ od = ovn_datapath_from_sbrec(datapaths, sb_igmp->datapath);
if (!od || ovn_datapath_is_stale(od)) {
sbrec_igmp_group_delete(sb_igmp);
@@ -15251,7 +15766,6 @@ build_mcast_groups(struct lflow_input *input_data,
* IGMP groups are based on the groups learnt by their multicast enabled
* peers.
*/
- struct ovn_datapath *od;
HMAP_FOR_EACH (od, key_node, datapaths) {
if (ovs_list_is_empty(&od->mcast_info.groups)) {
diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml
index dae961c87..509ca4821 100644
--- a/northd/ovn-northd.8.xml
+++ b/northd/ovn-northd.8.xml
@@ -853,9 +853,56 @@
</li>
</ul>
- <h3>Ingress Table 11: LB</h3>
+ <h3>Ingress Table 11: Load balancing affinity check</h3>
+
+ <p>
+ The load balancing affinity check table contains the following
+ logical flows:
+ </p>
<ul>
+ <li>
+ For all the configured load balancing rules for a switch in
+ <code>OVN_Northbound</code> database where a positive affinity timeout
+ is specified in the <code>options</code> column, that includes a L4
+ port <var>PORT</var> of protocol <var>P</var> and IP address
+ <var>VIP</var>, a priority-100 flow is added. For IPv4
+ <var>VIPs</var>, the flow matches <code>ct.new && ip &&
+ ip4.dst == <var>VIP</var> && <var>P</var>.dst ==
+ <var>PORT</var></code>. For IPv6 <var>VIPs</var>, the flow matches
+ <code>ct.new && ip && ip6.dst == <var>VIP</var> &&
+ <var>P</var> && <var>P</var>.dst == <var>PORT</var></code>.
+ The flow's action is <code>reg9[6] = chk_lb_aff(); next;</code>.
+ </li>
+
+ <li>
+ A priority-0 flow is added which matches on all packets and applies
+ the action <code>next;</code>.
+ </li>
+ </ul>
+
+ <h3>Ingress Table 12: LB</h3>
+
+ <ul>
+ <li>
+ For all the configured load balancing rules for a switch in
+ <code>OVN_Northbound</code> database where a positive affinity timeout
+ is specified in the <code>options</code> column, that includes a L4
+ port <var>PORT</var> of protocol <var>P</var> and IP address
+ <var>VIP</var>, a priority-150 flow is added. For IPv4
+ <var>VIPs</var>, the flow matches <code>reg9[6] == 1 &&
+ ct.new && ip && ip4.dst == <var>VIP</var> &&
+ <var>P</var>.dst == <var>PORT</var></code>. For IPv6
+ <var>VIPs</var>, the flow matches <code>reg9[6] == 1 &&
+ ct.new && ip && ip6.dst == <var>VIP</var> &&
+ <var>P</var> && <var>P</var>.dst == <var>PORT</var></code>.
+ The flow's action is <code>ct_lb_mark(<var>args</var>)</code>, where
+ <var>args</var> contains comma separated IP addresses (and optional
+ port numbers) to load balance to. The address family of the IP
+ addresses of <var>args</var> is the same as the address family
+ of <var>VIP</var>.
+ </li>
+
<li>
For all the configured load balancing rules for a switch in
<code>OVN_Northbound</code> database that includes a L4 port
@@ -914,7 +961,38 @@
</li>
</ul>
- <h3>Ingress table 12: <code>from-lport</code> ACLs after LB</h3>
+ <h3>Ingress Table 13: Load balancing affinity learn</h3>
+
+ <p>
+ The load balancing affinity learn table contains the following
+ logical flows:
+ </p>
+
+ <ul>
+ <li>
+ For all the configured load balancing rules for a switch in
+ <code>OVN_Northbound</code> database where a positive affinity timeout
+ <var>T</var> is specified in the <code>options</code> column, that
+ includes a L4 port <var>PORT</var> of protocol <var>P</var> and IP
+ address <var>VIP</var>, a priority-100 flow is added. For IPv4
+ <var>VIPs</var>, the flow matches <code>reg9[6] == 0 &&
+ ct.new && ip && ip4.dst == <var>VIP</var> &&
+ <var>P</var>.dst == <var>PORT</var></code>. For IPv6
+ <var>VIPs</var>, the flow matches <code>reg9[6] == 0 &&
+ ct.new && ip && ip6.dst == <var>VIP</var> &&
+ <var>P</var> && <var>P</var>.dst == <var>PORT</var></code>.
+ The flow's action is <code>commit_lb_aff(vip =
+ <var>VIP</var>:<var>PORT</var>, backend =
+ <var>backend ip</var>:<var>backend port</var>, proto = <var>P</var>,
+ timeout = <var>T</var>);</code>.
+ </li>
+
+ <li>
+ A priority-0 flow is added which matches on all packets and applies
+ the action <code>next;</code>.
+ </li>
+ </ul>
+
+ <h3>Ingress table 14: <code>from-lport</code> ACLs after LB</h3>
<p>
Logical flows in this table closely reproduce those in the
@@ -976,7 +1054,7 @@
</li>
</ul>
- <h3>Ingress Table 13: Stateful</h3>
+ <h3>Ingress Table 15: Stateful</h3>
<ul>
<li>
@@ -999,7 +1077,7 @@
</li>
</ul>
- <h3>Ingress Table 14: Pre-Hairpin</h3>
+ <h3>Ingress Table 16: Pre-Hairpin</h3>
<ul>
<li>
If the logical switch has load balancer(s) configured, then a
@@ -1017,7 +1095,7 @@
</li>
</ul>
- <h3>Ingress Table 15: Nat-Hairpin</h3>
+ <h3>Ingress Table 17: Nat-Hairpin</h3>
<ul>
<li>
If the logical switch has load balancer(s) configured, then a
@@ -1052,7 +1130,7 @@
</li>
</ul>
- <h3>Ingress Table 16: Hairpin</h3>
+ <h3>Ingress Table 18: Hairpin</h3>
<ul>
<li>
<p>
@@ -1086,7 +1164,7 @@
</li>
</ul>
- <h3>Ingress Table 17: ARP/ND responder</h3>
+ <h3>Ingress Table 19: ARP/ND responder</h3>
<p>
This table implements ARP/ND responder in a logical switch for known
@@ -1388,7 +1466,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 18: DHCP option processing</h3>
+ <h3>Ingress Table 20: DHCP option processing</h3>
<p>
This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -1449,7 +1527,7 @@ next;
</li>
</ul>
- <h3>Ingress Table 19: DHCP responses</h3>
+ <h3>Ingress Table 21: DHCP responses</h3>
<p>
This table implements DHCP responder for the DHCP replies generated by
@@ -1530,7 +1608,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 20 DNS Lookup</h3>
+ <h3>Ingress Table 22 DNS Lookup</h3>
<p>
This table looks up and resolves the DNS names to the corresponding
@@ -1559,7 +1637,7 @@ reg0[4] = dns_lookup(); next;
</li>
</ul>
- <h3>Ingress Table 21 DNS Responses</h3>
+ <h3>Ingress Table 23 DNS Responses</h3>
<p>
This table implements DNS responder for the DNS replies generated by
@@ -1594,7 +1672,7 @@ output;
</li>
</ul>
- <h3>Ingress table 22 External ports</h3>
+ <h3>Ingress table 24 External ports</h3>
<p>
Traffic from the <code>external</code> logical ports enter the ingress
@@ -1637,7 +1715,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 23 Destination Lookup</h3>
+ <h3>Ingress Table 25 Destination Lookup</h3>
<p>
This table implements switching behavior. It contains these logical
@@ -1806,7 +1884,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 24 Destination unknown</h3>
+ <h3>Ingress Table 26 Destination unknown</h3>
<p>
This table handles the packets whose destination was not found or
@@ -3172,7 +3250,33 @@ icmp6 {
packet de-fragmentation and tracking before sending it to the next table.
</p>
- <h3>Ingress Table 6: DNAT</h3>
+ <h3>Ingress Table 6: Load balancing affinity check</h3>
+
+ <p>
+ The load balancing affinity check table contains the following
+ logical flows:
+ </p>
+
+ <ul>
+ <li>
+ For all the configured load balancing rules for a logical router
+ where a positive affinity timeout is specified in the
+ <code>options</code> column, that includes a L4 port
+ <var>PORT</var> of protocol <var>P</var> and IPv4 or IPv6 address
+ <var>VIP</var>, a priority-100 flow is added that matches on
+ <code>ct.new && ip && reg0 == <var>VIP</var> &&
+ <var>P</var> && reg9[16..31] == <var>PORT</var></code>
+ (<code>xxreg0 == <var>VIP</var></code> in the IPv6 case) with an
+ action of <code>reg9[6] = chk_lb_aff(); next;</code>.
+ </li>
+
+ <li>
+ A priority-0 flow is added which matches on all packets and applies
+ the action <code>next;</code>.
+ </li>
+ </ul>
+
+ <h3>Ingress Table 7: DNAT</h3>
<p>
Packets enter the pipeline with destination IP address that needs to
@@ -3180,7 +3284,7 @@ icmp6 {
in the reverse direction needs to be unDNATed.
</p>
- <p>Ingress Table 6: Load balancing DNAT rules</p>
+ <p>Ingress Table 7: Load balancing DNAT rules</p>
<p>
Following load balancing DNAT flows are added for Gateway router or
@@ -3190,6 +3294,21 @@ icmp6 {
</p>
<ul>
+ <li>
+ For all the configured load balancing rules for a logical router
+ where a positive affinity timeout is specified in the
+ <code>options</code> column, that includes a L4 port
+ <var>PORT</var> of protocol <var>P</var> and IPv4 or IPv6 address
+ <var>VIP</var>, a priority-150 flow is added that matches on
+ <code>reg9[6] == 1 && ct.new && ip &&
+ reg0 == <var>VIP</var> && <var>P</var> &&
+ reg9[16..31] == <var>PORT</var></code> (<code>xxreg0 ==
+ <var>VIP</var></code> in the IPv6 case) with an action of
+ <code>ct_lb_mark(<var>args</var>)</code>, where <var>args</var>
+ contains comma separated IP addresses (and optional port numbers)
+ to load balance to. The address family of the IP addresses of
+ <var>args</var> is the same as the address family of <var>VIP</var>.
+ </li>
+
<li>
If controller_event has been enabled for all the configured load
balancing rules for a Gateway router or Router with gateway port
@@ -3319,7 +3438,7 @@ icmp6 {
</li>
</ul>
- <p>Ingress Table 6: DNAT on Gateway Routers</p>
+ <p>Ingress Table 7: DNAT on Gateway Routers</p>
<ul>
<li>
@@ -3361,7 +3480,7 @@ icmp6 {
</li>
</ul>
- <p>Ingress Table 6: DNAT on Distributed Routers</p>
+ <p>Ingress Table 7: DNAT on Distributed Routers</p>
<p>
On distributed routers, the DNAT table only handles packets
@@ -3416,7 +3535,35 @@ icmp6 {
</li>
</ul>
- <h3>Ingress Table 7: ECMP symmetric reply processing</h3>
+ <h3>Ingress Table 8: Load balancing affinity learn</h3>
+
+ <p>
+ The load balancing affinity learn table contains the following
+ logical flows:
+ </p>
+
+ <ul>
+ <li>
+ For all the configured load balancing rules for a logical router
+ where a positive affinity timeout <var>T</var> is specified in the
+ <code>options</code> column, that includes a L4 port
+ <var>PORT</var> of protocol <var>P</var> and IPv4 or IPv6 address
+ <var>VIP</var>, a priority-100 flow is added that matches on
+ <code>reg9[6] == 0 && ct.new && ip &&
+ reg0 == <var>VIP</var> && <var>P</var> &&
+ reg9[16..31] == <var>PORT</var></code> (<code>xxreg0 ==
+ <var>VIP</var></code> in the IPv6 case) with an action of
+ <code>commit_lb_aff(vip = <var>VIP</var>:<var>PORT</var>, backend =
+ <var>backend ip</var>:<var>backend port</var>, proto = <var>P</var>,
+ timeout = <var>T</var>);</code>.
+ </li>
+
+ <li>
+ A priority-0 flow is added which matches on all packets and applies
+ the action <code>next;</code>.
+ </li>
+ </ul>
+
+ <h3>Ingress Table 9: ECMP symmetric reply processing</h3>
<ul>
<li>
If ECMP routes with symmetric reply are configured in the
@@ -3435,7 +3582,7 @@ icmp6 {
</li>
</ul>
- <h3>Ingress Table 8: IPv6 ND RA option processing</h3>
+ <h3>Ingress Table 10: IPv6 ND RA option processing</h3>
<ul>
<li>
@@ -3465,7 +3612,7 @@ reg0[5] = put_nd_ra_opts(<var>options</var>);next;
</li>
</ul>
- <h3>Ingress Table 9: IPv6 ND RA responder</h3>
+ <h3>Ingress Table 11: IPv6 ND RA responder</h3>
<p>
This table implements IPv6 ND RA responder for the IPv6 ND RA replies
@@ -3510,7 +3657,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 10: IP Routing Pre</h3>
+ <h3>Ingress Table 12: IP Routing Pre</h3>
<p>
If a packet arrived at this table from Logical Router Port <var>P</var>
@@ -3540,7 +3687,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 11: IP Routing</h3>
+ <h3>Ingress Table 13: IP Routing</h3>
<p>
A packet that arrives at this table is an IP packet that should be
@@ -3741,7 +3888,7 @@ select(reg8[16..31], <var>MID1</var>, <var>MID2</var>, ...);
</li>
</ul>
- <h3>Ingress Table 12: IP_ROUTING_ECMP</h3>
+ <h3>Ingress Table 14: IP_ROUTING_ECMP</h3>
<p>
This table implements the second part of IP routing for ECMP routes
@@ -3793,7 +3940,7 @@ outport = <var>P</var>;
</li>
</ul>
- <h3>Ingress Table 13: Router policies</h3>
+ <h3>Ingress Table 15: Router policies</h3>
<p>
This table adds flows for the logical router policies configured
on the logical router. Please see the
@@ -3865,7 +4012,7 @@ next;
</li>
</ul>
- <h3>Ingress Table 14: ECMP handling for router policies</h3>
+ <h3>Ingress Table 16: ECMP handling for router policies</h3>
<p>
This table handles the ECMP for the router policies configured
with multiple nexthops.
@@ -3909,7 +4056,7 @@ outport = <var>P</var>
</li>
</ul>
- <h3>Ingress Table 15: ARP/ND Resolution</h3>
+ <h3>Ingress Table 17: ARP/ND Resolution</h3>
<p>
Any packet that reaches this table is an IP packet whose next-hop
@@ -4110,7 +4257,7 @@ outport = <var>P</var>
</ul>
- <h3>Ingress Table 16: Check packet length</h3>
+ <h3>Ingress Table 18: Check packet length</h3>
<p>
For distributed logical routers or gateway routers with gateway
@@ -4147,7 +4294,7 @@ REGBIT_PKT_LARGER = check_pkt_larger(<var>L</var>); next;
and advances to the next table.
</p>
- <h3>Ingress Table 17: Handle larger packets</h3>
+ <h3>Ingress Table 19: Handle larger packets</h3>
<p>
For distributed logical routers or gateway routers with gateway port
@@ -4210,7 +4357,7 @@ icmp6 {
and advances to the next table.
</p>
- <h3>Ingress Table 18: Gateway Redirect</h3>
+ <h3>Ingress Table 20: Gateway Redirect</h3>
<p>
For distributed logical routers where one or more of the logical router
@@ -4278,7 +4425,7 @@ icmp6 {
</li>
</ul>
- <h3>Ingress Table 19: ARP Request</h3>
+ <h3>Ingress Table 21: ARP Request</h3>
<p>
In the common case where the Ethernet destination has been resolved, this
@@ -4392,6 +4539,22 @@ nd_ns {
</li>
</ul>
+ <p>
+ This table also installs a priority-50 logical flow for each logical
+ router that has NATs configured on it. The flow has match
+ <code>ip && ct_label.natted == 1</code> and action
+ <code>REGBIT_DST_NAT_IP_LOCAL = 1; next;</code>. This is intended
+ to ensure that traffic that was DNATted locally will use a separate
+ conntrack zone for SNAT if SNAT is required later in the egress
+ pipeline. Note that this flow checks the value of
+ <code>ct_label.natted</code>, which is set in the ingress pipeline.
+ This means that ovn-northd assumes that this value is carried over
+ from the ingress pipeline to the egress pipeline and is not altered
+ or cleared. If conntrack label values are ever changed to be cleared
+ between the ingress and egress pipelines, then the match conditions
+ of this flow will need to be updated accordingly.
+ </p>
+
<h3>Egress Table 1: UNDNAT</h3>
<p>
diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c
index 96f17f15f..4bf1afe3b 100644
--- a/northd/ovn-northd.c
+++ b/northd/ovn-northd.c
@@ -125,6 +125,10 @@ static const char *rbac_igmp_group_auth[] =
{""};
static const char *rbac_igmp_group_update[] =
{"address", "chassis", "datapath", "ports"};
+static const char *rbac_bfd_auth[] =
+ {""};
+static const char *rbac_bfd_update[] =
+ {"status"};
static struct rbac_perm_cfg {
const char *table;
@@ -207,6 +211,14 @@ static struct rbac_perm_cfg {
.update = rbac_igmp_group_update,
.n_update = ARRAY_SIZE(rbac_igmp_group_update),
.row = NULL
+ },{
+ .table = "BFD",
+ .auth = rbac_bfd_auth,
+ .n_auth = ARRAY_SIZE(rbac_bfd_auth),
+ .insdel = false,
+ .update = rbac_bfd_update,
+ .n_update = ARRAY_SIZE(rbac_bfd_update),
+ .row = NULL
},{
.table = NULL,
.auth = NULL,
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 7fe88af27..dee9d4c15 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -1908,6 +1908,14 @@
requests only for VIPs that are part of a router's subnet. The default
value of this option, if not specified, is <code>reachable</code>.
</column>
+
+ <column name="options" key="affinity_timeout">
+ If the CMS provides a positive value (in seconds) for
+ <code>affinity_timeout</code>, OVN will DNAT connections received
+ from the same client to this load balancer to the same backend as
+ long as they arrive within the affinity timeout window. The maximum
+ supported <code>affinity_timeout</code> is 65535 seconds.
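+
+ <p>
+ For example, a CMS could enable a 60 second affinity window on an
+ existing load balancer (names are illustrative):
+ <code>ovn-nbctl set load_balancer lb0
+ options:affinity_timeout=60</code>.
+ </p>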
+ </column>
</group>
</table>
diff --git a/ovn-sb.xml b/ovn-sb.xml
index 37a709f83..f9cecb2f9 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -2624,6 +2624,50 @@ tcp.flags = RST;
register <var>R</var> is set to 1.
</p>
</dd>
+
+ <dt>
+ <code>
+ commit_lb_aff(<var>vip</var>, <var>backend</var>,
+ <var>proto</var>, <var>timeout</var>);
+ </code>
+ </dt>
+ <dd>
+ <p>
+ <b>Parameters</b>: load-balancer virtual ip:port <var>vip</var>,
+ load-balancer backend ip:port <var>backend</var>, load-balancer
+ protocol <var>proto</var>, affinity timeout <var>timeout</var>.
+ </p>
+
+ <p>
+ This action translates to an OpenFlow "learn" action that inserts
+ a new flow into table 78.
+ </p>
+
+ <ul>
+ <li>
+ Match on the 4-tuple in table 78: <code>nw_src=client ip</code>,
+ <code>nw_dst=vip ip</code>, <code>ip_proto</code>,
+ <code>tp_dst=vip port</code>; set <code>reg9[6]</code> to 1 and
+ <code>reg4</code> and <code>reg8</code> to the backend ip and port
+ respectively. For IPv6, register <code>xxreg1</code> is used to
+ store the backend ip.
+ </li>
+ </ul>
+
+ <p>
+ This action is applied to new connections received by a specific
+ load balancer that has an affinity timeout configured.
+ </p>
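+
+ <p>
+ A schematic invocation, with illustrative values matching the
+ parameter list above:
+ <code>commit_lb_aff(172.16.0.100:80, 10.0.0.2:8080, tcp,
+ 60);</code>
+ </p>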
+ </dd>
+
+ <dt><code><var>R</var> = chk_lb_aff();</code></dt>
+ <dd>
+ <p>
+ This action checks whether the packet under consideration matches
+ any flow in table 78. If so, the 1-bit destination register
+ <var>R</var> is set to 1.
+ </p>
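+
+ <p>
+ A typical use, mirroring how <code>commit_lb_aff</code> above
+ stores its result bit in <code>reg9[6]</code>, would be
+ <code>reg9[6] = chk_lb_aff(); next;</code>.
+ </p>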
+ </dd>
</dl>
</column>
diff --git a/rhel/ovn-fedora.spec.in b/rhel/ovn-fedora.spec.in
index 821eb03cc..57dc977c1 100644
--- a/rhel/ovn-fedora.spec.in
+++ b/rhel/ovn-fedora.spec.in
@@ -65,6 +65,7 @@ BuildRequires: tcpdump
BuildRequires: unbound unbound-devel
Requires: openssl hostname iproute module-init-tools openvswitch
+Requires: python3-openvswitch
Requires(post): systemd-units
Requires(preun): systemd-units
diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
index 3c3fb31c7..6a0e83c33 100644
--- a/tests/ovn-controller.at
+++ b/tests/ovn-controller.at
@@ -2337,3 +2337,115 @@ done
AT_CHECK([grep "deleted interface patch" hv1/ovs-vswitchd.log], [1], [ignore])
OVN_CLEANUP([hv1])
AT_CLEANUP
+
+AT_SETUP([ovn-controller - resolve CT zone conflicts from ovsdb])
+
+ovn_start
+
+net_add n1
+sim_add hv1
+as hv1
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
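+# Extract the zone number assigned to "name" from the output of
+# "ovn-appctl -t ovn-controller ct-zone-list".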
+get_zone_num () {
+ output=$1
+ name=$2
+ printf "$output" | grep $name | cut -d ' ' -f 2
+}
+
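+# Check that the ct zone stored for "name" in the local OVSDB
+# (br-int external_ids) matches the zone reported by ovn-controller.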
+check_ovsdb_zone() {
+ name=$1
+ ct_zone=$2
+ db_zone=$(ovs-vsctl get Bridge br-int external_ids:ct-zone-${name} | sed -e 's/^"//' -e 's/"$//')
+ test $ct_zone -eq $db_zone
+}
+
+check ovs-vsctl add-port br-int ls0-hv1 -- set Interface ls0-hv1 external-ids:iface-id=ls0-hv1
+check ovs-vsctl add-port br-int ls0-hv2 -- set Interface ls0-hv2 external-ids:iface-id=ls0-hv2
+
+check ovn-nbctl lr-add lr0
+
+check ovn-nbctl ls-add ls0
+check ovn-nbctl lsp-add ls0 ls0-lr0
+check ovn-nbctl lsp-set-type ls0-lr0 router
+check ovn-nbctl lsp-set-addresses ls0-lr0 router
+check ovn-nbctl lrp-add lr0 lr0-ls0 00:00:00:00:00:01 10.0.0.1
+
+check ovn-nbctl lsp-add ls0 ls0-hv1
+check ovn-nbctl lsp-set-addresses ls0-hv1 "00:00:00:00:00:02 10.0.0.2"
+
+check ovn-nbctl lsp-add ls0 ls0-hv2
+check ovn-nbctl lsp-set-addresses ls0-hv2 "00:00:00:00:00:03 10.0.0.3"
+
+check ovn-nbctl lrp-add lr0 lrp-gw 01:00:00:00:00:01 172.16.0.1
+check ovn-nbctl lrp-set-gateway-chassis lrp-gw hv1
+
+check ovn-nbctl --wait=hv sync
+
+ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list)
+echo "$ct_zones"
+
+port1_zone=$(get_zone_num "$ct_zones" ls0-hv1)
+port2_zone=$(get_zone_num "$ct_zones" ls0-hv2)
+
+lr_uuid=$(fetch_column Datapath_Binding _uuid external_ids:name=lr0)
+snat_zone=$(get_zone_num "$ct_zones" ${lr_uuid}_snat)
+echo "snat_zone is $snat_zone"
+
+check test "$port1_zone" -ne "$port2_zone"
+check test "$port2_zone" -ne "$snat_zone"
+check test "$port1_zone" -ne "$snat_zone"
+
+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv1 $port1_zone])
+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv2 $port2_zone])
+OVS_WAIT_UNTIL([check_ovsdb_zone ${lr_uuid}_snat $snat_zone])
+
+# Now purposely request an SNAT zone for lr0 that conflicts with a zone
+# currently assigned to a logical port
+
+snat_req_zone=$port1_zone
+check ovn-nbctl set Logical_Router lr0 options:snat-ct-zone=$snat_req_zone
+check ovn-nbctl --wait=hv sync
+
+ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list)
+echo "$ct_zones"
+
+port1_zone=$(get_zone_num "$ct_zones" ls0-hv1)
+port2_zone=$(get_zone_num "$ct_zones" ls0-hv2)
+snat_zone=$(get_zone_num "$ct_zones" ${lr_uuid}_snat)
+
+check test "$snat_zone" -eq "$snat_req_zone"
+check test "$port1_zone" -ne "$port2_zone"
+check test "$port2_zone" -ne "$snat_zone"
+check test "$port1_zone" -ne "$snat_zone"
+
+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv1 $port1_zone])
+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv2 $port2_zone])
+OVS_WAIT_UNTIL([check_ovsdb_zone ${lr_uuid}_snat $snat_zone])
+
+# Now create a conflict in the OVSDB and restart ovn-controller.
+
+ovs-vsctl set bridge br-int external_ids:ct-zone-ls0-hv1="$snat_req_zone"
+ovs-vsctl set bridge br-int external_ids:ct-zone-ls0-hv2="$snat_req_zone"
+
+ovn-appctl -t ovn-controller inc-engine/recompute
+
+ct_zones=$(ovn-appctl -t ovn-controller ct-zone-list)
+echo "$ct_zones"
+
+port1_zone=$(get_zone_num "$ct_zones" ls0-hv1)
+port2_zone=$(get_zone_num "$ct_zones" ls0-hv2)
+snat_zone=$(get_zone_num "$ct_zones" ${lr_uuid}_snat)
+
+check test "$snat_zone" -eq "$snat_req_zone"
+check test "$port1_zone" -ne "$port2_zone"
+check test "$port2_zone" -ne "$snat_zone"
+check test "$port1_zone" -ne "$snat_zone"
+
+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv1 $port1_zone])
+OVS_WAIT_UNTIL([check_ovsdb_zone ls0-hv2 $port2_zone])
+OVS_WAIT_UNTIL([check_ovsdb_zone ${lr_uuid}_snat $snat_zone])
+
+OVN_CLEANUP([hv1])
+AT_CLEANUP
diff --git a/tests/ovn-ic.at b/tests/ovn-ic.at
index b136472c8..c2e26a4be 100644
--- a/tests/ovn-ic.at
+++ b/tests/ovn-ic.at
@@ -119,6 +119,139 @@ OVN_CLEANUP_IC
AT_CLEANUP
])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-ic -- route deletion upon TS deletion])
+
+ovn_init_ic_db
+net_add n1
+
+# 1 GW per AZ
+for i in 1 2; do
+ az=az$i
+ ovn_start $az
+ sim_add gw-$az
+ as gw-$az
+ check ovs-vsctl add-br br-phys
+ ovn_az_attach $az n1 br-phys 192.168.1.$i
+ check ovs-vsctl set open . external-ids:ovn-is-interconn=true
+ check ovn-nbctl set nb-global . \
+ options:ic-route-adv=true \
+ options:ic-route-adv-default=true \
+ options:ic-route-learn=true \
+ options:ic-route-learn-default=true
+done
+
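+# Create, in AZ $1, a transit switch, a logical router attached to it, and
+# a static route through that router; $2 keeps the names unique.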
+create_ic_infra() {
+ az_id=$1
+ ts_id=$2
+ az=az$az_id
+
+ lsp=lsp${az_id}-${ts_id}
+ lrp=lrp${az_id}-${ts_id}
+ ts=ts${az_id}-${ts_id}
+ lr=lr${az_id}-${ts_id}
+
+ ovn_as $az
+
+ check ovn-ic-nbctl ts-add $ts
+ check ovn-nbctl lr-add $lr
+ check ovn-nbctl lrp-add $lr $lrp 00:00:00:00:00:0$az_id 10.0.$az_id.1/24
+ check ovn-nbctl lrp-set-gateway-chassis $lrp gw-$az
+
+ check ovn-nbctl lsp-add $ts $lsp -- \
+ lsp-set-addresses $lsp router -- \
+ lsp-set-type $lsp router -- \
+ lsp-set-options $lsp router-port=$lrp
+
+ check ovn-nbctl lr-route-add $lr 192.168.0.0/16 10.0.$az_id.10
+}
+
+create_ic_infra 1 1
+create_ic_infra 1 2
+create_ic_infra 2 1
+
+ovn_as az1
+
+wait_row_count ic-sb:Route 3 ip_prefix=192.168.0.0/16
+
+# Remove transit switch 1 (from az1) and check that its route is deleted.
+# The same route from the other AZ and TS should remain.
+check ovn-ic-nbctl ts-del ts1-1
+sleep 2
+ovn-ic-sbctl list route
+ovn-ic-nbctl list transit_switch
+wait_row_count ic-sb:Route 2 ip_prefix=192.168.0.0/16
+ovn-ic-sbctl list route
+
+for i in 1 2; do
+ az=az$i
+ OVN_CLEANUP_SBOX(gw-$az)
+ OVN_CLEANUP_AZ([$az])
+done
+OVN_CLEANUP_IC
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-ic -- duplicate NB route adv/learn])
+
+ovn_init_ic_db
+net_add n1
+
+# 1 GW per AZ
+for i in 1 2; do
+ az=az$i
+ ovn_start $az
+ sim_add gw-$az
+ as gw-$az
+ check ovs-vsctl add-br br-phys
+ ovn_az_attach $az n1 br-phys 192.168.1.$i
+ check ovs-vsctl set open . external-ids:ovn-is-interconn=true
+ check ovn-nbctl set nb-global . \
+ options:ic-route-adv=true \
+ options:ic-route-adv-default=true \
+ options:ic-route-learn=true \
+ options:ic-route-learn-default=true
+done
+
+ovn_as az1
+
+# create transit switch and connect to LR
+check ovn-ic-nbctl ts-add ts1
+for i in 1 2; do
+ ovn_as az$i
+
+ check ovn-nbctl lr-add lr1
+ check ovn-nbctl lrp-add lr1 lrp$i 00:00:00:00:0$i:01 10.0.$i.1/24
+ check ovn-nbctl lrp-set-gateway-chassis lrp$i gw-az$i
+
+ check ovn-nbctl lsp-add ts1 lsp$i -- \
+ lsp-set-addresses lsp$i router -- \
+ lsp-set-type lsp$i router -- \
+ lsp-set-options lsp$i router-port=lrp$i
+done
+
+ovn_as az1
+
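+# Create the same static route twice directly in the NB database, bypassing
+# the duplicate check in "ovn-nbctl lr-route-add"; only one route should be
+# advertised to the IC SB database.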
+ovn-nbctl \
+ --id=@id create logical-router-static-route ip_prefix=1.1.1.1/32 nexthop=10.0.1.10 -- \
+ add logical-router lr1 static_routes @id
+ovn-nbctl \
+ --id=@id create logical-router-static-route ip_prefix=1.1.1.1/32 nexthop=10.0.1.10 -- \
+ add logical-router lr1 static_routes @id
+
+wait_row_count ic-sb:Route 1 ip_prefix=1.1.1.1/32
+
+for i in 1 2; do
+ az=az$i
+ OVN_CLEANUP_SBOX(gw-$az)
+ OVN_CLEANUP_AZ([$az])
+done
+
+OVN_CLEANUP_IC
+AT_CLEANUP
+])
+
OVN_FOR_EACH_NORTHD([
AT_SETUP([ovn-ic -- gateway sync])
diff --git a/tests/ovn-nbctl.at b/tests/ovn-nbctl.at
index 726efa6f4..0d3412742 100644
--- a/tests/ovn-nbctl.at
+++ b/tests/ovn-nbctl.at
@@ -1623,6 +1623,7 @@ AT_CHECK([ovn-nbctl lr-route-add lr0 0.0.0.0/0 192.168.0.1])
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.1.0/24 11.0.1.1 lp0])
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.0.1/24 11.0.0.2])
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.10.0/24 lp0])
+AT_CHECK([ovn-nbctl --bfd lr-route-add lr0 10.0.20.0/24 11.0.2.1 lp0])
AT_CHECK([ovn-nbctl lr-route-add lr0 10.0.10.0/24 lp1], [1], [],
[ovn-nbctl: bad IPv4 nexthop argument: lp1
])
@@ -1676,6 +1677,7 @@ Route Table <main>:
10.0.0.0/24 11.0.0.1 dst-ip
10.0.1.0/24 11.0.1.1 dst-ip lp0
10.0.10.0/24 dst-ip lp0
+ 10.0.20.0/24 11.0.2.1 dst-ip lp0 bfd
20.0.0.0/24 discard dst-ip
9.16.1.0/24 11.0.0.1 src-ip
10.0.0.0/24 11.0.0.2 src-ip
@@ -1683,6 +1685,10 @@ Route Table <main>:
0.0.0.0/0 192.168.0.1 dst-ip
])
+check_row_count nb:BFD 1
+AT_CHECK([ovn-nbctl lr-route-del lr0 10.0.20.0/24])
+check_row_count nb:BFD 0
+
AT_CHECK([ovn-nbctl lrp-add lr0 lp1 f0:00:00:00:00:02 11.0.0.254/24])
AT_CHECK([ovn-nbctl --may-exist lr-route-add lr0 10.0.0.111/24 11.0.0.1 lp1])
AT_CHECK([ovn-nbctl lr-route-list lr0], [0], [dnl
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 7c3c84007..c00831432 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -2149,9 +2149,9 @@ AT_CAPTURE_FILE([sw1flows])
AT_CHECK(
[grep -E 'ls_(in|out)_acl' sw0flows sw1flows | grep pg0 | sort], [0], [dnl
-sw0flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
sw0flows: table=8 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=5); };)
-sw1flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
sw1flows: table=8 (ls_in_acl ), priority=2002 , match=(inport == @pg0 && ip4 && tcp && tcp.dst == 80), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=egress,table=5); };)
])
@@ -2165,10 +2165,10 @@ ovn-sbctl dump-flows sw1 > sw1flows2
AT_CAPTURE_FILE([sw1flows2])
AT_CHECK([grep "ls_out_acl" sw0flows2 sw1flows2 | grep pg0 | sort], [0], [dnl
-sw0flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw0flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw1flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw1flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw0flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw1flows2: table=4 (ls_out_acl ), priority=2002 , match=(outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw1flows2: table=4 (ls_out_acl ), priority=2003 , match=(outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
])
AS_BOX([3])
@@ -2183,16 +2183,16 @@ AT_CAPTURE_FILE([sw1flows3])
AT_CHECK([grep "ls_out_acl" sw0flows3 sw1flows3 | grep pg0 | sort], [0], [dnl
sw0flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
sw0flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
-sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw0flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw0flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
sw1flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[7]] == 1 && (outport == @pg0 && ip)), action=(reg0[[1]] = 1; next;)
sw1flows3: table=4 (ls_out_acl ), priority=2001 , match=(reg0[[8]] == 1 && (outport == @pg0 && ip)), action=(next;)
-sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
-sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[10]] == 1) && outport == @pg0 && ip4 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw1flows3: table=4 (ls_out_acl ), priority=2002 , match=((reg0[[9]] == 1) && outport == @pg0 && ip4 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[10]] == 1) && outport == @pg0 && ip6 && udp), action=(ct_commit { ct_mark.blocked = 1; }; reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
+sw1flows3: table=4 (ls_out_acl ), priority=2003 , match=((reg0[[9]] == 1) && outport == @pg0 && ip6 && udp), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
])
AT_CLEANUP
])
@@ -2364,7 +2364,7 @@ check ovn-nbctl --wait=sb \
-- ls-lb-add ls lb
AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl
- table=12(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;)
+ table=14(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;)
table=3 (ls_out_acl_hint ), priority=0 , match=(1), action=(next;)
table=3 (ls_out_acl_hint ), priority=1 , match=(ct.est && ct_mark.blocked == 0), action=(reg0[[10]] = 1; next;)
table=3 (ls_out_acl_hint ), priority=2 , match=(ct.est && ct_mark.blocked == 1), action=(reg0[[9]] = 1; next;)
@@ -2407,7 +2407,7 @@ ovn-nbctl --wait=sb clear logical_switch ls acls
ovn-nbctl --wait=sb clear logical_switch ls load_balancer
AT_CHECK([ovn-sbctl lflow-list ls | grep -e ls_in_acl_hint -e ls_out_acl_hint -e ls_in_acl -e ls_out_acl | sort], [0], [dnl
- table=12(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;)
+ table=14(ls_in_acl_after_lb ), priority=0 , match=(1), action=(next;)
table=3 (ls_out_acl_hint ), priority=65535, match=(1), action=(next;)
table=4 (ls_out_acl ), priority=65535, match=(1), action=(next;)
table=7 (ls_in_acl_hint ), priority=65535, match=(1), action=(next;)
@@ -3640,11 +3640,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.40:8080);)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -3676,11 +3676,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
])
AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -3722,11 +3722,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
])
AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -3782,11 +3782,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
])
AT_CHECK([grep "lr_out_snat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -3829,8 +3829,8 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | grep skip_snat_for_lb | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.20 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.40:8080);)
])
AT_CHECK([grep "lr_out_snat" lr0flows | grep skip_snat_for_lb | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -3998,7 +3998,7 @@ check_stateful_flows() {
table=? (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 10.0.0.20 && tcp.dst == 80), action=(reg1 = 10.0.0.20; reg2[[0..15]] = 80; ct_lb_mark;)
])
- AT_CHECK([grep "ls_in_lb" sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl
+ AT_CHECK([grep "ls_in_lb " sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl
table=??(ls_in_lb ), priority=0 , match=(1), action=(next;)
table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.4:8080);)
table=??(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 10.0.0.20 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.40:8080);)
@@ -4064,7 +4064,7 @@ AT_CHECK([grep "ls_in_pre_stateful" sw0flows | sort | sed 's/table=./table=?/'],
table=? (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
])
-AT_CHECK([grep "ls_in_lb" sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl
+AT_CHECK([grep "ls_in_lb " sw0flows | sort | sed 's/table=../table=??/'], [0], [dnl
table=??(ls_in_lb ), priority=0 , match=(1), action=(next;)
])
@@ -4925,7 +4925,7 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -4961,7 +4961,7 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -5013,12 +5013,13 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;)
+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;)
table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.10 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;)
table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.20 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;)
table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.30 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;)
@@ -5079,20 +5080,21 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20 && inport == "lr0-public" && is_chassis_resident("cr-lr0-public")), action=(ct_dnat_in_czone(10.0.0.3);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1 && is_chassis_resident("cr-lr0-public")), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && is_chassis_resident("cr-lr0-public")), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;)
+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;)
table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.10 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;)
table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.20 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;)
table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ip4.dst == 172.168.0.30 && is_chassis_resident("cr-lr0-public")), action=(reg9[[4]] = 1; next;)
@@ -5147,20 +5149,21 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;)
+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -5207,20 +5210,21 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;)
+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -5270,22 +5274,23 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;)
+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -5346,24 +5351,25 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=100 , match=(ip && ip4.dst == 172.168.0.20), action=(flags.loopback = 1; ct_dnat(10.0.0.3);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 172.168.0.200 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 172.168.0.200), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.80,10.0.0.81);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.4:8080);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.10 && tcp && reg9[[16..31]] == 9082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.100 && tcp && reg9[[16..31]] == 8082), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:82,10.0.0.60:82);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip6 && xxreg0 == def0::2 && tcp && reg9[[16..31]] == 8000), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=[[aef0::2]]:80,[[aef0::3]]:80);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
table=? (lr_out_chk_dnat_local), priority=0 , match=(1), action=(reg9[[4]] = 0; next;)
+ table=? (lr_out_chk_dnat_local), priority=50 , match=(ip && ct_mark.natted == 1), action=(reg9[[4]] = 1; next;)
])
AT_CHECK([grep "lr_out_undnat" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -5413,11 +5419,11 @@ AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
])
AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
- table=6 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
- table=6 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && tcp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.168.0.210 && udp && reg9[[16..31]] == 60), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.50:6062,10.0.0.60:6062);)
])
AT_CHECK([grep "lr_out_chk_dnat_local" lr0flows | sed 's/table=./table=?/' | sort], [0], [dnl
@@ -6129,7 +6135,6 @@ AT_CHECK([grep -e "(lr_in_ip_routing ).*outport" lr0flows | sed 's/table=../ta
])
AT_CLEANUP
-])
OVN_FOR_EACH_NORTHD([
AT_SETUP([check exclude-lb-vips-from-garp option])
@@ -6508,7 +6513,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
table=??(ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
])
-AT_CHECK([grep -e "ls_in_lb" lsflows | sed 's/table=../table=??/' | sort], [0], [dnl
+AT_CHECK([grep -e "ls_in_lb " lsflows | sed 's/table=../table=??/' | sort], [0], [dnl
table=??(ls_in_lb ), priority=0 , match=(1), action=(next;)
table=??(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 10.0.0.2), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.10);)
])
@@ -6561,7 +6566,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
table=??(ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
])
-AT_CHECK([grep -e "ls_in_lb" lsflows | sed 's/table=../table=??/' | sort], [0], [dnl
+AT_CHECK([grep -e "ls_in_lb " lsflows | sed 's/table=../table=??/' | sort], [0], [dnl
table=??(ls_in_lb ), priority=0 , match=(1), action=(next;)
table=??(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 10.0.0.2), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.10);)
])
@@ -6614,7 +6619,7 @@ AT_CHECK([grep -e "ls_in_acl" lsflows | sed 's/table=../table=??/' | sort], [0],
table=??(ls_in_acl_hint ), priority=7 , match=(ct.new && !ct.est), action=(reg0[[7]] = 1; reg0[[9]] = 1; next;)
])
-AT_CHECK([grep -e "ls_in_lb" lsflows | sed 's/table=../table=??/' | sort], [0], [dnl
+AT_CHECK([grep -e "ls_in_lb " lsflows | sed 's/table=../table=??/' | sort], [0], [dnl
table=??(ls_in_lb ), priority=0 , match=(1), action=(next;)
table=??(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 10.0.0.2), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.10);)
])
@@ -7582,7 +7587,7 @@ sort | sed 's/table=../table=??/' ], [0], [dnl
table=??(ls_in_check_port_sec), priority=100 , match=(vlan.present), action=(drop;)
table=??(ls_in_check_port_sec), priority=50 , match=(1), action=(reg0[[15]] = check_in_port_sec(); next;)
table=??(ls_in_check_port_sec), priority=70 , match=(inport == "localnetport"), action=(set_queue(10); reg0[[15]] = check_in_port_sec(); next;)
- table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p1"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=16);)
+ table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p1"), action=(reg0[[14]] = 1; next(pipeline=ingress, table=18);)
table=??(ls_in_check_port_sec), priority=70 , match=(inport == "sw0p2"), action=(set_queue(10); reg0[[15]] = check_in_port_sec(); next;)
table=??(ls_in_apply_port_sec), priority=0 , match=(1), action=(next;)
table=??(ls_in_apply_port_sec), priority=50 , match=(reg0[[15]] == 1), action=(drop;)
@@ -7619,11 +7624,11 @@ check ovn-nbctl \
AS_BOX([No chassis registered - use ct_lb_mark and ct_mark.natted])
check ovn-nbctl --wait=sb sync
AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
table=6 (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;)
table=6 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
- table=11(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
+ table=12(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
])
@@ -7631,11 +7636,11 @@ AS_BOX([Chassis registered that doesn't support ct_lb_mark - use ct_lb and ct_la
check ovn-sbctl chassis-add hv geneve 127.0.0.1
check ovn-nbctl --wait=sb sync
AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_label.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb(backends=42.42.42.2);)
table=6 (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb;)
table=6 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;)
- table=11(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb(backends=42.42.42.2);)
+ table=12(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb(backends=42.42.42.2);)
table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb;)
])
@@ -7643,11 +7648,11 @@ AS_BOX([Chassis upgrades and supports ct_lb_mark - use ct_lb_mark and ct_mark.na
check ovn-sbctl set chassis hv other_config:ct-no-masked-label=true
check ovn-nbctl --wait=sb sync
AT_CHECK([ovn-sbctl lflow-list | grep -e natted -e ct_lb], [0], [dnl
- table=6 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
- table=6 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.est && ip4 && reg0 == 66.66.66.66 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=110 , match=(ct.new && ip4 && reg0 == 66.66.66.66), action=(ct_lb_mark(backends=42.42.42.2);)
table=6 (ls_in_pre_stateful ), priority=120 , match=(ip4.dst == 66.66.66.66), action=(reg1 = 66.66.66.66; ct_lb_mark;)
table=6 (ls_in_pre_stateful ), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
- table=11(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
+ table=12(ls_in_lb ), priority=110 , match=(ct.new && ip4.dst == 66.66.66.66), action=(reg0[[1]] = 0; ct_lb_mark(backends=42.42.42.2);)
table=2 (ls_out_pre_stateful), priority=110 , match=(reg0[[2]] == 1), action=(ct_lb_mark;)
])
@@ -7801,11 +7806,11 @@ ovn-sbctl dump-flows S1 > S1flows
AT_CAPTURE_FILE([S0flows])
AT_CAPTURE_FILE([S1flows])
-AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;)
+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
])
-AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;)
+AT_CHECK([grep "ls_in_lb " S1flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
])
ovn-nbctl --wait=sb set NB_Global . options:install_ls_lb_from_router=true
@@ -7816,13 +7821,13 @@ ovn-sbctl dump-flows S1 > S1flows
AT_CAPTURE_FILE([S0flows])
AT_CAPTURE_FILE([S1flows])
-AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;)
- table=11(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
+ table=12(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
])
-AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;)
- table=11(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
+AT_CHECK([grep "ls_in_lb " S1flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
+ table=12(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
])
ovn-sbctl get datapath S0 _uuid > dp_uuids
@@ -7841,14 +7846,137 @@ ovn-sbctl dump-flows S1 > S1flows
AT_CAPTURE_FILE([S0flows])
AT_CAPTURE_FILE([S1flows])
-AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;)
+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
])
-AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
- table=11(ls_in_lb ), priority=0 , match=(1), action=(next;)
+AT_CHECK([grep "ls_in_lb " S1flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
])
check_column "" sb:load_balancer datapaths name=lb0
AT_CLEANUP
])
+
+AT_SETUP([check lb-affinity flows])
+AT_KEYWORDS([lb-affinity-flows])
+ovn_start
+
+ovn-nbctl lr-add R1
+ovn-nbctl set logical_router R1 options:chassis=hv1
+ovn-nbctl lrp-add R1 R1-S0 02:ac:10:01:00:01 10.0.0.1/24
+ovn-nbctl lrp-add R1 R1-S1 02:ac:10:01:01:01 20.0.0.1/24
+ovn-nbctl lrp-add R1 R1-PUB 02:ac:20:01:01:01 172.16.0.1/24
+
+ovn-nbctl ls-add S0
+ovn-nbctl lsp-add S0 S0-R1
+ovn-nbctl lsp-set-type S0-R1 router
+ovn-nbctl lsp-set-addresses S0-R1 02:ac:10:01:00:01
+ovn-nbctl lsp-set-options S0-R1 router-port=R1-S0
+
+ovn-nbctl ls-add S1
+ovn-nbctl lsp-add S1 S1-R1
+ovn-nbctl lsp-set-type S1-R1 router
+ovn-nbctl lsp-set-addresses S1-R1 02:ac:10:01:01:01
+ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
+
+# Add load balancer lb0 to logical router R1 and logical switch S0
+ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80,20.0.0.2:80 tcp
+ovn-nbctl lr-lb-add R1 lb0
+ovn-nbctl ls-lb-add S0 lb0
+
+ovn-sbctl dump-flows S0 > S0flows
+ovn-sbctl dump-flows R1 > R1flows
+
+AT_CAPTURE_FILE([S0flows])
+AT_CAPTURE_FILE([R1flows])
+
+AT_CHECK([grep "ls_in_lb_aff_check" S0flows | sort], [0], [dnl
+ table=11(ls_in_lb_aff_check ), priority=0 , match=(1), action=(next;)
+])
+AT_CHECK([grep "ls_in_lb_aff_learn" S0flows | sort], [0], [dnl
+ table=13(ls_in_lb_aff_learn ), priority=0 , match=(1), action=(next;)
+])
+
+AT_CHECK([grep "lr_in_lb_aff_check" R1flows | sort], [0], [dnl
+ table=6 (lr_in_lb_aff_check ), priority=0 , match=(1), action=(next;)
+])
+AT_CHECK([grep "lr_in_lb_aff_learn" R1flows | sort], [0], [dnl
+ table=8 (lr_in_lb_aff_learn ), priority=0 , match=(1), action=(next;)
+])
+
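+# Once affinity_timeout is set, northd should generate the affinity check,
+# learn and per-backend flows verified below.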
+ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60
+
+AS_BOX([Test LS flows])
+ovn-sbctl dump-flows S0 > S0flows
+AT_CAPTURE_FILE([S0flows])
+
+AT_CHECK([grep "ls_in_lb_aff_check" S0flows | sort], [0], [dnl
+ table=11(ls_in_lb_aff_check ), priority=0 , match=(1), action=(next;)
+ table=11(ls_in_lb_aff_check ), priority=100 , match=(ct.new && ip4 && reg1 == 172.16.0.10 && reg2[[0..15]] == 80), action=(reg9[[6]] = chk_lb_aff(); next;)
+])
+AT_CHECK([grep "ls_in_lb " S0flows | sort], [0], [dnl
+ table=12(ls_in_lb ), priority=0 , match=(1), action=(next;)
+ table=12(ls_in_lb ), priority=120 , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
+ table=12(ls_in_lb ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0[[1]] = 0; reg1 = 172.16.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=10.0.0.2:80);)
+ table=12(ls_in_lb ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0[[1]] = 0; reg1 = 172.16.0.10; reg2[[0..15]] = 80; ct_lb_mark(backends=20.0.0.2:80);)
+])
+AT_CHECK([grep "ls_in_lb_aff_learn" S0flows | sort], [0], [dnl
+ table=13(ls_in_lb_aff_learn ), priority=0 , match=(1), action=(next;)
+ table=13(ls_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg1 == 172.16.0.10 && reg2[[0..15]] == 80 && ip4.dst == 10.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "10.0.0.2:80", proto = tcp, timeout = 60); /* drop */)
+ table=13(ls_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg1 == 172.16.0.10 && reg2[[0..15]] == 80 && ip4.dst == 20.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "20.0.0.2:80", proto = tcp, timeout = 60); /* drop */)
+])
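+# Affinity pipeline on the switch: ls_in_lb_aff_check runs chk_lb_aff to look
+# up a learned client/backend mapping, ls_in_lb prefers the learned backend
+# at priority 150 and ls_in_lb_aff_learn commits new mappings via
+# commit_lb_aff.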
+
+AS_BOX([Test LR flows])
+ovn-sbctl dump-flows R1 > R1flows
+AT_CAPTURE_FILE([R1flows])
+
+AT_CHECK([grep "lr_in_lb_aff_check" R1flows | sort], [0], [dnl
+ table=6 (lr_in_lb_aff_check ), priority=0 , match=(1), action=(next;)
+ table=6 (lr_in_lb_aff_check ), priority=100 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(reg9[[6]] = chk_lb_aff(); next;)
+])
+AT_CHECK([grep "lr_in_dnat " R1flows | sort], [0], [dnl
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=10.0.0.2:80);)
+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; ct_lb_mark(backends=20.0.0.2:80);)
+])
+AT_CHECK([grep "lr_in_lb_aff_learn" R1flows | sort], [0], [dnl
+ table=8 (lr_in_lb_aff_learn ), priority=0 , match=(1), action=(next;)
+ table=8 (lr_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg0 == 172.16.0.10 && reg9[[16..31]] == 80 && ip4.dst == 10.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "10.0.0.2:80", proto = tcp, timeout = 60); /* drop */)
+ table=8 (lr_in_lb_aff_learn ), priority=100 , match=(reg9[[6]] == 0 && ct.new && ip4 && reg0 == 172.16.0.10 && reg9[[16..31]] == 80 && ip4.dst == 20.0.0.2 && tcp.dst == 80), action=(commit_lb_aff(vip = "172.16.0.10:80", backend = "20.0.0.2:80", proto = tcp, timeout = 60); /* drop */)
+])
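+# Same three-stage layout on the router side: lr_in_lb_aff_check, lr_in_dnat
+# and lr_in_lb_aff_learn.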
+
+AS_BOX([Test LR flows - skip_snat=true])
+check ovn-nbctl --wait=sb set load_balancer lb0 options:skip_snat=true
+
+ovn-sbctl dump-flows R1 > R1flows_skip_snat
+AT_CAPTURE_FILE([R1flows_skip_snat])
+
+AT_CHECK([grep "lr_in_dnat " R1flows_skip_snat | sort], [0], [dnl
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.skip_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80);)
+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.skip_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80);)
+])
+
+check ovn-nbctl remove load_balancer lb0 options skip_snat
+
+AS_BOX([Test LR flows - lb_force_snat_ip="172.16.0.1"])
+check ovn-nbctl --wait=sb set logical_router R1 options:lb_force_snat_ip="172.16.0.1"
+
+ovn-sbctl dump-flows R1 > R1flows_force_snat
+AT_CAPTURE_FILE([R1flows_force_snat])
+
+AT_CHECK([grep "lr_in_dnat " R1flows_force_snat | sort], [0], [dnl
+ table=7 (lr_in_dnat ), priority=0 , match=(1), action=(next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.est && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80 && ct_mark.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+ table=7 (lr_in_dnat ), priority=120 , match=(ct.new && ip4 && reg0 == 172.16.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80,20.0.0.2:80);)
+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 10.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=10.0.0.2:80);)
+ table=7 (lr_in_dnat ), priority=150 , match=(reg9[[6]] == 1 && ct.new && ip4 && reg4 == 20.0.0.2 && reg8[[0..15]] == 80), action=(reg0 = 172.16.0.10; flags.force_snat_for_lb = 1; ct_lb_mark(backends=20.0.0.2:80);)
+])
+
+AT_CLEANUP
+])
diff --git a/tests/ovn.at b/tests/ovn.at
index 80e9192ca..63b419154 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -2094,6 +2094,20 @@ reg9[5] = chk_ecmp_nh_mac();
reg9[5] = chk_ecmp_nh();
encodes as set_field:0/0x2000->reg10,resubmit(,77),move:NXM_NX_REG10[13]->OXM_OF_PKT_REG4[5]
+# commit_lb_aff
+commit_lb_aff(vip = "172.16.0.123:8080", backend = "10.0.0.3:8080", proto = tcp, timeout = 30);
+ encodes as learn(table=78,idle_timeout=30,delete_learned,OXM_OF_METADATA[],eth_type=0x800,NXM_OF_IP_SRC[],ip_dst=172.16.0.123,nw_proto=6,tcp_dst=8080,load:0x1->NXM_NX_REG10[14],load:0xa000003->NXM_NX_REG4[],load:0x1f90->NXM_NX_REG8[0..15])
+
+commit_lb_aff(vip = "172.16.0.123", backend = "10.0.0.3", timeout = 30);
+ encodes as learn(table=78,idle_timeout=30,delete_learned,OXM_OF_METADATA[],eth_type=0x800,NXM_OF_IP_SRC[],ip_dst=172.16.0.123,load:0x1->NXM_NX_REG10[14],load:0xa000003->NXM_NX_REG4[])
+
+commit_lb_aff(vip = "[::1]:8080", backend = "[::2]:8080", proto = tcp, timeout = 30);
+ encodes as learn(table=78,idle_timeout=30,delete_learned,OXM_OF_METADATA[],eth_type=0x86dd,NXM_NX_IPV6_SRC[],ipv6_dst=::1,nw_proto=6,tcp_dst=8080,load:0x1->NXM_NX_REG10[14],load:0x2->NXM_NX_XXREG0[],load:0x1f90->NXM_NX_REG8[0..15])
+
+# chk_lb_aff()
+reg9[6] = chk_lb_aff();
+ encodes as set_field:0/0x4000->reg10,resubmit(,78),move:NXM_NX_REG10[14]->OXM_OF_PKT_REG4[6]
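+# commit_lb_aff installs a learn() flow in table 78 whose idle_timeout is the
+# affinity timeout; chk_lb_aff clears the result bit, resubmits to table 78
+# and copies the lookup result from reg10 back into the logical register.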
+
# push/pop
push(xxreg0);push(xxreg1[10..20]);push(eth.src);pop(xxreg0[0..47]);pop(xxreg0[48..57]);pop(xxreg1);
formats as push(xxreg0); push(xxreg1[10..20]); push(eth.src); pop(xxreg0[0..47]); pop(xxreg0[48..57]); pop(xxreg1);
@@ -16051,7 +16065,7 @@ ovn-sbctl dump-flows sw0 > sw0-flows
AT_CAPTURE_FILE([sw0-flows])
AT_CHECK([grep -E 'ls_(in|out)_acl' sw0-flows |grep reject| sed 's/table=../table=??/' | sort], [0], [dnl
- table=??(ls_out_acl ), priority=2002 , match=(ip), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=23); };)
+ table=??(ls_out_acl ), priority=2002 , match=(ip), action=(reg0 = 0; reject { /* eth.dst <-> eth.src; ip.dst <-> ip.src; is implicit. */ outport <-> inport; next(pipeline=ingress,table=25); };)
])
@@ -18619,7 +18633,7 @@ wait_for_ports_up ls1-lp_ext1
# There should be a flow in hv2 to drop traffic from ls1-lp_ext1 destined
# to router mac.
AT_CHECK([as hv2 ovs-ofctl dump-flows br-int \
-table=30,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
+table=32,dl_src=f0:00:00:00:00:03,dl_dst=a0:10:00:00:00:01 | \
grep -c "actions=drop"], [0], [1
])
# Stop ovn-controllers on hv1 and hv3.
@@ -20290,7 +20304,7 @@ check_row_count Port_Binding 1 logical_port=sw0-vir virtual_parent=sw0-p1
wait_for_ports_up sw0-vir
check ovn-nbctl --wait=hv sync
AT_CHECK([test 2 = `cat hv1/ovn-controller.log | grep "pinctrl received packet-in" | \
-grep opcode=BIND_VPORT | grep OF_Table_ID=25 | wc -l`])
+grep opcode=BIND_VPORT | grep OF_Table_ID=27 | wc -l`])
wait_row_count Port_Binding 1 logical_port=sw0-vir6 chassis=$hv1_ch_uuid
check_row_count Port_Binding 1 logical_port=sw0-vir6 virtual_parent=sw0-p1
@@ -20339,7 +20353,7 @@ eth_dst=00000000ff01
ip_src=$(ip_to_hex 10 0 0 10)
ip_dst=$(ip_to_hex 172 168 0 101)
send_icmp_packet 1 1 $eth_src $eth_dst $ip_src $ip_dst c4c9 0000000000000000000000
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int metadata=0x$lr0_dp_key | awk '/table=26, n_packets=1, n_bytes=45/{print $7" "$8}'],[0],[dnl
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int metadata=0x$lr0_dp_key | awk '/table=28, n_packets=1, n_bytes=45/{print $7" "$8}'],[0],[dnl
priority=80,ip,reg15=0x3,metadata=0x3,nw_src=10.0.0.10 actions=drop
])
@@ -26331,7 +26345,7 @@ ovn-sbctl dump-flows > sbflows
AT_CAPTURE_FILE([sbflows])
AT_CAPTURE_FILE([offlows])
OVS_WAIT_UNTIL([
- as hv1 ovs-ofctl dump-flows br-int table=21 > offlows
+ as hv1 ovs-ofctl dump-flows br-int table=23 > offlows
test $(grep -c "load:0x64->NXM_NX_PKT_MARK" offlows) = 1 && \
test $(grep -c "load:0x3->NXM_NX_PKT_MARK" offlows) = 1 && \
test $(grep -c "load:0x4->NXM_NX_PKT_MARK" offlows) = 1 && \
@@ -26424,12 +26438,12 @@ send_ipv4_pkt hv1 hv1-vif1 505400000003 00000000ff01 \
$(ip_to_hex 10 0 0 3) $(ip_to_hex 172 168 0 120)
OVS_WAIT_UNTIL([
- test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \
+ test 1 -eq $(as hv1 ovs-ofctl dump-flows br-int table=23 | \
grep "load:0x2->NXM_NX_PKT_MARK" -c)
])
AT_CHECK([
- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \
+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=23 | \
grep "load:0x64->NXM_NX_PKT_MARK" -c)
])
@@ -27121,23 +27135,23 @@ check ovn-nbctl --wait=hv sync
# Ensure ECMP symmetric reply flows are not present on any hypervisor.
AT_CHECK([
- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=15 | \
+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=17 | \
grep "priority=100" | \
grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c)
])
AT_CHECK([
- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \
+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=25 | \
grep "priority=200" | \
grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c)
])
AT_CHECK([
- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=15 | \
+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=17 | \
grep "priority=100" | \
grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c)
])
AT_CHECK([
- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=21 | \
+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=25 | \
grep "priority=200" | \
grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c)
])
@@ -27155,11 +27169,11 @@ AT_CAPTURE_FILE([hv2flows])
AT_CHECK([
for hv in 1 2; do
- grep table=15 hv${hv}flows | \
+ grep table=17 hv${hv}flows | \
grep "priority=100" | \
grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_MARK\\[[16..31\\]]))"
- grep table=23 hv${hv}flows | \
+ grep table=25 hv${hv}flows | \
grep "priority=200" | \
grep -c "move:NXM_NX_CT_LABEL\\[[\\]]->NXM_NX_XXREG1\\[[\\]],move:NXM_NX_XXREG1\\[[32..79\\]]->NXM_OF_ETH_DST"
done; :], [0], [dnl
@@ -27247,23 +27261,23 @@ check ovn-nbctl --wait=hv sync
# Ensure ECMP symmetric reply flows are not present on any hypervisor.
AT_CHECK([
- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=15 | \
+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=17 | \
grep "priority=100" | \
grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_LABEL\\[[80..95\\]]))" -c)
])
AT_CHECK([
- test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=21 | \
+ test 0 -eq $(as hv1 ovs-ofctl dump-flows br-int table=25 | \
grep "priority=200" | \
grep "actions=move:NXM_NX_CT_LABEL\\[[32..79\\]]->NXM_OF_ETH_DST\\[[\\]]" -c)
])
AT_CHECK([
- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=15 | \
+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=17 | \
grep "priority=100" | \
grep "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_MARK\\[[16..31\\]]))" -c)
])
AT_CHECK([
- test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=21 | \
+ test 0 -eq $(as hv2 ovs-ofctl dump-flows br-int table=25 | \
grep "priority=200" | \
grep "actions=move:NXM_NX_CT_LABEL\\[[\\]]->NXM_OF_ETH_DST\\[[\\]]" -c)
])
@@ -27280,11 +27294,11 @@ AT_CAPTURE_FILE([hv2flows])
AT_CHECK([
for hv in 1 2; do
- grep table=15 hv${hv}flows | \
+ grep table=17 hv${hv}flows | \
grep "priority=100" | \
grep -c "ct(commit,zone=NXM_NX_REG11\\[[0..15\\]],.*exec(move:NXM_OF_ETH_SRC\\[[\\]]->NXM_NX_CT_LABEL\\[[32..79\\]],load:0x[[0-9]]->NXM_NX_CT_MARK\\[[16..31\\]]))"
- grep table=23 hv${hv}flows | \
+ grep table=25 hv${hv}flows | \
grep "priority=200" | \
grep -c "move:NXM_NX_CT_LABEL\\[[\\]]->NXM_NX_XXREG1\\[[\\]],move:NXM_NX_XXREG1\\[[32..79\\]]->NXM_OF_ETH_DST"
done; :], [0], [dnl
@@ -27748,7 +27762,7 @@ AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep "actions=controller" | grep
])
# The packet should've been dropped in the lr_in_arp_resolve stage.
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=23, n_packets=1,.* priority=1,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int | grep -E "table=25, n_packets=1,.* priority=1,ip,metadata=0x${sw_key},nw_dst=10.0.1.1 actions=drop" -c], [0], [dnl
1
])
@@ -28428,7 +28442,11 @@ ovs-vsctl add-br br-phys
ovn_attach n1 br-phys 192.168.0.1 24 geneve
# Get the encap rec, should be just one - with geneve/192.168.0.1
-encap_rec=$(ovn-sbctl --data=bare --no-heading --column encaps list chassis hv1)
+# Skip initial null encap
+OVS_WAIT_UNTIL(
+ [encap_rec=$(ovn-sbctl --bare --no-heading --columns encaps list chassis hv1)
+ echo "encap_rec = $encap_rec"
+ test $encap_rec])
# Set multiple IPs
as hv1
@@ -28437,9 +28455,10 @@ ovs-vsctl \
# Check if the encap_rec changed - should have, no need to
# compare the exact values.
-encap_rec_mvtep=$(ovn-sbctl --data=bare --no-heading --column encaps list chassis hv1)
-
-AT_CHECK([test "$encap_rec" != "$encap_rec_mvtep"], [0], [])
+OVS_WAIT_UNTIL(
+ [encap_rec_mvtep=$(ovn-sbctl --bare --no-heading --columns encaps list chassis hv1)
+ echo "encap_rec_mvtep = $encap_rec_mvtep"
+ test "$encap_rec" != "$encap_rec_mvtep"])
# now, wait for a couple of secs - should be enough time, I suppose.
sleep 2
@@ -31261,15 +31280,15 @@ done
check ovn-nbctl --wait=hv sync
# hv0 should see flows for lsp1 but not lsp2
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ignore])
-AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=25 | grep 10.0.2.2], [1])
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [0], [ignore])
+AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=27 | grep 10.0.2.2], [1])
# hv2 should see flows for lsp2 but not lsp1
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.2.2], [0], [ignore])
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [1])
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.2.2], [0], [ignore])
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [1])
# Change lrp_lr_ls1 to a regular lrp, hv2 should see flows for lsp1
check ovn-nbctl --wait=hv lrp-del-gateway-chassis lrp_lr_ls1 hv1
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ignore])
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [0], [ignore])
# Change it back, and trigger recompute to make sure extra flows are removed
# from hv2 (recompute is needed because currently I-P adds local datapaths but
@@ -31277,11 +31296,11 @@ AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ig
check ovn-nbctl --wait=hv lrp-set-gateway-chassis lrp_lr_ls1 hv1 1
as hv2 check ovn-appctl -t ovn-controller recompute
ovn-nbctl --wait=hv sync
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [1])
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [1])
# Enable dnat_and_snat on lr, and now hv2 should see flows for lsp1.
AT_CHECK([ovn-nbctl --wait=hv --gateway-port=lrp_lr_ls1 lr-nat-add lr dnat_and_snat 192.168.0.1 10.0.1.3 lsp1 f0:00:00:00:00:03])
-AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=25 | grep 10.0.1.2], [0], [ignore])
+AT_CHECK([as hv2 ovs-ofctl dump-flows br-int table=27 | grep 10.0.1.2], [0], [ignore])
OVN_CLEANUP([hv1],[hv2])
AT_CLEANUP
@@ -32889,3 +32908,231 @@ check ovn-nbctl --wait=hv sync
OVN_CLEANUP([hv1])
AT_CLEANUP
])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-controller: batch add port and delete port in same IDL])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+check ovs-vsctl add-port br-int p1
+
+check ovs-vsctl set interface p1 external-ids:iface-id=sw0-port1
+check ovn-nbctl --wait=hv sync
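+# Pause ovn-controller so that the port add and delete below are received in
+# a single IDL update when processing resumes.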
+ovn-appctl debug/pause
+OVS_WAIT_UNTIL([test x$(as hv1 ovn-appctl -t ovn-controller debug/status) = "xpaused"])
+
+check ovn-nbctl ls-add sw0 -- lsp-add sw0 sw0-port1
+check ovn-nbctl lsp-del sw0-port1
+check ovn-nbctl --wait=sb sync
+
+ovn-appctl debug/resume
+check ovn-nbctl --wait=hv sync
+
+check ovn-nbctl ls-del sw0
+check ovn-nbctl --wait=hv sync
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+])
+
+m4_define([MULTIPLE_OVS_INT],
+ [OVN_FOR_EACH_NORTHD([
+ AT_SETUP([ovn-controller: Multiple OVS interfaces bound to same logical port ($1)])
+ ovn_start
+ net_add n1
+
+ sim_add hv1
+ as hv1
+ ovs-vsctl add-br br-phys
+ ovn_attach n1 br-phys 192.168.0.1
+
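+  # Dump the flows tagged with 'cookie', stripping volatile fields (duration,
+  # idle_age, n_packets, n_bytes) so that flow dumps can be compared.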
+ get_flows()
+ {
+ cookie=${1}
+ ovs-ofctl dump-flows br-int | grep $cookie |
+ sed -e 's/duration=[[0-9.]]*s, //g' |
+ sed -e 's/idle_age=[[0-9]]*, //g' |
+ sed -e 's/n_packets=[[0-9]]*, //g' |
+ sed -e 's/n_bytes=[[0-9]]*, //g'
+ }
+
+ check ovn-nbctl ls-add ls
+ check ovn-nbctl lsp-add ls lp
+ if test X$1 != X; then
+ check ovn-nbctl lsp-set-type lp $1
+ fi
+ check ovn-nbctl lsp-set-addresses lp "00:00:00:01:01:02 192.168.1.2"
+
+ check ovn-nbctl lsp-add ls vm1
+ check ovn-nbctl lsp-set-addresses vm1 "00:00:00:01:01:11 192.168.1.11"
+ check ovs-vsctl add-port br-int vm1 -- set interface vm1 type=internal external_ids:iface-id=vm1
+
+ check ovn-nbctl --wait=hv sync
+
+ check ovs-vsctl add-port br-int lpold -- set interface lpold type=internal
+ check ovs-vsctl set interface lpold external_ids:iface-id=lp
+
+ OVS_WAIT_UNTIL([test x$(ovn-sbctl --bare --columns _uuid find port_binding logical_port=lp) != x])
+ echo ======================================================
+ echo === Flows after iface-id set for the old interface ===
+ echo ======================================================
+ COOKIE=$(ovn-sbctl find port_binding logical_port=lp|grep uuid|cut -d: -f2| cut -c1-8 | sed 's/^\s*0\{0,8\}//')
+
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpold)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ nb_flows=`ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l`
+ echo $nb_flows "flows after iface-id set for old interface"
+
+ echo ======================================================
+ echo === Flows after iface-id set for the new interface ===
+ echo ======================================================
+  # Set external_ids:iface-id within the same transaction as adding the port.
+  # This will generally cause ovn-controller to initially get notified of ovs
+  # interface changes with ofport == 0.
+ check ovs-vsctl add-port br-int lpnew -- set interface lpnew type=internal -- set interface lpnew external_ids:iface-id=lp
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l)
+ flows_lpnew=$(get_flows $COOKIE)
+
+ echo ======================================================
+ echo ======= Flows after old interface is deleted =========
+ echo ======================================================
+ check ovs-vsctl del-port br-int lpold
+  # We do not expect changes, so give ovn-controller time to process any updates.
+ check ovn-nbctl --wait=hv sync
+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l)
+ flows_after_deletion=$(get_flows $COOKIE)
+ check test "$flows_lpnew" = "$flows_after_deletion"
+
+ echo ======================================================
+  echo ======= Flows after lptemp interface is created ======
+ echo ======================================================
+  # Set external_ids:iface-id in a separate transaction from adding the port.
+ # This will generally cause ovn-controller to get notified of ovs interface changes with a proper ofport.
+ check ovs-vsctl add-port br-int lptemp -- set Interface lptemp type=internal
+ check ovs-vsctl set Interface lptemp external_ids:iface-id=lp
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lptemp)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l)
+
+ echo ======================================================
+ echo ======= Flows after lptemp interface is deleted ======
+ echo ======================================================
+ check ovs-vsctl del-port br-int lptemp
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew)
+ echo $ofport
+ ovs-ofctl dump-flows br-int | grep $COOKIE
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l)
+ flows_after_deletion=$(get_flows $COOKIE)
+ check test "$flows_lpnew" = "$flows_after_deletion"
+
+ echo ======================================================
+ echo ======= Flows after new interface is deleted =========
+ echo ======================================================
+ check ovs-vsctl del-port br-int lpnew
+ OVS_WAIT_UNTIL([
+ nb_flows=`ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l`
+ test "${nb_flows}" = 0
+ ])
+
+ echo ======================================================
+ echo ======= Three interfaces bound to the same port ======
+ echo ======================================================
+ check ovs-vsctl add-port br-int lpold -- set interface lpold type=internal
+ check ovs-vsctl set interface lpold external_ids:iface-id=lp
+ check ovs-vsctl add-port br-int lpnew -- set interface lpnew type=internal
+ check ovs-vsctl set interface lpnew external_ids:iface-id=lp
+
+ # Wait for lpnew flows to be installed
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ flows_lpnew=$(get_flows $COOKIE)
+ nb_flows=`ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l`
+
+ check ovs-vsctl add-port br-int lptemp -- set Interface lptemp type=internal
+ check ovs-vsctl set Interface lptemp external_ids:iface-id=lp
+
+ # Wait for lptemp flows to be installed
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lptemp)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+
+  # Delete both lpold and lptemp to return to a stable state.
+ check ovs-vsctl del-port br-int lptemp
+ check ovs-vsctl del-port br-int lpold
+
+ OVS_WAIT_UNTIL([
+ test 0 = $(ovs-vsctl show | grep "Port lpold" | wc -l)
+ ])
+
+ # Wait for correct/lpnew flows to be installed
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l)
+ flows_after_deletion=$(get_flows $COOKIE)
+ check test "$flows_lpnew" = "$flows_after_deletion"
+
+ # Check that recompute still works
+ check ovn-appctl -t ovn-controller recompute
+ OVS_WAIT_UNTIL([
+ ofport=$(ovs-vsctl --bare --columns ofport find Interface name=lpnew)
+ ovs-ofctl dump-flows br-int | grep $COOKIE | grep "actions=output:$ofport"
+ ])
+ check test "$nb_flows" = $(ovs-ofctl dump-flows br-int | grep $COOKIE | wc -l)
+ flows_after_deletion=$(get_flows $COOKIE)
+ check test "$flows_lpnew" = "$flows_after_deletion"
+
+ OVN_CLEANUP([hv1])
+ AT_CLEANUP
+ ])])
+
+MULTIPLE_OVS_INT([localport])
+MULTIPLE_OVS_INT([])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([feature inactivity probe])
+ovn_start
+net_add n1
+
+sim_add hv1
+as hv1
+check ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.1
+
+dnl Ensure that there are at least 3 openflow connections.
+OVS_WAIT_UNTIL([test "$(grep -c 'negotiated OpenFlow version' hv1/ovs-vswitchd.log)" -ge "3"])
+
+dnl "Wait" 3 times 60 seconds and ensure ovn-controller writes to the
+dnl openflow connections in the meantime. This should allow ovs-vswitchd
+dnl to probe the openflow connections at least twice.
+
+as hv1 ovs-appctl time/warp 60000
+check ovn-nbctl --wait=hv sync
+
+as hv1 ovs-appctl time/warp 60000
+check ovn-nbctl --wait=hv sync
+
+as hv1 ovs-appctl time/warp 60000
+check ovn-nbctl --wait=hv sync
+
+AT_CHECK([test -z "`grep disconnecting hv1/ovs-vswitchd.log`"])
+OVN_CLEANUP([hv1])
+AT_CLEANUP
+])
diff --git a/tests/system-common-macros.at b/tests/system-common-macros.at
index 616a87fcf..8e6cb415c 100644
--- a/tests/system-common-macros.at
+++ b/tests/system-common-macros.at
@@ -44,15 +44,38 @@ m4_define([NS_CHECK_EXEC],
# appropriate type, and allows additional arguments to be passed.
m4_define([ADD_BR], [ovs-vsctl _ADD_BR([$1]) -- $2])
-# ADD_INT([port], [namespace], [ovs-br], [ip_addr])
+# ADD_INT([port], [namespace], [ovs-br], [ip_addr], [ip6_addr])
#
# Add an internal port to 'ovs-br', then shift it into 'namespace' and
# configure it with 'ip_addr' (specified in CIDR notation).
+# Optionally add an IPv6 address.
m4_define([ADD_INT],
[ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal])
AT_CHECK([ip link set $1 netns $2])
NS_CHECK_EXEC([$2], [ip addr add $4 dev $1])
NS_CHECK_EXEC([$2], [ip link set dev $1 up])
+ if test -n "$5"; then
+ NS_CHECK_EXEC([$2], [ip -6 addr add $5 dev $1])
+ fi
+ ]
+)
+
+# NS_ADD_INT([port], [namespace], [ovs-br], [ip_addr], [mac_addr], [ip6_addr], [default_gw], [default_ipv6_gw])
+# Create a namespace.
+# Add an internal port to 'ovs-br', then shift it into 'namespace'.
+# Configure it with 'ip_addr' (specified in CIDR notation) and 'ip6_addr'.
+# Set 'mac_addr'.
+# Add default gateways for IPv4 and IPv6.
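+# Example: NS_ADD_INT([ls1p1], [ls1p1], [br-int], [192.168.1.1/24], [00:00:00:01:01:01], [2001::1/64], [192.168.1.254], [2001::a])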
+m4_define([NS_ADD_INT],
+ [ AT_CHECK([ovs-vsctl add-port $3 $1 -- set int $1 type=internal external_ids:iface-id=$1])
+ ADD_NAMESPACES($2)
+ AT_CHECK([ip link set $1 netns $2])
+ NS_CHECK_EXEC([$2], [ip link set $1 address $5])
+ NS_CHECK_EXEC([$2], [ip link set dev $1 up])
+ NS_CHECK_EXEC([$2], [ip addr add $4 dev $1])
+ NS_CHECK_EXEC([$2], [ip addr add $6 dev $1])
+ NS_CHECK_EXEC([$2], [ip route add default via $7 dev $1])
+ NS_CHECK_EXEC([$2], [ip -6 route add default via $8 dev $1])
]
)
@@ -333,4 +356,166 @@ m4_define([OVS_CHECK_CT_CLEAR],
# OVS_CHECK_CT_ZERO_SNAT()
m4_define([OVS_CHECK_CT_ZERO_SNAT],
- [AT_SKIP_IF([! grep -q "Datapath supports ct_zero_snat" ovs-vswitchd.log])]))
+ [AT_SKIP_IF([! grep -q "Datapath supports ct_zero_snat" ovs-vswitchd.log])])
+
+# OVN_TEST_IPV6_PREFIX_DELEGATION()
+m4_define([OVN_TEST_IPV6_PREFIX_DELEGATION],
+[
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+
+ADD_BR([br-int])
+ADD_BR([br-ext])
+
+ovs-ofctl add-flow br-ext action=normal
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+ADD_NAMESPACES(sw01)
+ADD_VETH(sw01, sw01, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \
+ "192.168.1.1")
+ADD_NAMESPACES(sw11)
+ADD_VETH(sw11, sw11, br-int, "192.168.2.2/24", "f0:00:00:02:02:03", \
+ "192.168.2.1")
+ADD_NAMESPACES(server)
+ADD_VETH(s1, server, br-ext, "2001:1db8:3333::2/64", "f0:00:00:01:02:05", \
+ "2001:1db8:3333::1")
+
+if test X"$1" = X"GR"; then
+ ovn-nbctl create Logical_Router name=R1 options:chassis=hv1
+else
+ ovn-nbctl lr-add R1
+fi
+
+ovn-nbctl ls-add sw0
+ovn-nbctl ls-add sw1
+ovn-nbctl ls-add public
+
+ovn-nbctl lrp-add R1 rp-sw0 00:00:01:01:02:03 192.168.1.1/24
+ovn-nbctl lrp-add R1 rp-sw1 00:00:03:01:02:03 192.168.2.1/24
+ovn-nbctl lrp-add R1 rp-public 00:00:02:01:02:03 172.16.1.1/24
+
+if test X"$1" != X"GR"; then
+ ovn-nbctl lrp-set-gateway-chassis rp-public hv1
+fi
+
+ovn-nbctl lsp-add sw0 sw0-rp -- set Logical_Switch_Port sw0-rp \
+ type=router options:router-port=rp-sw0 \
+ -- lsp-set-addresses sw0-rp router
+ovn-nbctl lsp-add sw1 sw1-rp -- set Logical_Switch_Port sw1-rp \
+ type=router options:router-port=rp-sw1 \
+ -- lsp-set-addresses sw1-rp router
+
+ovn-nbctl lsp-add public public-rp -- set Logical_Switch_Port public-rp \
+ type=router options:router-port=rp-public \
+ -- lsp-set-addresses public-rp router
+
+ovn-nbctl lsp-add sw0 sw01 \
+ -- lsp-set-addresses sw01 "f0:00:00:01:02:03 192.168.1.2"
+
+ovn-nbctl lsp-add sw1 sw11 \
+ -- lsp-set-addresses sw11 "f0:00:00:02:02:03 192.168.2.2"
+
+OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep 2001:1db8:3333::2 | grep tentative)" = ""])
+OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep fe80 | grep tentative)" = ""])
+
+AT_CHECK([ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext])
+ovn-nbctl lsp-add public public1 \
+ -- lsp-set-addresses public1 unknown \
+ -- lsp-set-type public1 localnet \
+ -- lsp-set-options public1 network_name=phynet
+
+ovn-nbctl set logical_router_port rp-public options:prefix_delegation=true
+ovn-nbctl set logical_router_port rp-public options:prefix=true
+ovn-nbctl set logical_router_port rp-sw0 options:prefix=true
+ovn-nbctl set logical_router_port rp-sw1 options:prefix=true
+
+OVN_POPULATE_ARP
+
+ovn-nbctl --wait=hv sync
+
+cat > /etc/dhcp/dhcpd.conf <<EOF
+option dhcp-rebinding-time 15;
+option dhcp-renewal-time 10;
+option dhcp6.unicast 2001:1db8:3333::1;
+subnet6 2001:1db8:3333::/64 {
+ prefix6 2001:1db8:3333:100:: 2001:1db8:3333:111:: /96;
+}
+EOF
+rm -f /var/lib/dhcp/dhcpd6.leases
+touch /var/lib/dhcp/dhcpd6.leases
+chown root:dhcpd /var/lib/dhcp /var/lib/dhcp/dhcpd6.leases
+chmod 775 /var/lib/dhcp
+chmod 664 /var/lib/dhcp/dhcpd6.leases
+
+NS_CHECK_EXEC([server], [tcpdump -nni s1 > pkt.pcap &])
+
+NETNS_DAEMONIZE([server], [dhcpd -6 -f s1 > dhcpd.log 2>&1], [dhcpd.pid])
+ovn-nbctl --wait=hv sync
+
+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c4-15)" = ""])
+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c4-15)" = ""])
+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c4-15)" = ""])
+
+AT_CHECK([ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c3-16], [0], [dnl
+[2001:1db8:3333]
+])
+AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl
+[2001:1db8:3333]
+])
+AT_CHECK([ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c3-16], [0], [dnl
+[2001:1db8:3333]
+])
+
+prefix=$(ovn-nbctl list logical_router_port rp-public | awk -F/ '/ipv6_prefix/{print substr($ 1,25,9)}' | sed 's/://g')
+ovn-nbctl list logical_router_port rp-public > /tmp/rp-public
+ovn-nbctl set logical_router_port rp-sw0 options:prefix=false
+ovn-nbctl set logical_router_port rp-sw1 options:prefix=false
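+# The DHCPv6 message type is the first byte of the UDP payload: 40-byte fixed
+# IPv6 header + 8-byte UDP header = offset 48, so the first filter term below
+# matches the message type (0x05 Renew, 0x07 Reply) and the second matches
+# the delegated prefix bytes carried in the IA_PD option.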
+# Renew message
+NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x05 and ip6[[113:4]]=0x${prefix} > renew.pcap &])
+# Reply message with Status OK
+NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x07 and ip6[[81:4]]=0x${prefix} > reply.pcap &])
+
+OVS_WAIT_UNTIL([
+ total_pkts=$(cat renew.pcap | wc -l)
+ test "${total_pkts}" = "1"
+])
+
+OVS_WAIT_UNTIL([
+ total_pkts=$(cat reply.pcap | wc -l)
+ test "${total_pkts}" = "1"
+])
+
+kill $(pidof tcpdump)
+
+ovn-nbctl set logical_router_port rp-sw0 options:prefix=false
+ovn-nbctl clear logical_router_port rp-sw0 ipv6_prefix
+OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16)" = "[2001:1db8:3333]"])
+AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl
+[]
+])
+
+kill $(pidof ovn-controller)
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
+/failed to query port patch-.*/d
+/.*terminating with signal 15.*/d"])
+]))
diff --git a/tests/system-ovn.at b/tests/system-ovn.at
index 8acfb3e39..161c2823e 100644
--- a/tests/system-ovn.at
+++ b/tests/system-ovn.at
@@ -5272,158 +5272,22 @@ AT_CLEANUP
])
OVN_FOR_EACH_NORTHD([
-AT_SETUP([IPv6 prefix delegation])
+AT_SETUP([IPv6 prefix delegation - distributed router])
AT_SKIP_IF([test $HAVE_DHCPD = no])
AT_SKIP_IF([test $HAVE_TCPDUMP = no])
AT_KEYWORDS([ovn-ipv6-prefix_d])
-ovn_start
-OVS_TRAFFIC_VSWITCHD_START()
-
-ADD_BR([br-int])
-ADD_BR([br-ext])
-
-ovs-ofctl add-flow br-ext action=normal
-# Set external-ids in br-int needed for ovn-controller
-ovs-vsctl \
- -- set Open_vSwitch . external-ids:system-id=hv1 \
- -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
- -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
- -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
- -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
-
-# Start ovn-controller
-start_daemon ovn-controller
-
-ovn-nbctl lr-add R1
-
-ovn-nbctl ls-add sw0
-ovn-nbctl ls-add sw1
-ovn-nbctl ls-add public
-
-ovn-nbctl lrp-add R1 rp-sw0 00:00:01:01:02:03 192.168.1.1/24
-ovn-nbctl lrp-add R1 rp-sw1 00:00:03:01:02:03 192.168.2.1/24
-ovn-nbctl lrp-add R1 rp-public 00:00:02:01:02:03 172.16.1.1/24 \
- -- lrp-set-gateway-chassis rp-public hv1
-
-ovn-nbctl lsp-add sw0 sw0-rp -- set Logical_Switch_Port sw0-rp \
- type=router options:router-port=rp-sw0 \
- -- lsp-set-addresses sw0-rp router
-ovn-nbctl lsp-add sw1 sw1-rp -- set Logical_Switch_Port sw1-rp \
- type=router options:router-port=rp-sw1 \
- -- lsp-set-addresses sw1-rp router
-
-ovn-nbctl lsp-add public public-rp -- set Logical_Switch_Port public-rp \
- type=router options:router-port=rp-public \
- -- lsp-set-addresses public-rp router
-
-ADD_NAMESPACES(sw01)
-ADD_VETH(sw01, sw01, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \
- "192.168.1.1")
-ovn-nbctl lsp-add sw0 sw01 \
- -- lsp-set-addresses sw01 "f0:00:00:01:02:03 192.168.1.2"
-
-ADD_NAMESPACES(sw11)
-ADD_VETH(sw11, sw11, br-int, "192.168.2.2/24", "f0:00:00:02:02:03", \
- "192.168.2.1")
-ovn-nbctl lsp-add sw1 sw11 \
- -- lsp-set-addresses sw11 "f0:00:00:02:02:03 192.168.2.2"
-
-ADD_NAMESPACES(server)
-ADD_VETH(s1, server, br-ext, "2001:1db8:3333::2/64", "f0:00:00:01:02:05", \
- "2001:1db8:3333::1")
-
-OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep 2001:1db8:3333::2 | grep tentative)" = ""])
-OVS_WAIT_UNTIL([test "$(ip netns exec server ip a | grep fe80 | grep tentative)" = ""])
-
-AT_CHECK([ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext])
-ovn-nbctl lsp-add public public1 \
- -- lsp-set-addresses public1 unknown \
- -- lsp-set-type public1 localnet \
- -- lsp-set-options public1 network_name=phynet
-
-ovn-nbctl set logical_router_port rp-public options:prefix_delegation=true
-ovn-nbctl set logical_router_port rp-public options:prefix=true
-ovn-nbctl set logical_router_port rp-sw0 options:prefix=true
-ovn-nbctl set logical_router_port rp-sw1 options:prefix=true
-
-OVN_POPULATE_ARP
-
-ovn-nbctl --wait=hv sync
-
-cat > /etc/dhcp/dhcpd.conf <<EOF
-option dhcp-rebinding-time 15;
-option dhcp-renewal-time 10;
-option dhcp6.unicast 2001:1db8:3333::1;
-subnet6 2001:1db8:3333::/64 {
- prefix6 2001:1db8:3333:100:: 2001:1db8:3333:111:: /96;
-}
-EOF
-rm -f /var/lib/dhcp/dhcpd6.leases
-touch /var/lib/dhcp/dhcpd6.leases
-chown root:dhcpd /var/lib/dhcp /var/lib/dhcp/dhcpd6.leases
-chmod 775 /var/lib/dhcp
-chmod 664 /var/lib/dhcp/dhcpd6.leases
-
-NETNS_DAEMONIZE([server], [dhcpd -6 -f s1 > dhcpd.log 2>&1], [dhcpd.pid])
-ovn-nbctl --wait=hv sync
-
-OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c4-15)" = ""])
-OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c4-15)" = ""])
-OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c4-15)" = ""])
-
-AT_CHECK([ovn-nbctl get logical_router_port rp-public ipv6_prefix | cut -c3-16], [0], [dnl
-[2001:1db8:3333]
-])
-AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl
-[2001:1db8:3333]
-])
-AT_CHECK([ovn-nbctl get logical_router_port rp-sw1 ipv6_prefix | cut -c3-16], [0], [dnl
-[2001:1db8:3333]
-])
-
-prefix=$(ovn-nbctl list logical_router_port rp-public | awk -F/ '/ipv6_prefix/{print substr($1,25,9)}' | sed 's/://g')
-ovn-nbctl set logical_router_port rp-sw0 options:prefix=false
-ovn-nbctl set logical_router_port rp-sw1 options:prefix=false
-
-# Renew message
-NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x05 and ip6[[113:4]]=0x${prefix} > renew.pcap &])
-# Reply message with Status OK
-NS_CHECK_EXEC([server], [tcpdump -c 1 -nni s1 ip6[[48:1]]=0x07 and ip6[[81:4]]=0x${prefix} > reply.pcap &])
-
-OVS_WAIT_UNTIL([
- total_pkts=$(cat renew.pcap | wc -l)
- test "${total_pkts}" = "1"
-])
-
-OVS_WAIT_UNTIL([
- total_pkts=$(cat reply.pcap | wc -l)
- test "${total_pkts}" = "1"
-])
-
-kill $(pidof tcpdump)
-
-ovn-nbctl set logical_router_port rp-sw0 options:prefix=false
-ovn-nbctl clear logical_router_port rp-sw0 ipv6_prefix
-OVS_WAIT_WHILE([test "$(ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16)" = "[2001:1db8:3333]"])
-AT_CHECK([ovn-nbctl get logical_router_port rp-sw0 ipv6_prefix | cut -c3-16], [0], [dnl
-[]
+OVN_TEST_IPV6_PREFIX_DELEGATION(DGP)
+AT_CLEANUP
])
-kill $(pidof ovn-controller)
-
-as ovn-sb
-OVS_APP_EXIT_AND_WAIT([ovsdb-server])
-
-as ovn-nb
-OVS_APP_EXIT_AND_WAIT([ovsdb-server])
-
-as northd
-OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([IPv6 prefix delegation - gw router])
+AT_SKIP_IF([test $HAVE_DHCPD = no])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
+AT_KEYWORDS([ovn-ipv6-prefix_d])
-as
-OVS_TRAFFIC_VSWITCHD_STOP(["/.*error receiving.*/d
-/.*terminating with signal 15.*/d"])
+OVN_TEST_IPV6_PREFIX_DELEGATION(GR)
AT_CLEANUP
])
@@ -6489,8 +6353,12 @@ OVS_WAIT_UNTIL([tc qdisc show | grep -q 'htb 1: dev ovs-public'])
OVS_WAIT_UNTIL([tc class show dev ovs-public | \
grep -q 'class htb .* rate 200Kbit ceil 300Kbit burst 375000b cburst 375000b'])
-AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_min_rate=200000])
+
AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_max_rate=300000])
+OVS_WAIT_UNTIL([tc class show dev ovs-public | \
+ grep -q 'class htb .* rate 200Kbit ceil 34359Mbit burst 375000b .*'])
+
+AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_min_rate=200000])
AT_CHECK([ovn-nbctl remove Logical_Switch_Port public options qos_burst=3000000])
OVS_WAIT_UNTIL([test "$(tc qdisc show | grep 'htb 1: dev ovs-public')" = ""])
@@ -8343,3 +8211,985 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
AT_CLEANUP
])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([SNAT in gateway router mode])
+AT_KEYWORDS([ovnnat])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+
+ADD_BR([br-int])
+check ovs-ofctl add-flow br0 action=normal
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ip link set br0 up
+check ovs-vsctl set open . external-ids:ovn-bridge-mappings=provider:br0
+
+check ovn-nbctl ls-add ls1
+check ovn-nbctl lsp-add ls1 ls1p1
+check ovn-nbctl lsp-set-addresses ls1p1 "00:00:00:01:01:01 192.168.1.1 2001::1"
+check ovn-nbctl lsp-add ls1 ls1p2
+check ovn-nbctl lsp-set-addresses ls1p2 "00:00:00:01:01:02 192.168.1.2 2001::2"
+
+check ovn-nbctl lr-add lr1
+check ovn-nbctl lrp-add lr1 lr1-ls1 00:00:00:00:00:01 192.168.1.254/24 2001::a/64
+check ovn-nbctl lsp-add ls1 ls1-lr1
+check ovn-nbctl lsp-set-addresses ls1-lr1 "00:00:00:00:00:01 192.168.1.254 2001::a"
+check ovn-nbctl lsp-set-type ls1-lr1 router
+check ovn-nbctl lsp-set-options ls1-lr1 router-port=lr1-ls1
+
+check ovn-nbctl set logical_router lr1 options:chassis=hv1
+
+check ovn-nbctl lrp-add lr1 lr1-pub 00:00:00:00:0f:01 172.16.1.254/24 1711::a/64
+check ovn-nbctl ls-add pub
+check ovn-nbctl lsp-add pub pub-lr1
+check ovn-nbctl lsp-set-type pub-lr1 router
+check ovn-nbctl lsp-set-options pub-lr1 router-port=lr1-pub
+check ovn-nbctl lsp-set-addresses pub-lr1 router
+
+check ovn-nbctl lsp-add pub ln -- lsp-set-options ln network_name=provider
+check ovn-nbctl lsp-set-type ln localnet
+check ovn-nbctl lsp-set-addresses ln unknown
+
+check ovn-nbctl lr-nat-add lr1 snat 172.16.1.10 192.168.1.0/24
+check ovn-nbctl lr-nat-add lr1 snat 1711::10 2001::/64
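+# SNAT traffic from the internal subnets to the router external addresses
+# 172.16.1.10 and 1711::10.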
+
+NS_ADD_INT(ls1p1, ls1p1, br-int, "192.168.1.1/24", "00:00:00:01:01:01", "2001::1/64", "192.168.1.254", "2001::a" )
+NS_ADD_INT(ls1p2, ls1p2, br-int, "192.168.1.2/24", "00:00:00:01:01:02", "2001::2/64", "192.168.1.254", "2001::a" )
+
+ADD_NAMESPACES(ext1)
+ADD_INT(ext1, ext1, br0, 172.16.1.1/24, 1711::1/64)
+check ovn-nbctl --wait=hv sync
+wait_for_ports_up
+OVS_WAIT_UNTIL([test "$(ip netns exec ls1p1 ip a | grep 2001::1 | grep tentative)" = ""])
+OVS_WAIT_UNTIL([test "$(ip netns exec ls1p2 ip a | grep 2001::2 | grep tentative)" = ""])
+
+NS_CHECK_EXEC([ls1p1], [ping -q -c 3 -i 0.3 -w 2 172.16.1.1 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+NS_CHECK_EXEC([ls1p1], [ping6 -v -q -c 3 -i 0.3 -w 2 1711::1 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d
+/removing policing failed: No such device/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([mcast flow count])
+AT_KEYWORDS([ovnigmp IP-multicast])
+AT_SKIP_IF([test $HAVE_TCPDUMP = no])
+ovn_start
+
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+check ovn-nbctl ls-add ls
+check ovn-nbctl lsp-add ls vm1
+check ovn-nbctl lsp-set-addresses vm1 00:00:00:00:00:01
+check ovn-nbctl lsp-add ls vm2
+check ovn-nbctl lsp-set-addresses vm2 00:00:00:00:00:02
+check ovn-nbctl lsp-add ls vm3
+check ovn-nbctl lsp-set-addresses vm3 00:00:00:00:00:03
+
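+# Enable IGMP snooping on ls without acting as querier; mcast_idle_timeout
+# controls how long snooped groups are kept.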
+check ovn-nbctl set logical_switch ls other_config:mcast_querier=false other_config:mcast_snoop=true other_config:mcast_query_interval=30 other_config:mcast_eth_src=00:00:00:00:00:05 other_config:mcast_ip4_src=42.42.42.5 other_config:mcast_ip6_src=fe80::1 other_config:mcast_idle_timeout=3000
+ovn-sbctl list ip_multicast
+
+wait_igmp_flows_installed()
+{
+ OVS_WAIT_UNTIL([ovs-ofctl dump-flows br-int table=33 | \
+ grep 'priority=90' | grep "nw_dst=$1"])
+}
+
+ADD_NAMESPACES(vm1)
+ADD_INT([vm1], [vm1], [br-int], [42.42.42.1/24])
+NS_CHECK_EXEC([vm1], [ip link set vm1 address 00:00:00:00:00:01], [0])
+NS_CHECK_EXEC([vm1], [ip route add default via 42.42.42.5], [0])
+check ovs-vsctl set Interface vm1 external_ids:iface-id=vm1
+
+ADD_NAMESPACES(vm2)
+ADD_INT([vm2], [vm2], [br-int], [42.42.42.2/24])
+NS_CHECK_EXEC([vm2], [ip link set vm2 address 00:00:00:00:00:02], [0])
+NS_CHECK_EXEC([vm2], [ip link set lo up], [0])
+check ovs-vsctl set Interface vm2 external_ids:iface-id=vm2
+
+ADD_NAMESPACES(vm3)
+NETNS_DAEMONIZE([vm3], [tcpdump -n -i any -nnleX > vm3.pcap 2>/dev/null], [tcpdump3.pid])
+
+ADD_INT([vm3], [vm3], [br-int], [42.42.42.3/24])
+NS_CHECK_EXEC([vm3], [ip link set vm3 address 00:00:00:00:00:03], [0])
+NS_CHECK_EXEC([vm3], [ip link set lo up], [0])
+NS_CHECK_EXEC([vm3], [ip route add default via 42.42.42.5], [0])
+check ovs-vsctl set Interface vm3 external_ids:iface-id=vm3
+
+NS_CHECK_EXEC([vm2], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore])
+NS_CHECK_EXEC([vm3], [sysctl -w net.ipv4.igmp_max_memberships=100], [ignore], [ignore])
+wait_for_ports_up
+
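+# 'autojoin' makes the kernel join the multicast group for the address,
+# sending an IGMP report that should be learned via snooping.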
+NS_CHECK_EXEC([vm3], [ip addr add 228.0.0.1 dev vm3 autojoin], [0])
+wait_igmp_flows_installed 228.0.0.1
+
+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 228.0.0.1], [ignore], [ignore])
+
+OVS_WAIT_UNTIL([
+ requests=`grep "ICMP echo request" -c vm3.pcap`
+ test "${requests}" -ge "3"
+])
+
+NETNS_DAEMONIZE([vm2], [tcpdump -n -i any -nnleX > vm2.pcap 2>/dev/null], [tcpdump2.pid])
+
+for i in `seq 1 40`; do
+ NS_CHECK_EXEC([vm2], [ip addr add 228.1.$i.1 dev vm2 autojoin &], [0])
+ NS_CHECK_EXEC([vm3], [ip addr add 229.1.$i.1 dev vm3 autojoin &], [0])
+    # Do not go too fast: when the SB DB is busy, a full recompute is
+    # more likely (the incremental engine has not run). This test should
+    # avoid recomputes since they can hide incremental-processing errors.
+ sleep 0.2
+done
+
+for i in `seq 1 40`; do
+ wait_igmp_flows_installed 228.1.$i.1
+ wait_igmp_flows_installed 229.1.$i.1
+done
+ovn-sbctl list multicast_group
+
+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 228.1.1.1], [ignore], [ignore])
+
+OVS_WAIT_UNTIL([
+ requests=`grep "ICMP echo request" -c vm2.pcap`
+ test "${requests}" -ge "3"
+])
+
+# The test could still succeed thanks to a lucky northd recompute after
+# hitting too many flows. Double-check that we never hit the error
+# condition.
+AT_CHECK([grep -qE 'Too many active mcast flows' northd/ovn-northd.log], [1])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d
+/removing policing failed: No such device/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([DVR ping router port])
+AT_KEYWORDS([dvr])
+
+ovn_start
+
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+ADD_BR([br-ext])
+
+check ovs-ofctl add-flow br-ext action=normal
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
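+# ovn-chassis-mac-mappings gives this chassis its own MAC for traffic
+# the distributed router port sends onto the localnet network.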
+check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-ext
+check ovs-vsctl set open . external-ids:ovn-chassis-mac-mappings="phys:ee:00:00:00:00:10"
+
+check ovn-nbctl ls-add internal
+
+check ovn-nbctl lsp-add internal ln_internal "" 100
+check ovn-nbctl lsp-set-addresses ln_internal unknown
+check ovn-nbctl lsp-set-type ln_internal localnet
+check ovn-nbctl lsp-set-options ln_internal network_name=phys
+
+check ovn-nbctl lsp-add internal internal-gw
+check ovn-nbctl lsp-set-type internal-gw router
+check ovn-nbctl lsp-set-addresses internal-gw router
+check ovn-nbctl lsp-set-options internal-gw router-port=gw-internal
+
+check ovn-nbctl lsp-add internal vif0
+# Set address as unknown so that LRP has to generate ARP request
+check ovn-nbctl lsp-set-addresses vif0 unknown
+
+check ovn-nbctl lr-add gw
+check ovn-nbctl lrp-add gw gw-internal 00:00:00:00:20:00 192.168.20.1/24
+
+ADD_NAMESPACES(vif0)
+ADD_VETH(vif0, vif0, br-int, "192.168.20.10/24", "00:00:00:00:20:10", "192.168.20.1")
+
+check ovn-nbctl --wait=sb sync
+check ovn-nbctl --wait=hv sync
+
+NS_CHECK_EXEC([vif0], [ping -q -c 3 -i 0.3 -w 1 192.168.20.1 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([load balancing affinity sessions - IPv4])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+# Logical network:
+# Two LRs - R1 and R2 that are connected to each other via LS "join"
+# in 20.0.0.0/24 network. R1 has switches foo (192.168.1.0/24) and
+# bar (192.168.2.0/24) connected to it. R2 has alice (172.16.1.0/24) connected
+# to it. R2 is a gateway router on which we add load-balancing rules.
+#
+# foo -- R1 -- join - R2 -- alice
+# |
+# bar ----
+
+ovn-nbctl create Logical_Router name=R1
+ovn-nbctl create Logical_Router name=R2 options:chassis=hv1
+
+ovn-nbctl ls-add foo
+ovn-nbctl ls-add bar
+ovn-nbctl ls-add alice
+ovn-nbctl ls-add join
+
+# Connect foo to R1
+ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 192.168.1.1/24
+ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \
+ type=router options:router-port=foo addresses=\"00:00:01:01:02:03\"
+
+# Connect bar to R1
+ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 192.168.2.1/24
+ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \
+ type=router options:router-port=bar addresses=\"00:00:01:01:02:04\"
+
+# Connect alice to R2
+ovn-nbctl lrp-add R2 alice 00:00:02:01:02:03 172.16.1.1/24
+ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice \
+ type=router options:router-port=alice addresses=\"00:00:02:01:02:03\"
+
+# Connect R1 to join
+ovn-nbctl lrp-add R1 R1_join 00:00:04:01:02:03 20.0.0.1/24
+ovn-nbctl lsp-add join r1-join -- set Logical_Switch_Port r1-join \
+ type=router options:router-port=R1_join addresses='"00:00:04:01:02:03"'
+
+# Connect R2 to join
+ovn-nbctl lrp-add R2 R2_join 00:00:04:01:02:04 20.0.0.2/24
+ovn-nbctl lsp-add join r2-join -- set Logical_Switch_Port r2-join \
+ type=router options:router-port=R2_join addresses='"00:00:04:01:02:04"'
+
+# Static routes.
+ovn-nbctl lr-route-add R1 172.16.1.0/24 20.0.0.2
+ovn-nbctl lr-route-add R2 192.168.0.0/16 20.0.0.1
+
+# Logical port 'foo1' in switch 'foo'.
+ADD_NAMESPACES(foo1)
+ADD_VETH(foo1, foo1, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \
+ "192.168.1.1")
+ovn-nbctl lsp-add foo foo1 \
+-- lsp-set-addresses foo1 "f0:00:00:01:02:03 192.168.1.2"
+
+# Logical port 'alice1' in switch 'alice'.
+ADD_NAMESPACES(alice1)
+ADD_VETH(alice1, alice1, br-int, "172.16.1.2/24", "f0:00:00:01:02:04", \
+ "172.16.1.1")
+ovn-nbctl lsp-add alice alice1 \
+-- lsp-set-addresses alice1 "f0:00:00:01:02:04 172.16.1.2"
+
+# Logical port 'bar1' in switch 'bar'.
+ADD_NAMESPACES(bar1)
+ADD_VETH(bar1, bar1, br-int, "192.168.2.2/24", "f0:00:00:01:02:05", \
+"192.168.2.1")
+ovn-nbctl lsp-add bar bar1 \
+-- lsp-set-addresses bar1 "f0:00:00:01:02:05 192.168.2.2"
+
+ADD_NAMESPACES(bar2)
+ADD_VETH(bar2, bar2, br-int, "192.168.2.3/24", "e0:00:00:01:02:05", \
+"192.168.2.1")
+ovn-nbctl lsp-add bar bar2 \
+-- lsp-set-addresses bar2 "e0:00:00:01:02:05 192.168.2.3"
+
+# Configure OVN load balancers with VIPs.
+
+ovn-nbctl lb-add lb0 172.16.1.100:8080 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lb-add lb10 172.16.1.110:8080 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lb-add lb0-no-aff 172.16.1.100:8081 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lb-add lb10-no-aff 172.16.1.110:8081 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lr-lb-add R2 lb0
+ovn-nbctl lr-lb-add R2 lb10
+ovn-nbctl lr-lb-add R2 lb0-no-aff
+ovn-nbctl lr-lb-add R2 lb10-no-aff
+
+# Start webservers in 'foo1', 'bar1'.
+NETNS_DAEMONIZE([foo1], [nc -l -k 192.168.1.2 80], [nc-foo1.pid])
+NETNS_DAEMONIZE([bar1], [nc -l -k 192.168.2.2 80], [nc-bar1.pid])
+
+# Wait for ovn-controller to catch up.
+ovn-nbctl --wait=hv sync
+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-groups br-int | \
+grep 'nat(dst=192.168.2.2:80)'])
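+# Router load balancers are translated to OpenFlow select groups with a
+# ct nat(dst=...) bucket per backend, hence the dump-groups check above.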
+
+dnl Should work with the virtual IP address through NAT
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8080])
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.100) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+# Enable lb affinity
+ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60
+ovn-nbctl --wait=sb set load_balancer lb10 options:affinity_timeout=60
+
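+# With an affinity timeout set, every new connection from the same
+# client should be pinned to the backend picked for its first request.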
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8080])
+done
+
+dnl Here we should have just one entry in the conntrack table.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.100) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=192.168.[[0-9]].2/src=192.168.<cleared>.2/'], [0], [dnl
+tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.<cleared>.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+
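+# The learned affinity flow sits in table 78: its idle_timeout matches
+# the configured affinity_timeout and REG4 carries the chosen backend
+# IP (0xc0a80<X>02 is 192.168.<X>.2).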
+AT_CHECK([ovs-ofctl dump-flows br-int table=78 |grep cookie |sed -e 's/duration=[[0-9]]*.[[0-9]]*s/duration=<cleared>/; s/load:0xc0a80[[0-9]]02/load:0xc0a80<cleared>02/; s/n_packets=[[0-9]]*/n_packets=<cleared>/; s/n_bytes=[[0-9]]*/n_bytes=<cleared>/; s/idle_age=[[0-9]]*/idle_age=<cleared>/; s/hard_age=[[0-9]]*, //'], [0], [dnl
+ cookie=0x0, duration=<cleared>, table=78, n_packets=<cleared>, n_bytes=<cleared>, idle_timeout=60, idle_age=<cleared>, tcp,metadata=0x2,nw_src=172.16.1.2,nw_dst=172.16.1.100,tp_dst=8080 actions=load:0x1->NXM_NX_REG10[[14]],load:0xc0a80<cleared>02->NXM_NX_REG4[[]],load:0x50->NXM_NX_REG8[[0..15]]
+])
+
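+# All requests above should have stuck to a single backend: exactly one
+# of the two per-backend flows in table 15 may have a non-zero packet
+# count.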
+check_affinity_flows () {
+    n1=$(ovs-ofctl dump-flows br-int table=15 | \
+        awk '/priority=150,ct_state=\+new\+trk,ip,reg4=0xc0a80102/{print substr($4,11,length($4)-11)}')
+    n2=$(ovs-ofctl dump-flows br-int table=15 | \
+        awk '/priority=150,ct_state=\+new\+trk,ip,reg4=0xc0a80202/{print substr($4,11,length($4)-11)}')
+    [[ $n1 -gt 0 && $n2 -eq 0 ]] || [[ $n1 -eq 0 && $n2 -gt 0 ]]
+    echo $?
+}
+AT_CHECK([test $(check_affinity_flows) -eq 0])
+NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8081])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+ovn-nbctl lb-add lb1 172.16.1.101:8080 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lb-add lb11 172.16.1.111:8080 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lb-add lb1-no-aff 172.16.1.101:8081 192.168.1.2:80,192.168.2.2:80
+ovn-nbctl lb-add lb11-no-aff 172.16.1.111:8081 192.168.1.2:80,192.168.2.2:80
+# Enable lb affinity
+ovn-nbctl --wait=sb set load_balancer lb1 options:affinity_timeout=3
+ovn-nbctl --wait=sb set load_balancer lb11 options:affinity_timeout=3
+ovn-nbctl lr-lb-add R2 lb1
+ovn-nbctl lr-lb-add R2 lb11
+ovn-nbctl lr-lb-add R2 lb1-no-aff
+ovn-nbctl lr-lb-add R2 lb11-no-aff
+
+# Check that both backends are used.
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([alice1], [nc -z 172.16.1.101 8080])
+ ovs-ofctl del-flows br-int table=78
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.101) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=172.16.1.2,dst=172.16.1.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=172.16.1.2,dst=172.16.1.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([alice1], [nc -z 172.16.1.101 8081])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+NETNS_DAEMONIZE([bar2], [nc -l -k 192.168.2.3 80], [nc-bar2.pid])
+
+ovn-nbctl lb-add lb2 192.168.2.100:8080 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl lb-add lb20 192.168.2.120:8080 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl lb-add lb2-no-aff 192.168.2.100:8081 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl lb-add lb20-no-aff 192.168.2.120:8081 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl --wait=sb set load_balancer lb2 options:affinity_timeout=60
+ovn-nbctl --wait=sb set load_balancer lb20 options:affinity_timeout=60
+ovn-nbctl ls-lb-add foo lb2
+ovn-nbctl ls-lb-add foo lb20
+ovn-nbctl ls-lb-add foo lb2-no-aff
+ovn-nbctl ls-lb-add foo lb20-no-aff
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([foo1], [nc -z 192.168.2.100 8080])
+done
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.2.100) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=192.168.2.[[0-9]]/src=192.168.2.<cleared>/'], [0], [dnl
+tcp,orig=(src=192.168.1.2,dst=192.168.2.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.<cleared>,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([foo1], [nc -z 192.168.2.100 8081])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+ovn-nbctl lb-add lb3 192.168.2.101:8080 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl lb-add lb30 192.168.2.131:8080 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl lb-add lb3-no-aff 192.168.2.101:8081 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl lb-add lb30-no-aff 192.168.2.131:8081 192.168.2.2:80,192.168.2.3:80
+ovn-nbctl --wait=sb set load_balancer lb3 options:affinity_timeout=3
+ovn-nbctl --wait=sb set load_balancer lb30 options:affinity_timeout=3
+ovn-nbctl ls-lb-add foo lb3
+ovn-nbctl ls-lb-add foo lb30
+ovn-nbctl ls-lb-add foo lb3-no-aff
+ovn-nbctl ls-lb-add foo lb30-no-aff
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([foo1], [nc -z 192.168.2.101 8080])
+ ovs-ofctl del-flows br-int table=78
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.2.101) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=192.168.1.2,dst=192.168.2.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=192.168.1.2,dst=192.168.2.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.3,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([foo1], [nc -z 192.168.2.101 8081])
+
+NS_CHECK_EXEC([foo1], [ip neigh add 192.168.1.200 lladdr 00:00:01:01:02:03 dev foo1], [0])
+ovn-nbctl lb-add lb4 192.168.1.100:8080 192.168.1.2:80
+ovn-nbctl lb-add lb40 192.168.1.140:8080 192.168.1.2:80
+ovn-nbctl lb-add lb4-no-aff 192.168.1.100:8081 192.168.1.2:80
+ovn-nbctl lb-add lb40-no-aff 192.168.1.140:8081 192.168.1.2:80
+ovn-nbctl --wait=sb set load_balancer lb4 options:affinity_timeout=60 options:hairpin_snat_ip=192.168.1.200
+ovn-nbctl --wait=sb set load_balancer lb40 options:affinity_timeout=60 options:hairpin_snat_ip=192.168.1.200
+ovn-nbctl ls-lb-add foo lb4
+ovn-nbctl ls-lb-add foo lb40
+ovn-nbctl lr-lb-add R1 lb4
+ovn-nbctl lr-lb-add R1 lb40
+ovn-nbctl ls-lb-add foo lb4-no-aff
+ovn-nbctl ls-lb-add foo lb40-no-aff
+ovn-nbctl lr-lb-add R1 lb4-no-aff
+ovn-nbctl lr-lb-add R1 lb40-no-aff
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([foo1], [nc -z 192.168.1.100 8080])
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.1.2) |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=192.168.1.2,dst=192.168.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=192.168.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=192.168.1.200,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=192.168.1.200,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=192.168.1.200,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([foo1], [nc -z 192.168.1.100 8081])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d
+/inactivity probe.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([load balancing affinity sessions - IPv6])
+AT_KEYWORDS([ovnlb])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# Start ovn-controller
+start_daemon ovn-controller
+
+# Logical network:
+# Two LRs - R1 and R2 that are connected to each other via LS "join"
+# in fd20::/64 network. R1 has switches foo (fd11::/64) and
+# bar (fd12::/64) connected to it. R2 has alice (fd72::/64) connected
+# to it. R2 is a gateway router on which we add load-balancing rules.
+#
+# foo -- R1 -- join - R2 -- alice
+# |
+# bar ----
+
+ovn-nbctl create Logical_Router name=R1
+ovn-nbctl create Logical_Router name=R2 options:chassis=hv1
+
+ovn-nbctl ls-add foo
+ovn-nbctl ls-add bar
+ovn-nbctl ls-add alice
+ovn-nbctl ls-add join
+
+# Connect foo to R1
+ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 fd11::1/64
+ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \
+ type=router options:router-port=foo addresses=\"00:00:01:01:02:03\"
+
+# Connect bar to R1
+ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 fd12::1/64
+ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \
+ type=router options:router-port=bar addresses=\"00:00:01:01:02:04\"
+
+# Connect alice to R2
+ovn-nbctl lrp-add R2 alice 00:00:02:01:02:03 fd72::1/64
+ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice \
+ type=router options:router-port=alice addresses=\"00:00:02:01:02:03\"
+
+# Connect R1 to join
+ovn-nbctl lrp-add R1 R1_join 00:00:04:01:02:03 fd20::1/64
+ovn-nbctl lsp-add join r1-join -- set Logical_Switch_Port r1-join \
+ type=router options:router-port=R1_join addresses='"00:00:04:01:02:03"'
+
+# Connect R2 to join
+ovn-nbctl lrp-add R2 R2_join 00:00:04:01:02:04 fd20::2/64
+ovn-nbctl lsp-add join r2-join -- set Logical_Switch_Port r2-join \
+ type=router options:router-port=R2_join addresses='"00:00:04:01:02:04"'
+
+# Static routes.
+ovn-nbctl lr-route-add R1 fd72::/64 fd20::2
+ovn-nbctl lr-route-add R2 fd11::/64 fd20::1
+ovn-nbctl lr-route-add R2 fd12::/64 fd20::1
+
+# Logical port 'foo1' in switch 'foo'.
+ADD_NAMESPACES(foo1)
+ADD_VETH(foo1, foo1, br-int, "fd11::2/64", "f0:00:00:01:02:03", \
+ "fd11::1")
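+# Wait until IPv6 DAD (duplicate address detection) completes; the
+# address cannot be used while it is still flagged 'tentative'.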
+OVS_WAIT_UNTIL([test "$(ip -n foo1 a | grep fd11::2 | grep tentative)" = ""])
+ovn-nbctl lsp-add foo foo1 \
+-- lsp-set-addresses foo1 "f0:00:00:01:02:03 fd11::2"
+
+# Logical port 'alice1' in switch 'alice'.
+ADD_NAMESPACES(alice1)
+ADD_VETH(alice1, alice1, br-int, "fd72::2/64", "f0:00:00:01:02:04", \
+ "fd72::1")
+OVS_WAIT_UNTIL([test "$(ip -n alice1 a | grep fd72::2 | grep tentative)" = ""])
+ovn-nbctl lsp-add alice alice1 \
+-- lsp-set-addresses alice1 "f0:00:00:01:02:04 fd72::2"
+
+# Logical port 'bar1' in switch 'bar'.
+ADD_NAMESPACES(bar1)
+ADD_VETH(bar1, bar1, br-int, "fd12::2/64", "f0:00:00:01:02:05", \
+"fd12::1")
+OVS_WAIT_UNTIL([test "$(ip -n bar1 a | grep fd12::2 | grep tentative)" = ""])
+ovn-nbctl lsp-add bar bar1 \
+-- lsp-set-addresses bar1 "f0:00:00:01:02:05 fd12::2"
+
+ADD_NAMESPACES(bar2)
+ADD_VETH(bar2, bar2, br-int, "fd12::3/64", "e0:00:00:01:02:05", \
+"fd12::1")
+OVS_WAIT_UNTIL([test "$(ip -n bar2 a | grep fd12::3 | grep tentative)" = ""])
+ovn-nbctl lsp-add bar bar2 \
+-- lsp-set-addresses bar2 "e0:00:00:01:02:05 fd12::3"
+
+ovn-nbctl lb-add lb0 [[fd30::1]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lb-add lb10 [[fd30::10]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lb-add lb0-no-aff [[fd30::1]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lb-add lb10-no-aff [[fd30::10]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lr-lb-add R2 lb0
+ovn-nbctl lr-lb-add R2 lb10
+ovn-nbctl lr-lb-add R2 lb0-no-aff
+ovn-nbctl lr-lb-add R2 lb10-no-aff
+
+# Wait for ovn-controller to catch up.
+ovn-nbctl --wait=hv sync
+OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-groups br-int | \
+grep 'nat(dst=\[[fd11::2\]]:80)'])
+
+# Start webservers in 'foo1', 'bar1'.
+NETNS_DAEMONIZE([foo1], [nc -l -k fd11::2 80], [nc-foo1.pid])
+NETNS_DAEMONIZE([bar1], [nc -l -k fd12::2 80], [nc-bar1.pid])
+
+dnl Should work with the virtual IP address through NAT
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([alice1], [nc -z fd30::1 8080])
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=fd72::2,dst=fd30::1,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=fd72::2,dst=fd30::1,sport=<cleared>,dport=<cleared>),reply=(src=fd12::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([alice1], [nc -z fd30::1 8081])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+# Enable lb affinity
+ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60
+ovn-nbctl --wait=sb set load_balancer lb10 options:affinity_timeout=60
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([alice1], [nc -z fd30::1 8080])
+done
+
+dnl Here we should have just one entry in the conntrack table.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=fd1[[0-9]]::2/src=fd1<cleared>::2/'], [0], [dnl
+tcp,orig=(src=fd72::2,dst=fd30::1,sport=<cleared>,dport=<cleared>),reply=(src=fd1<cleared>::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+
+AT_CHECK([ovs-ofctl dump-flows br-int table=78 |grep cookie |sed -e 's/duration=[[0-9]]*.[[0-9]]*s/duration=<cleared>/; s/load:0xfd1[[0-9]]000000000000/load:0xfd1<cleared>000000000000/; s/n_packets=[[0-9]]*/n_packets=<cleared>/; s/n_bytes=[[0-9]]*/n_bytes=<cleared>/; s/idle_age=[[0-9]]*/idle_age=<cleared>/; s/hard_age=[[0-9]]*, //'], [0], [dnl
+ cookie=0x0, duration=<cleared>, table=78, n_packets=<cleared>, n_bytes=<cleared>, idle_timeout=60, idle_age=<cleared>, tcp6,metadata=0x2,ipv6_src=fd72::2,ipv6_dst=fd30::1,tp_dst=8080 actions=load:0x1->NXM_NX_REG10[[14]],load:0x2->NXM_NX_XXREG1[[0..63]],load:0xfd1<cleared>000000000000->NXM_NX_XXREG1[[64..127]],load:0x50->NXM_NX_REG8[[0..15]]
+])
+
+check_affinity_flows () {
+    n1=$(ovs-ofctl dump-flows br-int table=15 | \
+        awk '/priority=150,ct_state=\+new\+trk,ipv6,reg4=0xfd110000/{print substr($4,11,length($4)-11)}')
+    n2=$(ovs-ofctl dump-flows br-int table=15 | \
+        awk '/priority=150,ct_state=\+new\+trk,ipv6,reg4=0xfd120000/{print substr($4,11,length($4)-11)}')
+    [[ $n1 -gt 0 && $n2 -eq 0 ]] || [[ $n1 -eq 0 && $n2 -gt 0 ]]
+    echo $?
+}
+AT_CHECK([test $(check_affinity_flows) -eq 0])
+NS_CHECK_EXEC([alice1], [nc -z fd30::1 8081])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+ovn-nbctl lb-add lb1 [[fd30::2]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lb-add lb11 [[fd30::12]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lb-add lb1-no-aff [[fd30::2]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
+ovn-nbctl lb-add lb11-no-aff [[fd30::12]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
+# Enable lb affinity
+ovn-nbctl --wait=sb set load_balancer lb1 options:affinity_timeout=3
+ovn-nbctl --wait=sb set load_balancer lb11 options:affinity_timeout=3
+ovn-nbctl lr-lb-add R2 lb1
+ovn-nbctl lr-lb-add R2 lb11
+ovn-nbctl lr-lb-add R2 lb1-no-aff
+ovn-nbctl lr-lb-add R2 lb11-no-aff
+
+# Check that both backends are used.
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([alice1], [nc -z fd30::2 8080])
+ ovs-ofctl del-flows br-int table=78
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::2) | grep -v fe80 |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=fd72::2,dst=fd30::2,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=fd72::2,dst=fd30::2,sport=<cleared>,dport=<cleared>),reply=(src=fd12::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([alice1], [nc -z fd30::2 8081])
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+NETNS_DAEMONIZE([bar2], [nc -l -k fd12::3 80], [nc-bar2.pid])
+
+ovn-nbctl lb-add lb2 [[fd12::a]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl lb-add lb20 [[fd12::2a]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl lb-add lb2-no-aff [[fd12::a]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl lb-add lb20-no-aff [[fd12::2a]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl --wait=sb set load_balancer lb2 options:affinity_timeout=60
+ovn-nbctl --wait=sb set load_balancer lb20 options:affinity_timeout=60
+ovn-nbctl ls-lb-add foo lb2
+ovn-nbctl ls-lb-add foo lb20
+ovn-nbctl ls-lb-add foo lb2-no-aff
+ovn-nbctl ls-lb-add foo lb20-no-aff
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([foo1], [nc -z fd12::a 8080])
+done
+
+dnl Here we should have just one entry in the conntrack table.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd12::a) | grep -v fe80 |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=fd12::[[0-9]]/src=fd12::<cleared>/'], [0], [dnl
+tcp,orig=(src=fd11::2,dst=fd12::a,sport=<cleared>,dport=<cleared>),reply=(src=fd12::<cleared>,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([foo1], [nc -z fd12::a 8081])
+
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+ovn-nbctl lb-add lb3 [[fd12::b]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl lb-add lb30 [[fd12::3b]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl lb-add lb3-no-aff [[fd12::b]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl lb-add lb30-no-aff [[fd12::3b]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
+ovn-nbctl --wait=sb set load_balancer lb3 options:affinity_timeout=3
+ovn-nbctl --wait=sb set load_balancer lb30 options:affinity_timeout=3
+ovn-nbctl ls-lb-add foo lb3
+ovn-nbctl ls-lb-add foo lb30
+ovn-nbctl ls-lb-add foo lb3-no-aff
+ovn-nbctl ls-lb-add foo lb30-no-aff
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([foo1], [nc -z fd12::b 8080])
+ ovs-ofctl del-flows br-int table=78
+done
+
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd12::b) | grep -v fe80 |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=fd11::2,dst=fd12::b,sport=<cleared>,dport=<cleared>),reply=(src=fd12::2,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=fd11::2,dst=fd12::b,sport=<cleared>,dport=<cleared>),reply=(src=fd12::3,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([foo1], [nc -z fd12::b 8081])
+
+NS_CHECK_EXEC([foo1], [ip -6 neigh add fd11::b lladdr 00:00:01:01:02:03 dev foo1], [0])
+ovn-nbctl --wait=sb lb-add lb4 [[fd11::a]]:8080 [[fd11::2]]:80
+ovn-nbctl --wait=sb lb-add lb40 [[fd11::a]]:8080 [[fd11::2]]:80
+ovn-nbctl --wait=sb lb-add lb4-no-aff [[fd11::a]]:8081 [[fd11::2]]:80
+ovn-nbctl --wait=sb lb-add lb40-no-aff [[fd11::a]]:8081 [[fd11::2]]:80
+ovn-nbctl --wait=sb set load_balancer lb4 options:affinity_timeout=60 options:hairpin_snat_ip="fd11::b"
+ovn-nbctl --wait=sb set load_balancer lb40 options:affinity_timeout=60 options:hairpin_snat_ip="fd11::b"
+ovn-nbctl ls-lb-add foo lb4
+ovn-nbctl ls-lb-add foo lb40
+ovn-nbctl lr-lb-add R1 lb4
+ovn-nbctl lr-lb-add R1 lb40
+ovn-nbctl ls-lb-add foo lb4-no-aff
+ovn-nbctl ls-lb-add foo lb40-no-aff
+ovn-nbctl lr-lb-add R1 lb4-no-aff
+ovn-nbctl lr-lb-add R1 lb40-no-aff
+
+# Flush conntrack entries for easier output parsing of next test.
+AT_CHECK([ovs-appctl dpctl/flush-conntrack])
+
+for i in $(seq 1 15); do
+ echo Request $i
+ NS_CHECK_EXEC([foo1], [nc -z fd11::a 8080])
+done
+
+dnl Each server should have at least one connection.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd11::2) | grep -v fe80 |
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+tcp,orig=(src=fd11::2,dst=fd11::2,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd11::b,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
+tcp,orig=(src=fd11::2,dst=fd11::a,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
+tcp,orig=(src=fd11::b,dst=fd11::2,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd11::b,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
+])
+NS_CHECK_EXEC([foo1], [nc -z fd11::a 8081])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d
+/inactivity probe.*/d"])
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([SNAT in separate zone from DNAT])
+
+CHECK_CONNTRACK()
+CHECK_CONNTRACK_NAT()
+ovn_start
+OVS_TRAFFIC_VSWITCHD_START()
+ADD_BR([br-int])
+
+# Set external-ids in br-int needed for ovn-controller
+ovs-vsctl \
+ -- set Open_vSwitch . external-ids:system-id=hv1 \
+ -- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
+ -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
+ -- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
+ -- set bridge br-int fail-mode=secure other-config:disable-in-band=true
+
+# The goal of this test is to ensure that when traffic is first DNATted
+# (by way of a load balancer), and then SNATted, the SNAT happens in a
+# separate conntrack zone from the DNAT.
+
+start_daemon ovn-controller
+
+check ovn-nbctl ls-add public
+
+check ovn-nbctl lr-add r1
+check ovn-nbctl lrp-add r1 r1_public 00:de:ad:ff:00:01 172.16.0.1/16
+check ovn-nbctl lrp-add r1 r1_s1 00:de:ad:fe:00:01 173.0.1.1/24
+check ovn-nbctl lrp-set-gateway-chassis r1_public hv1
+
+check ovn-nbctl lb-add r1_lb 30.0.0.1 172.16.0.102
+check ovn-nbctl lr-lb-add r1 r1_lb
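+# The VIP 30.0.0.1 is DNATted to 172.16.0.102, which is itself the
+# dnat_and_snat address of vm2 on r2.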
+
+check ovn-nbctl ls-add s1
+check ovn-nbctl lsp-add s1 s1_r1
+check ovn-nbctl lsp-set-type s1_r1 router
+check ovn-nbctl lsp-set-addresses s1_r1 router
+check ovn-nbctl lsp-set-options s1_r1 router-port=r1_s1
+
+check ovn-nbctl lsp-add s1 vm1
+check ovn-nbctl lsp-set-addresses vm1 "00:de:ad:01:00:01 173.0.1.2"
+
+check ovn-nbctl lsp-add public public_r1
+check ovn-nbctl lsp-set-type public_r1 router
+check ovn-nbctl lsp-set-addresses public_r1 router
+check ovn-nbctl lsp-set-options public_r1 router-port=r1_public nat-addresses=router
+
+check ovn-nbctl lr-add r2
+check ovn-nbctl lrp-add r2 r2_public 00:de:ad:ff:00:02 172.16.0.2/16
+check ovn-nbctl lrp-add r2 r2_s2 00:de:ad:fe:00:02 173.0.2.1/24
+check ovn-nbctl lr-nat-add r2 dnat_and_snat 172.16.0.102 173.0.2.2
+check ovn-nbctl lrp-set-gateway-chassis r2_public hv1
+
+check ovn-nbctl ls-add s2
+check ovn-nbctl lsp-add s2 s2_r2
+check ovn-nbctl lsp-set-type s2_r2 router
+check ovn-nbctl lsp-set-addresses s2_r2 router
+check ovn-nbctl lsp-set-options s2_r2 router-port=r2_s2
+
+check ovn-nbctl lsp-add s2 vm2
+check ovn-nbctl lsp-set-addresses vm2 "00:de:ad:01:00:02 173.0.2.2"
+
+check ovn-nbctl lsp-add public public_r2
+check ovn-nbctl lsp-set-type public_r2 router
+check ovn-nbctl lsp-set-addresses public_r2 router
+check ovn-nbctl lsp-set-options public_r2 router-port=r2_public nat-addresses=router
+
+ADD_NAMESPACES(vm1)
+ADD_VETH(vm1, vm1, br-int, "173.0.1.2/24", "00:de:ad:01:00:01", \
+ "173.0.1.1")
+ADD_NAMESPACES(vm2)
+ADD_VETH(vm2, vm2, br-int, "173.0.2.2/24", "00:de:ad:01:00:02", \
+ "173.0.2.1")
+
+check ovn-nbctl lr-nat-add r1 dnat_and_snat 172.16.0.101 173.0.1.2 vm1 00:00:00:01:02:03
+check ovn-nbctl --wait=hv sync
+
+# Next, make sure that a ping works as expected
+NS_CHECK_EXEC([vm1], [ping -q -c 3 -i 0.3 -w 2 30.0.0.1 | FORMAT_PING], \
+[0], [dnl
+3 packets transmitted, 3 received, 0% packet loss, time 0ms
+])
+
+# Finally, make sure that conntrack shows two separate zones being used for
+# DNAT and SNAT
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=172.16.0.102,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=2
+])
+
+# The final two entries appear identical here. That is because FORMAT_CT
+# scrubs the zone numbers. In actuality, the zone numbers are different,
+# which is why there are two entries.
+AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.102) | \
+sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
+icmp,orig=(src=172.16.0.101,dst=172.16.0.102,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=172.16.0.101,id=<cleared>,type=0,code=0),zone=<cleared>
+icmp,orig=(src=173.0.1.2,dst=172.16.0.102,id=<cleared>,type=8,code=0),reply=(src=172.16.0.102,dst=172.16.0.101,id=<cleared>,type=0,code=0),zone=<cleared>
+icmp,orig=(src=173.0.1.2,dst=172.16.0.102,id=<cleared>,type=8,code=0),reply=(src=172.16.0.102,dst=172.16.0.101,id=<cleared>,type=0,code=0),zone=<cleared>
+])
+
+OVS_APP_EXIT_AND_WAIT([ovn-controller])
+
+as ovn-sb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as ovn-nb
+OVS_APP_EXIT_AND_WAIT([ovsdb-server])
+
+as northd
+OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE])
+
+as
+OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
+/connection dropped.*/d"])
+AT_CLEANUP
+])
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index 3bbdbd998..2f8ec4348 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -4421,6 +4421,8 @@ nbctl_pre_lr_route_del(struct ctl_context *ctx)
ovsdb_idl_add_column(ctx->idl,
&nbrec_logical_router_static_route_col_policy);
+ ovsdb_idl_add_column(ctx->idl,
+ &nbrec_logical_router_static_route_col_bfd);
ovsdb_idl_add_column(ctx->idl,
&nbrec_logical_router_static_route_col_ip_prefix);
ovsdb_idl_add_column(ctx->idl,
@@ -4433,7 +4435,7 @@ nbctl_pre_lr_route_del(struct ctl_context *ctx)
}
static void
-nbctl_lr_route_del(struct ctl_context *ctx)
+nbctl_lr_route_del(struct ctl_context *ctx)
{
const struct nbrec_logical_router *lr;
char *error = lr_by_name_or_uuid(ctx, ctx->argv[1], true, &lr);
@@ -4550,6 +4552,10 @@ nbctl_lr_route_del(struct ctl_context *ctx)
}
/* Everything matched. Removing. */
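+    /* Also delete the BFD row referenced by this route so it does
+     * not linger once the route is gone. */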
+ if (lr->static_routes[i]->bfd) {
+ nbrec_bfd_delete(lr->static_routes[i]->bfd);
+ }
+
nbrec_logical_router_update_static_routes_delvalue(
lr, lr->static_routes[i]);
n_removed++;
diff --git a/utilities/ovn-trace.c b/utilities/ovn-trace.c
index d9e7129d9..858f481fc 100644
--- a/utilities/ovn-trace.c
+++ b/utilities/ovn-trace.c
@@ -60,6 +60,9 @@ static char *unixctl_path;
/* The southbound database. */
static struct ovsdb_idl *ovnsb_idl;
+/* --leader-only, --no-leader-only: Only accept the leader in a cluster. */
+static int leader_only = true;
+
/* --detailed: Show a detailed, table-by-table trace. */
static bool detailed;
@@ -138,6 +141,7 @@ main(int argc, char *argv[])
1, INT_MAX, ovntrace_trace, NULL);
}
ovnsb_idl = ovsdb_idl_create(db, &sbrec_idl_class, true, false);
+ ovsdb_idl_set_leader_only(ovnsb_idl, leader_only);
bool already_read = false;
for (;;) {
@@ -243,6 +247,8 @@ parse_options(int argc, char *argv[])
{
enum {
OPT_DB = UCHAR_MAX + 1,
+ OPT_LEADER_ONLY,
+ OPT_NO_LEADER_ONLY,
OPT_UNIXCTL,
OPT_DETAILED,
OPT_SUMMARY,
@@ -260,6 +266,8 @@ parse_options(int argc, char *argv[])
};
static const struct option long_options[] = {
{"db", required_argument, NULL, OPT_DB},
+ {"leader-only", no_argument, NULL, OPT_LEADER_ONLY},
+ {"no-leader-only", no_argument, NULL, OPT_NO_LEADER_ONLY},
{"unixctl", required_argument, NULL, OPT_UNIXCTL},
{"detailed", no_argument, NULL, OPT_DETAILED},
{"summary", no_argument, NULL, OPT_SUMMARY},
@@ -294,6 +302,14 @@ parse_options(int argc, char *argv[])
db = optarg;
break;
+ case OPT_LEADER_ONLY:
+ leader_only = true;
+ break;
+
+ case OPT_NO_LEADER_ONLY:
+ leader_only = false;
+ break;
+
case OPT_UNIXCTL:
unixctl_path = optarg;
break;
@@ -390,6 +406,7 @@ Output style options:\n\
Other options:\n\
--db=DATABASE connect to DATABASE\n\
(default: %s)\n\
+ --no-leader-only accept any cluster member, not just the leader\n\
--ovs[=REMOTE] obtain corresponding OpenFlow flows from REMOTE\n\
(default: %s)\n\
--unixctl=SOCKET set control socket name\n\
@@ -3298,6 +3315,10 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
break;
case OVNACT_CHK_ECMP_NH:
break;
+ case OVNACT_COMMIT_LB_AFF:
+ break;
+ case OVNACT_CHK_LB_AFF:
+ break;
}
}
ofpbuf_uninit(&stack);