From 14de43e68f38fab76e50c21d3773f44c6e119d0e Mon Sep 17 00:00:00 2001
From: Numan Siddique <numans@ovn.org>
Date: Mon, 27 Jan 2020 18:36:24 +0530
Subject: [PATCH 2/2] ovn-northd: Consider load balancer active backends in
 router pipeline
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The commit [1] which added load balancer health check support
missed updating the logical flows in the logical router pipeline to
consider only active backends when a load balancer is associated.
This patch fixes that.  It also refactors the code a bit.

Without this, an offline backend may be chosen for load balancing,
resulting in packet loss.
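
As an illustration (addresses taken from the added test case), for a VIP
10.0.0.10:80 with backends 10.0.0.3:80 and 20.0.0.3:80, the ct.new
action in lr_in_dnat now depends on the backends' service monitor
status, e.g.:

    both online          => ct_lb(10.0.0.3:80,20.0.0.3:80);
    only 10.0.0.3 online => ct_lb(10.0.0.3:80);
    none online          => drop;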

This patch also adds logical flows in the ingress and egress ACL stages
of the logical switch pipeline to skip ACL processing for service
monitor health check traffic.
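
For example (with <svc_monitor_mac> standing for the MAC configured in
the NB_Global options:svc_monitor_mac column), every logical switch now
gets:

    ls_in_acl : priority 34000, match "eth.dst == <svc_monitor_mac>", action "next;"
    ls_out_acl: priority 34000, match "eth.src == <svc_monitor_mac>", action "next;"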

[1] - ba0d6eae960d ("ovn-northd: Add support for Load Balancer health check")

Reported-by: Maciej Józefczyk <mjozefcz@redhat.com>
Fixes: ba0d6eae960d ("ovn-northd: Add support for Load Balancer health check")
Signed-off-by: Numan Siddique <numans@ovn.org>
Acked-by: Dumitru Ceara <dceara@redhat.com>

(cherry-picked from upstream commit bb9f2b9ce56c9319066524c0ad89034adae6b168)
Conflicts:
	tests/ovn.at

Change-Id: Ibacbaace0d710aa4d3e0b1d077e032443dd2a9e6
---
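A quick way to observe the change (mirroring the added test case; the
datapath name "lr0" and the backends come from that test):

    ovn-sbctl dump-flows lr0 | grep lr_in_dnat | grep priority=120

The ct.new flow's action should list only the online backends in
ct_lb(), or become "drop;" when no backend is online.
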
 ovn/northd/ovn-northd.8.xml |  22 +++
 ovn/northd/ovn-northd.c     | 268 +++++++++++++++++-------------------
 tests/ovn.at                |  27 ++++
 3 files changed, 172 insertions(+), 145 deletions(-)

diff --git a/ovn/northd/ovn-northd.8.xml b/ovn/northd/ovn-northd.8.xml
index 11ef65964..50f7d941b 100644
--- a/ovn/northd/ovn-northd.8.xml
+++ b/ovn/northd/ovn-northd.8.xml
@@ -410,6 +410,17 @@
         in the request direction are skipped here to let a newly created
         ACL re-allow this connection.
       </li>
+
+      <li>
+        A priority 34000 logical flow is added for each logical switch datapath
+        with the match <code>eth.dst == <var>E</var></code> to allow the service
+        monitor reply packet destined to <code>ovn-controller</code>
+        with the action <code>next</code>, where <var>E</var> is the
+        service monitor mac defined in the
+        <ref column="options:svc_monitor_mac" table="NB_Global"
+        db="OVN_Northbound"/> colum of <ref table="NB_Global"
+        db="OVN_Northbound"/> table.
+      </li>
     </ul>
 
     <h3>Ingress Table 7: <code>from-lport</code> QoS Marking</h3>
@@ -1220,6 +1231,17 @@ output;
         to allow the DNS reply packet from the
         <code>Ingress Table 15:DNS responses</code>.
       </li>
+
+      <li>
+        A priority 34000 logical flow is added for each logical switch datapath
+        with the match <code>eth.src == <var>E</var></code> to allow the service
+        monitor request packet generated by <code>ovn-controller</code>
+        with the action <code>next</code>, where <var>E</var> is the
+        service monitor mac defined in the
+        <ref column="options:svc_monitor_mac" table="NB_Global"
+        db="OVN_Northbound"/> colum of <ref table="NB_Global"
+        db="OVN_Northbound"/> table.
+      </li>
     </ul>
 
     <h3>Egress Table 5: <code>to-lport</code> QoS Marking</h3>
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index b71ae7a94..7139143b3 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -3008,6 +3008,7 @@ struct lb_vip {
 struct lb_vip_backend {
     char *ip;
     uint16_t port;
+    int addr_family;
 
     struct ovn_port *op; /* Logical port to which the ip belong to. */
     bool health_check;
@@ -3163,6 +3164,7 @@ ovn_lb_create(struct northd_context *ctx, struct hmap *lbs,
 
             lb->vips[n_vips].backends[i].ip = backend_ip;
             lb->vips[n_vips].backends[i].port = backend_port;
+            lb->vips[n_vips].backends[i].addr_family = addr_family;
             lb->vips[n_vips].backends[i].op = op;
             lb->vips[n_vips].backends[i].svc_mon_src_ip = svc_mon_src_ip;
 
@@ -3226,6 +3228,38 @@ ovn_lb_destroy(struct ovn_lb *lb)
     free(lb->vips);
 }
 
+static void build_lb_vip_ct_lb_actions(struct lb_vip *lb_vip,
+                                       struct ds *action)
+{
+    if (lb_vip->health_check) {
+        ds_put_cstr(action, "ct_lb(");
+
+        size_t n_active_backends = 0;
+        for (size_t k = 0; k < lb_vip->n_backends; k++) {
+            struct lb_vip_backend *backend = &lb_vip->backends[k];
+            if (backend->health_check && backend->sbrec_monitor &&
+                backend->sbrec_monitor->status &&
+                strcmp(backend->sbrec_monitor->status, "online")) {
+                continue;
+            }
+
+            n_active_backends++;
+            ds_put_format(action, "%s:%"PRIu16",",
+            backend->ip, backend->port);
+        }
+
+        if (!n_active_backends) {
+            ds_clear(action);
+            ds_put_cstr(action, "drop;");
+        } else {
+            ds_chomp(action, ',');
+            ds_put_cstr(action, ");");
+        }
+    } else {
+        ds_put_format(action, "ct_lb(%s);", lb_vip->backend_ips);
+    }
+}
+
 static void
 build_ovn_lbs(struct northd_context *ctx, struct hmap *ports,
               struct hmap *lbs)
@@ -4579,11 +4613,11 @@ ls_has_dns_records(const struct nbrec_logical_switch *nbs)
 
 static void
 build_empty_lb_event_flow(struct ovn_datapath *od, struct hmap *lflows,
-                          struct smap_node *node, char *ip_address,
-                          struct nbrec_load_balancer *lb, uint16_t port,
-                          int addr_family, int pl, struct shash *meter_groups)
+                          struct lb_vip *lb_vip,
+                          struct nbrec_load_balancer *lb,
+                          int pl, struct shash *meter_groups)
 {
-    if (!controller_event_en || node->value[0]) {
+    if (!controller_event_en || lb_vip->n_backends) {
         return;
     }
 
@@ -4594,32 +4628,35 @@ build_empty_lb_event_flow(struct ovn_datapath *od, struct hmap *lflows,
         meter = "event-elb";
     }
 
-    if (addr_family == AF_INET) {
-        ds_put_format(&match, "ip4.dst == %s && %s",
-                      ip_address, lb->protocol);
-    } else {
-        ds_put_format(&match, "ip6.dst == %s && %s",
-                      ip_address, lb->protocol);
-    }
-    if (port) {
+    ds_put_format(&match, "ip%s.dst == %s && %s",
+                  lb_vip->addr_family == AF_INET ? "4": "6",
+                  lb_vip->vip, lb->protocol);
+
+    char *vip = lb_vip->vip;
+    if (lb_vip->vip_port) {
         ds_put_format(&match, " && %s.dst == %u", lb->protocol,
-                      port);
+                      lb_vip->vip_port);
+        vip = xasprintf("%s:%u", lb_vip->vip, lb_vip->vip_port);
     }
+
     action = xasprintf("trigger_event(event = \"%s\", "
                        "meter = \"%s\", vip = \"%s\", "
                        "protocol = \"%s\", "
                        "load_balancer = \"" UUID_FMT "\");",
                        event_to_string(OVN_EVENT_EMPTY_LB_BACKENDS),
-                       meter, node->key, lb->protocol,
+                       meter, vip, lb->protocol,
                        UUID_ARGS(&lb->header_.uuid));
     ovn_lflow_add(lflows, od, pl, 130, ds_cstr(&match), action);
     ds_destroy(&match);
+    if (lb_vip->vip_port) {
+        free(vip);
+    }
     free(action);
 }
 
 static void
 build_pre_lb(struct ovn_datapath *od, struct hmap *lflows,
-             struct shash *meter_groups)
+             struct shash *meter_groups, struct hmap *lbs)
 {
     /* Do not send ND packets to conntrack */
     ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 110,
@@ -4643,40 +4680,29 @@ build_pre_lb(struct ovn_datapath *od, struct hmap *lflows,
     struct sset all_ips_v6 = SSET_INITIALIZER(&all_ips_v6);
     bool vip_configured = false;
     for (int i = 0; i < od->nbs->n_load_balancer; i++) {
-        struct nbrec_load_balancer *lb = od->nbs->load_balancer[i];
-        struct smap *vips = &lb->vips;
-        struct smap_node *node;
-        int addr_family = AF_INET;
-
-        SMAP_FOR_EACH (node, vips) {
-            vip_configured = true;
-
-            /* node->key contains IP:port or just IP. */
-            char *ip_address = NULL;
-            uint16_t port;
-            ip_address_and_port_from_lb_key(node->key, &ip_address, &port,
-                                            &addr_family);
-            if (!ip_address) {
-                continue;
-            }
+        struct nbrec_load_balancer *nb_lb = od->nbs->load_balancer[i];
+        struct ovn_lb *lb =
+            ovn_lb_find(lbs, &nb_lb->header_.uuid);
+        ovs_assert(lb);
 
-            if (addr_family == AF_INET) {
-                sset_add(&all_ips_v4, ip_address);
+        for (size_t j = 0; j < lb->n_vips; j++) {
+            struct lb_vip *lb_vip = &lb->vips[j];
+            if (lb_vip->addr_family == AF_INET) {
+                sset_add(&all_ips_v4, lb_vip->vip);
             } else {
-                sset_add(&all_ips_v6, ip_address);
+                sset_add(&all_ips_v6, lb_vip->vip);
             }
 
-            build_empty_lb_event_flow(od, lflows, node, ip_address, lb,
-                                      port, addr_family, S_SWITCH_IN_PRE_LB,
-                                      meter_groups);
-
-            free(ip_address);
+            build_empty_lb_event_flow(od, lflows, lb_vip, nb_lb,
+                                      S_SWITCH_IN_PRE_LB, meter_groups);
 
             /* Ignore L4 port information in the key because fragmented packets
              * may not have L4 information.  The pre-stateful table will send
              * the packet through ct() action to de-fragment. In stateful
              * table, we will eventually look at L4 information. */
         }
+
+        vip_configured = !!lb->n_vips;
     }
 
     /* 'REGBIT_CONNTRACK_DEFRAG' is set to let the pre-stateful table send
@@ -5224,6 +5250,20 @@ build_acls(struct ovn_datapath *od, struct hmap *lflows,
             lflows, od, S_SWITCH_OUT_ACL, 34000, "udp.src == 53",
             actions);
     }
+
+    /* Add a 34000 priority flow to advance the service monitor reply
+     * packets to skip applying ingress ACLs. */
+    char *svc_check_match = xasprintf("eth.dst == %s", svc_monitor_mac);
+    ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 34000, svc_check_match,
+                  "next;");
+    free(svc_check_match);
+
+    /* Add a 34000 priority flow to advance the service monitor packets
+     * generated by ovn-controller to skip applying egress ACLs. */
+    svc_check_match = xasprintf("eth.src == %s", svc_monitor_mac);
+    ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 34000, svc_check_match,
+                  "next;");
+    free(svc_check_match);
 }
 
 static void
@@ -5349,36 +5389,7 @@ build_stateful(struct ovn_datapath *od, struct hmap *lflows, struct hmap *lbs)
             struct lb_vip *lb_vip = &lb->vips[j];
             /* New connections in Ingress table. */
             struct ds action = DS_EMPTY_INITIALIZER;
-            if (lb_vip->health_check) {
-                ds_put_cstr(&action, "ct_lb(");
-
-                size_t n_active_backends = 0;
-                for (size_t k = 0; k < lb_vip->n_backends; k++) {
-                    struct lb_vip_backend *backend = &lb_vip->backends[k];
-                    bool is_up = true;
-                    if (backend->health_check && backend->sbrec_monitor &&
-                        backend->sbrec_monitor->status &&
-                        strcmp(backend->sbrec_monitor->status, "online")) {
-                        is_up = false;
-                    }
-
-                    if (is_up) {
-                        n_active_backends++;
-                        ds_put_format(&action, "%s:%"PRIu16",",
-                        backend->ip, backend->port);
-                    }
-                }
-
-                if (!n_active_backends) {
-                    ds_clear(&action);
-                    ds_put_cstr(&action, "drop;");
-                } else {
-                    ds_chomp(&action, ',');
-                    ds_put_cstr(&action, ");");
-                }
-            } else {
-                ds_put_format(&action, "ct_lb(%s);", lb_vip->backend_ips);
-            }
+            build_lb_vip_ct_lb_actions(lb_vip, &action);
 
             struct ds match = DS_EMPTY_INITIALIZER;
             if (lb_vip->addr_family == AF_INET) {
@@ -5699,7 +5710,7 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
 
         build_pre_acls(od, lflows);
-        build_pre_lb(od, lflows, meter_groups);
+        build_pre_lb(od, lflows, meter_groups, lbs);
         build_pre_stateful(od, lflows);
         build_acls(od, lflows, port_groups);
         build_qos(od, lflows);
@@ -6963,15 +6974,11 @@ get_force_snat_ip(struct ovn_datapath *od, const char *key_type,
 static void
 add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
                    struct ds *match, struct ds *actions, int priority,
-                   const char *lb_force_snat_ip, struct smap_node *lb_info,
-                   bool is_udp, int addr_family, char *ip_addr,
-                   uint16_t l4_port, struct nbrec_load_balancer *lb,
+                   const char *lb_force_snat_ip, struct lb_vip *lb_vip,
+                   bool is_udp, struct nbrec_load_balancer *lb,
                    struct shash *meter_groups)
 {
-    char *backend_ips = lb_info->value;
-
-    build_empty_lb_event_flow(od, lflows, lb_info, ip_addr, lb,
-                              l4_port, addr_family, S_ROUTER_IN_DNAT,
+    build_empty_lb_event_flow(od, lflows, lb_vip, lb, S_ROUTER_IN_DNAT,
                               meter_groups);
 
     /* A match and actions for new connections. */
@@ -7000,7 +7007,7 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
     free(new_match);
     free(est_match);
 
-    if (!od->l3dgw_port || !od->l3redirect_port || !backend_ips) {
+    if (!od->l3dgw_port || !od->l3redirect_port || !lb_vip->n_backends) {
         return;
     }
 
@@ -7009,46 +7016,28 @@ add_router_lb_flow(struct hmap *lflows, struct ovn_datapath *od,
      * router has a gateway router port associated.
      */
     struct ds undnat_match = DS_EMPTY_INITIALIZER;
-    if (addr_family == AF_INET) {
+    if (lb_vip->addr_family == AF_INET) {
         ds_put_cstr(&undnat_match, "ip4 && (");
     } else {
         ds_put_cstr(&undnat_match, "ip6 && (");
     }
-    char *start, *next, *ip_str;
-    start = next = xstrdup(backend_ips);
-    ip_str = strsep(&next, ",");
-    bool backend_ips_found = false;
-    while (ip_str && ip_str[0]) {
-        char *ip_address = NULL;
-        uint16_t port = 0;
-        int addr_family_;
-        ip_address_and_port_from_lb_key(ip_str, &ip_address, &port,
-                                        &addr_family_);
-        if (!ip_address) {
-            break;
-        }
 
-        if (addr_family_ == AF_INET) {
-            ds_put_format(&undnat_match, "(ip4.src == %s", ip_address);
+    for (size_t i = 0; i < lb_vip->n_backends; i++) {
+        struct lb_vip_backend *backend = &lb_vip->backends[i];
+        if (backend->addr_family == AF_INET) {
+            ds_put_format(&undnat_match, "(ip4.src == %s", backend->ip);
         } else {
-            ds_put_format(&undnat_match, "(ip6.src == %s", ip_address);
+            ds_put_format(&undnat_match, "(ip6.src == %s", backend->ip);
         }
-        free(ip_address);
-        if (port) {
+
+        if (backend->port) {
             ds_put_format(&undnat_match, " && %s.src == %d) || ",
-                          is_udp ? "udp" : "tcp", port);
+                          is_udp ? "udp" : "tcp", backend->port);
         } else {
             ds_put_cstr(&undnat_match, ") || ");
         }
-        ip_str = strsep(&next, ",");
-        backend_ips_found = true;
     }
 
-    free(start);
-    if (!backend_ips_found) {
-        ds_destroy(&undnat_match);
-        return;
-    }
     ds_chomp(&undnat_match, ' ');
     ds_chomp(&undnat_match, '|');
     ds_chomp(&undnat_match, '|');
@@ -7166,7 +7155,8 @@ lrouter_nat_is_stateless(const struct nbrec_nat *nat)
 
 static void
 build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
-                    struct hmap *lflows, struct shash *meter_groups)
+                    struct hmap *lflows, struct shash *meter_groups,
+                    struct hmap *lbs)
 {
     /* This flow table structure is documented in ovn-northd(8), so please
      * update ovn-northd.8.xml if you change anything. */
@@ -8546,24 +8536,18 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
         struct sset all_ips = SSET_INITIALIZER(&all_ips);
 
         for (int i = 0; i < od->nbr->n_load_balancer; i++) {
-            struct nbrec_load_balancer *lb = od->nbr->load_balancer[i];
-            struct smap *vips = &lb->vips;
-            struct smap_node *node;
-
-            SMAP_FOR_EACH (node, vips) {
-                uint16_t port = 0;
-                int addr_family;
-
-                /* node->key contains IP:port or just IP. */
-                char *ip_address = NULL;
-                ip_address_and_port_from_lb_key(node->key, &ip_address, &port,
-                        &addr_family);
-                if (!ip_address) {
-                    continue;
-                }
+            struct nbrec_load_balancer *nb_lb = od->nbr->load_balancer[i];
+            struct ovn_lb *lb =
+                ovn_lb_find(lbs, &nb_lb->header_.uuid);
+            ovs_assert(lb);
+
+            for (size_t j = 0; j < lb->n_vips; j++) {
+                struct lb_vip *lb_vip = &lb->vips[j];
+                ds_clear(&actions);
+                build_lb_vip_ct_lb_actions(lb_vip, &actions);
 
-                if (!sset_contains(&all_ips, ip_address)) {
-                    sset_add(&all_ips, ip_address);
+                if (!sset_contains(&all_ips, lb_vip->vip)) {
+                    sset_add(&all_ips, lb_vip->vip);
                     /* If there are any load balancing rules, we should send
                      * the packet to conntrack for defragmentation and
                      * tracking.  This helps with two things.
@@ -8573,12 +8557,12 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
                      * 2. If there are L4 ports in load balancing rules, we
                      *    need the defragmentation to match on L4 ports. */
                     ds_clear(&match);
-                    if (addr_family == AF_INET) {
+                    if (lb_vip->addr_family == AF_INET) {
                         ds_put_format(&match, "ip && ip4.dst == %s",
-                                      ip_address);
-                    } else if (addr_family == AF_INET6) {
+                                      lb_vip->vip);
+                    } else if (lb_vip->addr_family == AF_INET6) {
                         ds_put_format(&match, "ip && ip6.dst == %s",
-                                      ip_address);
+                                      lb_vip->vip);
                     }
                     ovn_lflow_add(lflows, od, S_ROUTER_IN_DEFRAG,
                                   100, ds_cstr(&match), "ct_next;");
@@ -8589,28 +8573,25 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
                  * via add_router_lb_flow().  One flow is for specific matching
                  * on ct.new with an action of "ct_lb($targets);".  The other
                  * flow is for ct.est with an action of "ct_dnat;". */
-                ds_clear(&actions);
-                ds_put_format(&actions, "ct_lb(%s);", node->value);
-
                 ds_clear(&match);
-                if (addr_family == AF_INET) {
+                if (lb_vip->addr_family == AF_INET) {
                     ds_put_format(&match, "ip && ip4.dst == %s",
-                                ip_address);
-                } else if (addr_family == AF_INET6) {
+                                  lb_vip->vip);
+                } else if (lb_vip->addr_family == AF_INET6) {
                     ds_put_format(&match, "ip && ip6.dst == %s",
-                                ip_address);
+                                  lb_vip->vip);
                 }
 
                 int prio = 110;
-                bool is_udp = lb->protocol && !strcmp(lb->protocol, "udp") ?
-                    true : false;
-                if (port) {
+                bool is_udp = nullable_string_is_equal(nb_lb->protocol, "udp");
+
+                if (lb_vip->vip_port) {
                     if (is_udp) {
                         ds_put_format(&match, " && udp && udp.dst == %d",
-                                      port);
+                                      lb_vip->vip_port);
                     } else {
                         ds_put_format(&match, " && tcp && tcp.dst == %d",
-                                      port);
+                                      lb_vip->vip_port);
                     }
                     prio = 120;
                 }
@@ -8620,11 +8601,8 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
                                   od->l3redirect_port->json_key);
                 }
                 add_router_lb_flow(lflows, od, &match, &actions, prio,
-                                   lb_force_snat_ip, node, is_udp,
-                                   addr_family, ip_address, port, lb,
-                                   meter_groups);
-
-                free(ip_address);
+                                   lb_force_snat_ip, lb_vip, is_udp,
+                                   nb_lb, meter_groups);
             }
         }
         sset_destroy(&all_ips);
@@ -9431,7 +9409,7 @@ build_lflows(struct northd_context *ctx, struct hmap *datapaths,
 
     build_lswitch_flows(datapaths, ports, port_groups, &lflows, mcgroups,
                         igmp_groups, meter_groups, lbs);
-    build_lrouter_flows(datapaths, ports, &lflows, meter_groups);
+    build_lrouter_flows(datapaths, ports, &lflows, meter_groups, lbs);
 
     /* Push changes to the Logical_Flow table to database. */
     const struct sbrec_logical_flow *sbflow, *next_sbflow;
diff --git a/tests/ovn.at b/tests/ovn.at
index e2565f274..3a5ecf211 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -17007,6 +17007,22 @@ ovn-nbctl --wait=sb ls-lb-add sw0 lb1
 ovn-nbctl --wait=sb ls-lb-add sw1 lb1
 ovn-nbctl --wait=sb lr-lb-add lr0 lb1
 
+ovn-nbctl ls-add public
+ovn-nbctl lrp-add lr0 lr0-public 00:00:20:20:12:13 172.168.0.100/24
+ovn-nbctl lsp-add public public-lr0
+ovn-nbctl lsp-set-type public-lr0 router
+ovn-nbctl lsp-set-addresses public-lr0 router
+ovn-nbctl lsp-set-options public-lr0 router-port=lr0-public
+
+# localnet port
+ovn-nbctl lsp-add public ln-public
+ovn-nbctl lsp-set-type ln-public localnet
+ovn-nbctl lsp-set-addresses ln-public unknown
+ovn-nbctl lsp-set-options ln-public network_name=public
+
+# Schedule the gw router port to a chassis.
+ovn-nbctl --wait=hv lrp-set-gateway-chassis lr0-public hv1 20
+
 OVN_POPULATE_ARP
 ovn-nbctl --wait=hv sync
 
@@ -17018,6 +17034,11 @@ AT_CHECK([cat lflows.txt], [0], [dnl
   table=10(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(ct_lb(10.0.0.3:80,20.0.0.3:80);)
 ])
 
+ovn-sbctl dump-flows lr0 | grep ct_lb | grep priority=120 > lflows.txt
+AT_CHECK([cat lflows.txt], [0], [dnl
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_lb(10.0.0.3:80,20.0.0.3:80);)
+])
+
 # get the svc monitor mac.
 svc_mon_src_mac=`ovn-nbctl get NB_Global . options:svc_monitor_mac | \
 sed s/":"//g | sed s/\"//g`
@@ -17051,5 +17072,11 @@ AT_CHECK([cat lflows.txt], [0], [dnl
   table=10(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(drop;)
 ])
 
+ovn-sbctl dump-flows lr0 | grep lr_in_dnat | grep priority=120 > lflows.txt
+AT_CHECK([cat lflows.txt], [0], [dnl
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(ct_dnat;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip && ip4.dst == 10.0.0.10 && tcp && tcp.dst == 80 && is_chassis_resident("cr-lr0-public")), action=(drop;)
+])
+
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
-- 
2.24.1